[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PULL 42/42] migration: refactor ram_save_target_page functions
From: Fabiano Rosas
Subject: [PULL 42/42] migration: refactor ram_save_target_page functions
Date: Wed, 29 Jan 2025 13:00:59 -0300
From: Prasad Pandit <pjp@fedoraproject.org>
Refactor ram_save_target_page legacy and multifd
functions into one. Other than simplifying it,
it frees 'migration_ops' object from usage, so it
is expunged.
Signed-off-by: Prasad Pandit <pjp@fedoraproject.org>
Reviewed-by: Fabiano Rosas <farosas@suse.de>
Message-ID: <20250127120823.144949-3-ppandit@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
---
migration/ram.c | 67 +++++++++++++------------------------------------
1 file changed, 17 insertions(+), 50 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index 5aace00bf1..6f460fd22d 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -448,13 +448,6 @@ void ram_transferred_add(uint64_t bytes)
}
}
-struct MigrationOps {
- int (*ram_save_target_page)(RAMState *rs, PageSearchStatus *pss);
-};
-typedef struct MigrationOps MigrationOps;
-
-MigrationOps *migration_ops;
-
static int ram_save_host_page_urgent(PageSearchStatus *pss);
/* NOTE: page is the PFN not real ram_addr_t. */
@@ -1960,55 +1953,36 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t
start, ram_addr_t len,
}
/**
- * ram_save_target_page_legacy: save one target page
- *
- * Returns the number of pages written
+ * ram_save_target_page: save one target page to the precopy thread
+ * OR to multifd workers.
*
* @rs: current RAM state
* @pss: data about the page we want to send
*/
-static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
+static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss)
{
ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
int res;
+ if (!migrate_multifd()
+ || migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) {
+ if (save_zero_page(rs, pss, offset)) {
+ return 1;
+ }
+ }
+
+ if (migrate_multifd()) {
+ RAMBlock *block = pss->block;
+ return ram_save_multifd_page(block, offset);
+ }
+
if (control_save_page(pss, offset, &res)) {
return res;
}
- if (save_zero_page(rs, pss, offset)) {
- return 1;
- }
-
return ram_save_page(rs, pss);
}
-/**
- * ram_save_target_page_multifd: send one target page to multifd workers
- *
- * Returns 1 if the page was queued, -1 otherwise.
- *
- * @rs: current RAM state
- * @pss: data about the page we want to send
- */
-static int ram_save_target_page_multifd(RAMState *rs, PageSearchStatus *pss)
-{
- RAMBlock *block = pss->block;
- ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
-
- /*
- * While using multifd live migration, we still need to handle zero
- * page checking on the migration main thread.
- */
- if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) {
- if (save_zero_page(rs, pss, offset)) {
- return 1;
- }
- }
-
- return ram_save_multifd_page(block, offset);
-}
-
/* Should be called before sending a host page */
static void pss_host_page_prepare(PageSearchStatus *pss)
{
@@ -2095,7 +2069,7 @@ static int ram_save_host_page_urgent(PageSearchStatus
*pss)
if (page_dirty) {
/* Be strict to return code; it must be 1, or what else? */
- if (migration_ops->ram_save_target_page(rs, pss) != 1) {
+ if (ram_save_target_page(rs, pss) != 1) {
error_report_once("%s: ram_save_target_page failed", __func__);
ret = -1;
goto out;
@@ -2164,7 +2138,7 @@ static int ram_save_host_page(RAMState *rs,
PageSearchStatus *pss)
if (preempt_active) {
qemu_mutex_unlock(&rs->bitmap_mutex);
}
- tmppages = migration_ops->ram_save_target_page(rs, pss);
+ tmppages = ram_save_target_page(rs, pss);
if (tmppages >= 0) {
pages += tmppages;
/*
@@ -2362,8 +2336,6 @@ static void ram_save_cleanup(void *opaque)
xbzrle_cleanup();
multifd_ram_save_cleanup();
ram_state_cleanup(rsp);
- g_free(migration_ops);
- migration_ops = NULL;
}
static void ram_state_reset(RAMState *rs)
@@ -3029,13 +3001,8 @@ static int ram_save_setup(QEMUFile *f, void *opaque,
Error **errp)
return ret;
}
- migration_ops = g_malloc0(sizeof(MigrationOps));
-
if (migrate_multifd()) {
multifd_ram_save_setup();
- migration_ops->ram_save_target_page = ram_save_target_page_multifd;
- } else {
- migration_ops->ram_save_target_page = ram_save_target_page_legacy;
}
/*
--
2.35.3
- [PULL 08/42] migration: cpr-state, (continued)
- [PULL 08/42] migration: cpr-state, Fabiano Rosas, 2025/01/29
- [PULL 13/42] migration: incoming channel, Fabiano Rosas, 2025/01/29
- [PULL 19/42] tests/qtest: optimize migrate_set_ports, Fabiano Rosas, 2025/01/29
- [PULL 18/42] migration-test: memory_backend, Fabiano Rosas, 2025/01/29
- [PULL 16/42] migration: cpr-transfer save and load, Fabiano Rosas, 2025/01/29
- [PULL 27/42] migration: Do not construct JSON description if suppressed, Fabiano Rosas, 2025/01/29
- [PULL 32/42] migration: Adjust postcopy bandwidth during switchover, Fabiano Rosas, 2025/01/29
- [PULL 31/42] migration: Synchronize all CPU states only for non-iterable dump, Fabiano Rosas, 2025/01/29
- [PULL 36/42] migration: Notify COMPLETE once for postcopy, Fabiano Rosas, 2025/01/29
- [PULL 41/42] migration: Trivial cleanup on JSON writer of vmstate_save(), Fabiano Rosas, 2025/01/29
- [PULL 42/42] migration: refactor ram_save_target_page functions, Fabiano Rosas <=
- [PULL 17/42] migration: cpr-transfer mode, Fabiano Rosas, 2025/01/29
- [PULL 35/42] migration: Take BQL slightly longer in postcopy_start(), Fabiano Rosas, 2025/01/29
- [PULL 37/42] migration: Unwrap qemu_savevm_state_complete_precopy() in postcopy, Fabiano Rosas, 2025/01/29
- [PULL 38/42] migration: Cleanup qemu_savevm_state_complete_precopy(), Fabiano Rosas, 2025/01/29
- [PULL 39/42] migration: Always set DEVICE state, Fabiano Rosas, 2025/01/29
- [PULL 40/42] migration: Merge precopy/postcopy on switchover start, Fabiano Rosas, 2025/01/29