From: Fabiano Rosas
Subject: [PULL 05/25] migration/multifd: Unify RAM_SAVE_FLAG_MULTIFD_FLUSH messages
Date: Fri, 10 Jan 2025 09:13:53 -0300
From: Peter Xu <peterx@redhat.com>
The RAM_SAVE_FLAG_MULTIFD_FLUSH message should always be correlated with a
sync request on the source side. Unify the sending of this message into one
place, and send it only when necessary.
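For reference, the unified flow after this patch looks roughly like the
following (condensed from the multifd-nocomp.c hunk below, not a verbatim
copy): sync the sender threads first, skip the wire message entirely for
local-only (mapped-ram) syncs, and fall back to RAM_SAVE_FLAG_EOS semantics
for old-machine compatibility.

int multifd_ram_flush_and_sync(QEMUFile *f)
{
    MultiFDSyncReq req;
    int ret;

    if (!migrate_multifd()) {
        return 0;
    }

    /* File migrations only need to sync with threads */
    req = migrate_mapped_ram() ? MULTIFD_SYNC_LOCAL : MULTIFD_SYNC_ALL;

    ret = multifd_send_sync_main(req);
    if (ret) {
        return ret;
    }

    if (req == MULTIFD_SYNC_LOCAL) {
        /* No remote sync needed, so nothing goes on the wire */
        return 0;
    }

    if (migrate_multifd_flush_after_each_section()) {
        /* Old QEMUs key off RAM_SAVE_FLAG_EOS instead */
        return 0;
    }

    qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
    qemu_fflush(f);
    return 0;
}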
Reviewed-by: Fabiano Rosas <farosas@suse.de>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20241206224755.1108686-5-peterx@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
---
migration/multifd-nocomp.c | 27 +++++++++++++++++++++++++--
migration/multifd.h | 2 +-
migration/ram.c | 18 ++++--------------
3 files changed, 30 insertions(+), 17 deletions(-)
diff --git a/migration/multifd-nocomp.c b/migration/multifd-nocomp.c
index 219f9e58ef..58372db0f4 100644
--- a/migration/multifd-nocomp.c
+++ b/migration/multifd-nocomp.c
@@ -20,6 +20,7 @@
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "trace.h"
+#include "qemu-file.h"
static MultiFDSendData *multifd_ram_send;
@@ -343,9 +344,10 @@ retry:
return true;
}
-int multifd_ram_flush_and_sync(void)
+int multifd_ram_flush_and_sync(QEMUFile *f)
{
MultiFDSyncReq req;
+ int ret;
if (!migrate_multifd()) {
return 0;
@@ -361,7 +363,28 @@ int multifd_ram_flush_and_sync(void)
/* File migrations only need to sync with threads */
req = migrate_mapped_ram() ? MULTIFD_SYNC_LOCAL : MULTIFD_SYNC_ALL;
- return multifd_send_sync_main(req);
+ ret = multifd_send_sync_main(req);
+ if (ret) {
+ return ret;
+ }
+
+ /* If we don't need to sync with remote at all, nothing else to do */
+ if (req == MULTIFD_SYNC_LOCAL) {
+ return 0;
+ }
+
+ /*
+ * Old QEMUs don't understand RAM_SAVE_FLAG_MULTIFD_FLUSH, it relies
+ * on RAM_SAVE_FLAG_EOS instead.
+ */
+ if (migrate_multifd_flush_after_each_section()) {
+ return 0;
+ }
+
+ qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
+ qemu_fflush(f);
+
+ return 0;
}
bool multifd_send_prepare_common(MultiFDSendParams *p)
diff --git a/migration/multifd.h b/migration/multifd.h
index 6493512305..0fef431f6b 100644
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -354,7 +354,7 @@ static inline uint32_t multifd_ram_page_count(void)
void multifd_ram_save_setup(void);
void multifd_ram_save_cleanup(void);
-int multifd_ram_flush_and_sync(void);
+int multifd_ram_flush_and_sync(QEMUFile *f);
size_t multifd_ram_payload_size(void);
void multifd_ram_fill_packet(MultiFDSendParams *p);
int multifd_ram_unfill_packet(MultiFDRecvParams *p, Error **errp);
diff --git a/migration/ram.c b/migration/ram.c
index 01521de71f..ef683d11f0 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1306,15 +1306,10 @@ static int find_dirty_block(RAMState *rs,
PageSearchStatus *pss)
(!migrate_multifd_flush_after_each_section() ||
migrate_mapped_ram())) {
QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
- int ret = multifd_ram_flush_and_sync();
+ int ret = multifd_ram_flush_and_sync(f);
if (ret < 0) {
return ret;
}
-
- if (!migrate_mapped_ram()) {
- qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
- qemu_fflush(f);
- }
}
/* Hit the end of the list */
@@ -3044,18 +3039,13 @@ static int ram_save_setup(QEMUFile *f, void *opaque,
Error **errp)
}
bql_unlock();
- ret = multifd_ram_flush_and_sync();
+ ret = multifd_ram_flush_and_sync(f);
bql_lock();
if (ret < 0) {
error_setg(errp, "%s: multifd synchronization failed", __func__);
return ret;
}
- if (migrate_multifd() && !migrate_multifd_flush_after_each_section()
- && !migrate_mapped_ram()) {
- qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
- }
-
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
ret = qemu_fflush(f);
if (ret < 0) {
@@ -3190,7 +3180,7 @@ out:
if (ret >= 0 && migration_is_running()) {
if (migrate_multifd() && migrate_multifd_flush_after_each_section() &&
!migrate_mapped_ram()) {
- ret = multifd_ram_flush_and_sync();
+ ret = multifd_ram_flush_and_sync(f);
if (ret < 0) {
return ret;
}
@@ -3268,7 +3258,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
* Only the old dest QEMU will need this sync, because each EOS
* will require one SYNC message on each channel.
*/
- ret = multifd_ram_flush_and_sync();
+ ret = multifd_ram_flush_and_sync(f);
if (ret < 0) {
return ret;
}
--
2.35.3
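With the flag emission folded into the helper, the call sites in ram.c
collapse to a plain call, e.g. in find_dirty_block() (taken from the hunk
above):

    QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
    int ret = multifd_ram_flush_and_sync(f);
    if (ret < 0) {
        return ret;
    }

The open-coded qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH) / qemu_fflush(f)
sequences and their migrate_mapped_ram() and
migrate_multifd_flush_after_each_section() guards disappear from ram.c.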