[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 05/16] migration: Drop inactivate_disk param in qemu_savevm_state
From: |
Peter Xu |
Subject: |
[PATCH 05/16] migration: Drop inactivate_disk param in qemu_savevm_state_complete* |
Date: |
Tue, 14 Jan 2025 18:07:35 -0500 |
This parameter is only used by one caller, which is the genuine precopy
complete path (migration_completion_precopy).
The parameter was introduced in a1fbe750fd ("migration: Fix race of image
locking between src and dst") to ensure the disk inactivation happens
before EOF is sent, so that the destination will always be able to
activate the disk properly. However, there is no limitation on how early
we inactivate the disk. For the precopy completion path, we can always do
that as long as the VM is stopped.
Move the disk inactivation there; then we can remove the inactivate_disk
parameter from the whole call stack, because all remaining callers always
pass in false.
Signed-off-by: Peter Xu <peterx@redhat.com>
---
migration/savevm.h | 5 ++---
migration/migration.c | 22 ++++++++++++++++------
migration/savevm.c | 27 +++++----------------------
3 files changed, 23 insertions(+), 31 deletions(-)
diff --git a/migration/savevm.h b/migration/savevm.h
index 9ec96a995c..c48a53e95e 100644
--- a/migration/savevm.h
+++ b/migration/savevm.h
@@ -39,8 +39,7 @@ void qemu_savevm_state_header(QEMUFile *f);
int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy);
void qemu_savevm_state_cleanup(void);
void qemu_savevm_state_complete_postcopy(QEMUFile *f);
-int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
- bool inactivate_disks);
+int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only);
void qemu_savevm_state_pending_exact(uint64_t *must_precopy,
uint64_t *can_postcopy);
void qemu_savevm_state_pending_estimate(uint64_t *must_precopy,
@@ -68,6 +67,6 @@ int qemu_loadvm_state_main(QEMUFile *f,
MigrationIncomingState *mis);
int qemu_load_device_state(QEMUFile *f);
int qemu_loadvm_approve_switchover(void);
int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
- bool in_postcopy, bool inactivate_disks);
+ bool in_postcopy);
#endif
diff --git a/migration/migration.c b/migration/migration.c
index e1fc1a7fdc..b33baab950 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -2555,7 +2555,7 @@ static int postcopy_start(MigrationState *ms, Error
**errp)
* Cause any non-postcopiable, but iterative devices to
* send out their final data.
*/
- qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false);
+ qemu_savevm_state_complete_precopy(ms->to_dst_file, true);
/*
* in Finish migrate and with the io-lock held everything should
@@ -2600,7 +2600,7 @@ static int postcopy_start(MigrationState *ms, Error
**errp)
*/
qemu_savevm_send_postcopy_listen(fb);
- qemu_savevm_state_complete_precopy(fb, false, false);
+ qemu_savevm_state_complete_precopy(fb, false);
if (migrate_postcopy_ram()) {
qemu_savevm_send_ping(fb, 3);
}
@@ -2732,11 +2732,21 @@ static int migration_completion_precopy(MigrationState
*s,
goto out_unlock;
}
+ /* Inactivate disks except in COLO */
+ if (!migrate_colo()) {
+ /*
+ * Inactivate before sending QEMU_VM_EOF so that the
+ * bdrv_activate_all() on the other end won't fail.
+ */
+ if (!migration_block_inactivate()) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+ }
+
migration_rate_set(RATE_LIMIT_DISABLED);
- /* Inactivate disks except in COLO */
- ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
- !migrate_colo());
+ ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false);
out_unlock:
bql_unlock();
return ret;
@@ -3617,7 +3627,7 @@ static void *bg_migration_thread(void *opaque)
* save their state to channel-buffer along with devices.
*/
cpu_synchronize_all_states();
- if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) {
+ if (qemu_savevm_state_complete_precopy_non_iterable(fb, false)) {
goto fail;
}
/*
diff --git a/migration/savevm.c b/migration/savevm.c
index fa03a0a264..5e56a5d9fc 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1521,8 +1521,7 @@ int qemu_savevm_state_complete_precopy_iterable(QEMUFile
*f, bool in_postcopy)
}
int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
- bool in_postcopy,
- bool inactivate_disks)
+ bool in_postcopy)
{
MigrationState *ms = migrate_get_current();
int64_t start_ts_each, end_ts_each;
@@ -1553,20 +1552,6 @@ int
qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
end_ts_each - start_ts_each);
}
- if (inactivate_disks) {
- /*
- * Inactivate before sending QEMU_VM_EOF so that the
- * bdrv_activate_all() on the other end won't fail.
- */
- if (!migration_block_inactivate()) {
- error_setg(&local_err, "%s: bdrv_inactivate_all() failed",
- __func__);
- migrate_set_error(ms, local_err);
- error_report_err(local_err);
- qemu_file_set_error(f, -EFAULT);
- return ret;
- }
- }
if (!in_postcopy) {
/* Postcopy stream will still be going */
qemu_put_byte(f, QEMU_VM_EOF);
@@ -1587,8 +1572,7 @@ int
qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
return 0;
}
-int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
- bool inactivate_disks)
+int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only)
{
int ret;
Error *local_err = NULL;
@@ -1613,8 +1597,7 @@ int qemu_savevm_state_complete_precopy(QEMUFile *f, bool
iterable_only,
goto flush;
}
- ret = qemu_savevm_state_complete_precopy_non_iterable(f, in_postcopy,
- inactivate_disks);
+ ret = qemu_savevm_state_complete_precopy_non_iterable(f, in_postcopy);
if (ret) {
return ret;
}
@@ -1717,7 +1700,7 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp)
ret = qemu_file_get_error(f);
if (ret == 0) {
- qemu_savevm_state_complete_precopy(f, false, false);
+ qemu_savevm_state_complete_precopy(f, false);
ret = qemu_file_get_error(f);
}
if (ret != 0) {
@@ -1743,7 +1726,7 @@ cleanup:
void qemu_savevm_live_state(QEMUFile *f)
{
/* save QEMU_VM_SECTION_END section */
- qemu_savevm_state_complete_precopy(f, true, false);
+ qemu_savevm_state_complete_precopy(f, true);
qemu_put_byte(f, QEMU_VM_EOF);
}
--
2.47.0
- [PATCH 00/16] migration: Switchover phase refactoring, Peter Xu, 2025/01/14
- [PATCH 01/16] migration: Remove postcopy implications in should_send_vmdesc(), Peter Xu, 2025/01/14
- [PATCH 02/16] migration: Do not construct JSON description if suppressed, Peter Xu, 2025/01/14
- [PATCH 03/16] migration: Optimize postcopy on downtime by avoiding JSON writer, Peter Xu, 2025/01/14
- [PATCH 04/16] migration: Avoid two src-downtime-end tracepoints for postcopy, Peter Xu, 2025/01/14
- [PATCH 05/16] migration: Drop inactivate_disk param in qemu_savevm_state_complete*,
Peter Xu <=
- [PATCH 06/16] migration: Synchronize all CPU states only for non-iterable dump, Peter Xu, 2025/01/14
- [PATCH 08/16] migration: Adjust locking in migration_maybe_pause(), Peter Xu, 2025/01/14
- [PATCH 07/16] migration: Adjust postcopy bandwidth during switchover, Peter Xu, 2025/01/14
- [PATCH 10/16] migration: Take BQL slightly longer in postcopy_start(), Peter Xu, 2025/01/14
- [PATCH 12/16] migration: Unwrap qemu_savevm_state_complete_precopy() in postcopy, Peter Xu, 2025/01/14
- [PATCH 13/16] migration: Cleanup qemu_savevm_state_complete_precopy(), Peter Xu, 2025/01/14
- [PATCH 11/16] migration: Notify COMPLETE once for postcopy, Peter Xu, 2025/01/14
- [PATCH 14/16] migration: Always set DEVICE state, Peter Xu, 2025/01/14
- [PATCH 15/16] migration: Merge precopy/postcopy on switchover start, Peter Xu, 2025/01/14
- [PATCH 16/16] migration: Trivial cleanup on JSON writer of vmstate_save(), Peter Xu, 2025/01/14