qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH RFC 03/12] migration: Create the multi-rdma-channels parameter


From: Zhimin Feng
Subject: [PATCH RFC 03/12] migration: Create the multi-rdma-channels parameter
Date: Thu, 9 Jan 2020 12:59:13 +0800

From: fengzhimin <address@hidden>

Indicates the number of RDMA threads that we will create.
By default we create 2 threads for RDMA migration.

Signed-off-by: fengzhimin <address@hidden>
---
 migration/migration.c | 32 ++++++++++++++++++++++++++++++++
 migration/migration.h |  1 +
 monitor/hmp-cmds.c    |  7 +++++++
 qapi/migration.json   | 23 +++++++++++++++++++----
 4 files changed, 59 insertions(+), 4 deletions(-)

diff --git a/migration/migration.c b/migration/migration.c
index d9d73a5eac..5756a4806e 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -88,6 +88,9 @@
 #define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY (200 * 100)
 #define DEFAULT_MIGRATE_MULTIFD_CHANNELS 2
 
+/* Define default MultiRDMA thread number */
+#define DEFAULT_MIGRATE_MULTIRDMA_CHANNELS 2
+
 /* Background transfer rate for postcopy, 0 means unlimited, note
  * that page requests can still exceed this limit.
  */
@@ -788,6 +791,8 @@ MigrationParameters *qmp_query_migrate_parameters(Error 
**errp)
     params->announce_rounds = s->parameters.announce_rounds;
     params->has_announce_step = true;
     params->announce_step = s->parameters.announce_step;
+    params->has_multi_rdma_channels = true;
+    params->multi_rdma_channels = s->parameters.multi_rdma_channels;
 
     return params;
 }
@@ -1171,6 +1176,14 @@ static bool migrate_params_check(MigrationParameters 
*params, Error **errp)
         return false;
     }
 
+    if (params->has_multi_rdma_channels
+        && (params->multi_rdma_channels < 1)) {
+        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
+                   "multi_rdma_channels",
+                   "is invalid, it should be in the range of 1 to 5");
+        return false;
+    }
+
     if (params->has_xbzrle_cache_size &&
         (params->xbzrle_cache_size < qemu_target_page_size() ||
          !is_power_of_2(params->xbzrle_cache_size))) {
@@ -1302,6 +1315,9 @@ static void 
migrate_params_test_apply(MigrateSetParameters *params,
     if (params->has_announce_step) {
         dest->announce_step = params->announce_step;
     }
+    if (params->has_multi_rdma_channels) {
+        dest->multi_rdma_channels = params->multi_rdma_channels;
+    }
 }
 
 static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
@@ -1403,6 +1419,9 @@ static void migrate_params_apply(MigrateSetParameters 
*params, Error **errp)
     if (params->has_announce_step) {
         s->parameters.announce_step = params->announce_step;
     }
+    if (params->has_multi_rdma_channels) {
+        s->parameters.multi_rdma_channels = params->multi_rdma_channels;
+    }
 }
 
 void qmp_migrate_set_parameters(MigrateSetParameters *params, Error **errp)
@@ -2222,6 +2241,15 @@ int migrate_multifd_channels(void)
     return s->parameters.multifd_channels;
 }
 
+int migrate_multiRDMA_channels(void)
+{
+    MigrationState *s;
+
+    s = migrate_get_current();
+
+    return s->parameters.multi_rdma_channels;
+}
+
 int migrate_use_xbzrle(void)
 {
     MigrationState *s;
@@ -3513,6 +3541,9 @@ static Property migration_properties[] = {
     DEFINE_PROP_SIZE("announce-step", MigrationState,
                       parameters.announce_step,
                       DEFAULT_MIGRATE_ANNOUNCE_STEP),
+    DEFINE_PROP_UINT8("multiRDMA-channels", MigrationState,
+                      parameters.multi_rdma_channels,
+                      DEFAULT_MIGRATE_MULTIRDMA_CHANNELS),
 
     /* Migration capabilities */
     DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE),
@@ -3591,6 +3622,7 @@ static void migration_instance_init(Object *obj)
     params->has_announce_max = true;
     params->has_announce_rounds = true;
     params->has_announce_step = true;
+    params->has_multi_rdma_channels = true;
 
     qemu_sem_init(&ms->postcopy_pause_sem, 0);
     qemu_sem_init(&ms->postcopy_pause_rp_sem, 0);
diff --git a/migration/migration.h b/migration/migration.h
index 0a23375b2f..4192c22d8c 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -271,6 +271,7 @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error 
**errp);
 void migration_incoming_process(void);
 
 bool  migration_has_all_channels(void);
+int migrate_multiRDMA_channels(void);
 
 uint64_t migrate_max_downtime(void);
 
diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c
index d0e0af893a..80898c8942 100644
--- a/monitor/hmp-cmds.c
+++ b/monitor/hmp-cmds.c
@@ -456,6 +456,9 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict 
*qdict)
         monitor_printf(mon, " %s: '%s'\n",
             MigrationParameter_str(MIGRATION_PARAMETER_TLS_AUTHZ),
             params->has_tls_authz ? params->tls_authz : "");
+        monitor_printf(mon, "%s: %u\n",
+            MigrationParameter_str(MIGRATION_PARAMETER_MULTI_RDMA_CHANNELS),
+            params->multi_rdma_channels);
     }
 
     qapi_free_MigrationParameters(params);
@@ -1855,6 +1858,10 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict 
*qdict)
         p->has_announce_step = true;
         visit_type_size(v, param, &p->announce_step, &err);
         break;
+    case MIGRATION_PARAMETER_MULTI_RDMA_CHANNELS:
+        p->has_multi_rdma_channels = true;
+        visit_type_int(v, param, &p->multi_rdma_channels, &err);
+        break;
     default:
         assert(0);
     }
diff --git a/qapi/migration.json b/qapi/migration.json
index c995ffdc4c..ab79bf0600 100644
--- a/qapi/migration.json
+++ b/qapi/migration.json
@@ -588,6 +588,10 @@
 # @max-cpu-throttle: maximum cpu throttle percentage.
 #                    Defaults to 99. (Since 3.1)
 #
+# @multi-rdma-channels: Number of channels used to migrate data in
+#                       parallel. This is the same as the number of
+#                       multiRDMA threads used for migration.  The
+#                       default value is 2 (since 4.2)
 # Since: 2.4
 ##
 { 'enum': 'MigrationParameter',
@@ -600,7 +604,8 @@
            'downtime-limit', 'x-checkpoint-delay', 'block-incremental',
            'multifd-channels',
            'xbzrle-cache-size', 'max-postcopy-bandwidth',
-           'max-cpu-throttle' ] }
+           'max-cpu-throttle',
+           'multi-rdma-channels'] }
 
 ##
 # @MigrateSetParameters:
@@ -690,6 +695,10 @@
 # @max-cpu-throttle: maximum cpu throttle percentage.
 #                    The default value is 99. (Since 3.1)
 #
+# @multi-rdma-channels: Number of channels used to migrate data in
+#                       parallel. This is the same as the number of
+#                       multiRDMA threads used for migration.  The
+#                       default value is 2 (since 4.2)
 # Since: 2.4
 ##
 # TODO either fuse back into MigrationParameters, or make
@@ -715,7 +724,8 @@
             '*multifd-channels': 'int',
             '*xbzrle-cache-size': 'size',
             '*max-postcopy-bandwidth': 'size',
-           '*max-cpu-throttle': 'int' } }
+           '*max-cpu-throttle': 'int',
+            '*multi-rdma-channels': 'int'} }
 
 ##
 # @migrate-set-parameters:
@@ -825,6 +835,10 @@
 #                    Defaults to 99.
 #                     (Since 3.1)
 #
+# @multi-rdma-channels: Number of channels used to migrate data in
+#                       parallel. This is the same as the number of
+#                       multiRDMA threads used for migration.  The
+#                       default value is 2 (since 4.2)
 # Since: 2.4
 ##
 { 'struct': 'MigrationParameters',
@@ -847,8 +861,9 @@
             '*block-incremental': 'bool' ,
             '*multifd-channels': 'uint8',
             '*xbzrle-cache-size': 'size',
-           '*max-postcopy-bandwidth': 'size',
-            '*max-cpu-throttle':'uint8'} }
+           '*max-postcopy-bandwidth': 'size',
+            '*max-cpu-throttle':'uint8',
+            '*multi-rdma-channels':'uint8'} }
 
 ##
 # @query-migrate-parameters:
-- 
2.19.1





reply via email to

[Prev in Thread] Current Thread [Next in Thread]