
Re: [PATCH v7 00/10] KVM: Dirty ring support (QEMU part)


From: Paolo Bonzini
Subject: Re: [PATCH v7 00/10] KVM: Dirty ring support (QEMU part)
Date: Fri, 14 May 2021 17:38:17 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Thunderbird/78.8.1

On 06/05/21 18:05, Peter Xu wrote:
> This is v7 of the qemu dirty ring interface support.
>
> v7:
> - Rebase to latest master commit d45a5270d07

Queued, thanks!

I only made one small change: I renamed the property from dirty-gfn-count
to dirty-ring-size, since (even assuming the user knows what a GFN is) the
old name did not make it clear that the property is related to the ring
buffer support.
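
With the new name, the feature is enabled on the command line like this
(a minimal sketch; 4096 is only the starting value suggested in the
documentation below, and the trailing "..." stands in for the rest of the
guest's options):

    qemu-system-x86_64 -accel kvm,dirty-ring-size=4096 ...

Leaving dirty-ring-size at its default of 0 keeps dirty tracking on the
existing dirty-bitmap path.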

Thanks,

Paolo

diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index fd8ce2e0b2..aa785b7171 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -146,9 +146,8 @@ struct KVMState
         KVMMemoryListener *ml;
         AddressSpace *as;
     } *as;
-    bool kvm_dirty_ring_enabled;    /* Whether KVM dirty ring is enabled */
-    uint64_t kvm_dirty_ring_size;   /* Size of the per-vcpu dirty ring */
-    uint32_t kvm_dirty_gfn_count;   /* Number of dirty GFNs per ring */
+    uint64_t kvm_dirty_ring_bytes;  /* Size of the per-vcpu dirty ring */
+    uint32_t kvm_dirty_ring_size;   /* Number of dirty GFNs per ring */
     struct KVMDirtyRingReaper reaper;
 };
@@ -725,14 +724,14 @@ static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
 static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
 {
     struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur;
-    uint32_t gfn_count = s->kvm_dirty_gfn_count;
+    uint32_t ring_size = s->kvm_dirty_ring_size;
     uint32_t count = 0, fetch = cpu->kvm_fetch_index;
-    assert(dirty_gfns && gfn_count);
+    assert(dirty_gfns && ring_size);
     trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);
     while (true) {
-        cur = &dirty_gfns[fetch % gfn_count];
+        cur = &dirty_gfns[fetch % ring_size];
         if (!dirty_gfn_is_dirtied(cur)) {
             break;
         }
@@ -1389,7 +1388,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
                  *
                  * Not easy.  Let's cross the fingers until it's fixed.
                  */
-                if (kvm_state->kvm_dirty_ring_enabled) {
+                if (kvm_state->kvm_dirty_ring_size) {
                     kvm_dirty_ring_reap_locked(kvm_state);
                 } else {
                     kvm_slot_get_dirty_log(kvm_state, mem);
@@ -2445,24 +2444,24 @@ static int kvm_init(MachineState *ms)
      * Enable KVM dirty ring if supported, otherwise fall back to
      * dirty logging mode
      */
-    if (s->kvm_dirty_gfn_count > 0) {
-        uint64_t ring_size;
+    if (s->kvm_dirty_ring_size > 0) {
+        uint64_t ring_bytes;
-        ring_size = s->kvm_dirty_gfn_count * sizeof(struct kvm_dirty_gfn);
+        ring_bytes = s->kvm_dirty_ring_size * sizeof(struct kvm_dirty_gfn);
         /* Read the max supported pages */
         ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING);
         if (ret > 0) {
-            if (ring_size > ret) {
-                error_report("KVM dirty GFN count %" PRIu32 " too big "
+            if (ring_bytes > ret) {
+                error_report("KVM dirty ring size %" PRIu32 " too big "
                              "(maximum is %ld).  Please use a smaller value.",
-                             s->kvm_dirty_gfn_count,
+                             s->kvm_dirty_ring_size,
                              ret / sizeof(struct kvm_dirty_gfn));
                 ret = -EINVAL;
                 goto err;
             }
-            ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING, 0, ring_size);
+            ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING, 0, ring_bytes);
             if (ret) {
                 error_report("Enabling of KVM dirty ring failed: %d. "
                              "Suggested mininum value is 1024. "
@@ -2470,8 +2469,7 @@ static int kvm_init(MachineState *ms)
                 goto err;
             }
-            s->kvm_dirty_ring_size = ring_size;
-            s->kvm_dirty_ring_enabled = true;
+            s->kvm_dirty_ring_bytes = ring_bytes;
         }
     }
@@ -3552,17 +3550,17 @@ bool kvm_kernel_irqchip_split(void)
     return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
 }
-static void kvm_get_dirty_gfn_count(Object *obj, Visitor *v,
+static void kvm_get_dirty_ring_size(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
 {
     KVMState *s = KVM_STATE(obj);
-    uint32_t value = s->kvm_dirty_gfn_count;
+    uint32_t value = s->kvm_dirty_ring_size;
     visit_type_uint32(v, name, &value, errp);
 }
-static void kvm_set_dirty_gfn_count(Object *obj, Visitor *v,
+static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
 {
@@ -3576,7 +3574,7 @@ static void kvm_set_dirty_gfn_count(Object *obj, Visitor *v,
         return;
     }
-    s->kvm_dirty_gfn_count = value;
+    s->kvm_dirty_ring_size = value;
 }
static void kvm_accel_instance_init(Object *obj)
@@ -3587,7 +3585,7 @@ static void kvm_accel_instance_init(Object *obj)
     s->kernel_irqchip_allowed = true;
     s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
     /* KVM dirty ring is by default off */
-    s->kvm_dirty_gfn_count = 0;
+    s->kvm_dirty_ring_size = 0;
 }
static void kvm_accel_class_init(ObjectClass *oc, void *data)
@@ -3610,11 +3608,11 @@ static void kvm_accel_class_init(ObjectClass *oc, void *data)
     object_class_property_set_description(oc, "kvm-shadow-mem",
         "KVM shadow MMU size");
-    object_class_property_add(oc, "dirty-gfn-count", "uint32",
-        kvm_get_dirty_gfn_count, kvm_set_dirty_gfn_count,
+    object_class_property_add(oc, "dirty-ring-size", "uint32",
+        kvm_get_dirty_ring_size, kvm_set_dirty_ring_size,
         NULL, NULL);
-    object_class_property_set_description(oc, "dirty-gfn-count",
-        "KVM dirty GFN count (=0 to disable dirty ring)");
+    object_class_property_set_description(oc, "dirty-ring-size",
+        "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");
 }
static const TypeInfo kvm_accel_type = {
diff --git a/qemu-options.hx b/qemu-options.hx
index acd8b4f6f9..31931f0923 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -141,7 +141,7 @@ DEF("accel", HAS_ARG, QEMU_OPTION_accel,
     "                kvm-shadow-mem=size of KVM shadow MMU in bytes\n"
     "                split-wx=on|off (enable TCG split w^x mapping)\n"
     "                tb-size=n (TCG translation block cache size)\n"
-    "                dirty-gfn-count=n (KVM dirty ring GFN count, default 0)\n"
+    "                dirty-ring-size=n (KVM dirty ring GFN count, default 0)\n"
     "                thread=single|multi (enable multi-threaded TCG)\n", 
QEMU_ARCH_ALL)
 SRST
 ``-accel name[,prop=value[,...]]``
@@ -183,15 +183,15 @@ SRST
         incompatible TCG features have been enabled (e.g.
         icount/replay).
-    ``dirty-gfn-count=n``
-        When KVM accelerator is used, it controls the per-vcpu KVM dirty ring
-        size (number of entries one dirty ring contains, per-vcpu). It should
+    ``dirty-ring-size=n``
+        When the KVM accelerator is used, it controls the size of the per-vCPU
+        dirty page ring buffer (number of entries for each vCPU). It should
         be a value that is power of two, and it should be 1024 or bigger (but
         still less than the maximum value that the kernel supports).  4096
         could be a good initial value if you have no idea which is the best.
         Set this value to 0 to disable the feature.  By default, this feature
-        is disabled (dirty-gfn-count=0).  When enabled, it'll automatically
-        replace the kvm get dirty log feature.
+        is disabled (dirty-ring-size=0).  When disabled, KVM will instead
+        record dirty pages in a bitmap.
ERST


