[PATCH 2/2] block-copy: refactor copy_range handling


From: Vladimir Sementsov-Ogievskiy
Subject: [PATCH 2/2] block-copy: refactor copy_range handling
Date: Fri, 28 May 2021 17:16:28 +0300

Currently we update s->use_copy_range and s->copy_size in
block_copy_do_copy().

This is not ideal:

1. block_copy_do_copy() is intended to be a simple function that wraps the
bdrv_co_<io> functions for the needs of block copy. That's why we don't pass
BlockCopyTask into it. So block_copy_do_copy() is a bad place to manipulate
the generic state of the block-copy process.

2. We are going to make block-copy thread-safe. So it's better to move
manipulation of block-copy state into the places that will need critical
sections anyway, rather than introduce extra synchronization primitives
into block_copy_do_copy(). The resulting split is sketched below.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
 block/block-copy.c | 71 +++++++++++++++++++++++++++++++---------------
 1 file changed, 48 insertions(+), 23 deletions(-)

diff --git a/block/block-copy.c b/block/block-copy.c
index f9e871b64f..c96fe31054 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -65,6 +65,7 @@ typedef struct BlockCopyTask {
     int64_t offset;
     int64_t bytes;
     bool zeroes;
+    bool copy_range;
     QLIST_ENTRY(BlockCopyTask) list;
     CoQueue wait_queue; /* coroutines blocked on this task */
 } BlockCopyTask;
@@ -183,6 +184,7 @@ static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
         .call_state = call_state,
         .offset = offset,
         .bytes = bytes,
+        .copy_range = s->use_copy_range,
     };
     qemu_co_queue_init(&task->wait_queue);
     QLIST_INSERT_HEAD(&s->tasks, task, list);
@@ -342,11 +344,17 @@ static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
  *
  * No sync here: nor bitmap neighter intersecting requests handling, only copy.
  *
+ * @copy_range is an in-out argument: if *copy_range is false, copy_range is
+ * not attempted. If *copy_range is true, a copy_range attempt is made; if it
+ * fails, the function falls back to the usual read+write and *copy_range is
+ * set to false. *copy_range and @zeroes must not both be true.
+ *
  * Returns 0 on success.
  */
 static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
                                            int64_t offset, int64_t bytes,
-                                           bool zeroes, bool *error_is_read)
+                                           bool zeroes, bool *copy_range,
+                                           bool *error_is_read)
 {
     int ret;
     int64_t nbytes = MIN(offset + bytes, s->len) - offset;
@@ -359,6 +367,7 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
     assert(offset + bytes <= s->len ||
            offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
     assert(nbytes < INT_MAX);
+    assert(!(*copy_range && zeroes));
 
     if (zeroes) {
         ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
@@ -370,32 +379,15 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
         return ret;
     }
 
-    if (s->use_copy_range) {
+    if (*copy_range) {
         ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
                                  0, s->write_flags);
         if (ret < 0) {
             trace_block_copy_copy_range_fail(s, offset, ret);
-            s->use_copy_range = false;
-            s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
+            *copy_range = false;
             /* Fallback to read+write with allocated buffer */
         } else {
-            if (s->use_copy_range) {
-                /*
-                 * Successful copy-range. Now increase copy_size.  copy_range
-                 * does not respect max_transfer (it's a TODO), so we factor
-                 * that in here.
-                 *
-                 * Note: we double-check s->use_copy_range for the case when
-                 * parallel block-copy request unsets it during previous
-                 * bdrv_co_copy_range call.
-                 */
-                s->copy_size =
-                        MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
-                            QEMU_ALIGN_DOWN(block_copy_max_transfer(s->source,
-                                                                    s->target),
-                                            s->cluster_size));
-            }
-            goto out;
+            return 0;
         }
     }
 
@@ -431,14 +423,44 @@ out:
     return ret;
 }
 
+static void block_copy_handle_copy_range_result(BlockCopyState *s,
+                                                bool is_success)
+{
+    if (!s->use_copy_range) {
+        /* already disabled */
+        return;
+    }
+
+    if (is_success) {
+        /*
+         * Successful copy-range. Now increase copy_size.  copy_range
+         * does not respect max_transfer (it's a TODO), so we factor
+         * that in here.
+         */
+        s->copy_size =
+                MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
+                    QEMU_ALIGN_DOWN(block_copy_max_transfer(s->source,
+                                                            s->target),
+                                    s->cluster_size));
+    } else {
+        /* Copy-range failed, disable it. */
+        s->use_copy_range = false;
+        s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
+    }
+}
+
 static coroutine_fn int block_copy_task_entry(AioTask *task)
 {
     BlockCopyTask *t = container_of(task, BlockCopyTask, task);
     bool error_is_read = false;
+    bool copy_range = t->copy_range;
     int ret;
 
     ret = block_copy_do_copy(t->s, t->offset, t->bytes, t->zeroes,
-                             &error_is_read);
+                             &copy_range, &error_is_read);
+    if (t->copy_range) {
+        block_copy_handle_copy_range_result(t->s, copy_range);
+    }
     if (ret < 0) {
         if (!t->call_state->ret) {
             t->call_state->ret = ret;
@@ -619,7 +641,10 @@ block_copy_dirty_clusters(BlockCopyCallState *call_state)
             g_free(task);
             continue;
         }
-        task->zeroes = ret & BDRV_BLOCK_ZERO;
+        if (ret & BDRV_BLOCK_ZERO) {
+            task->zeroes = true;
+            task->copy_range = false;
+        }
 
         if (s->speed) {
             if (!call_state->ignore_ratelimit) {
-- 
2.29.2



