From: John Snow
Subject: [Qemu-stable] [PULL 30/36] block/backup: teach TOP to never copy unallocated regions
Date: Fri, 16 Aug 2019 19:13:12 -0400
Presently, if sync=TOP is selected, we mark the entire bitmap as dirty.
In the write notifier handler, we dutifully copy out such regions.
Fix this in three parts (a simplified sketch of the resulting flow follows
the list below):
1. Mark the bitmap as being initialized before the first yield.
2. After the first yield but before the backup loop, interrogate the
allocation status asynchronously and initialize the bitmap.
3. Teach the write notifier to interrogate allocation status if it is
invoked during bitmap initialization.
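As a rough, stand-alone illustration only (this is a toy model, not QEMU
code; the cluster count, the allocation map and all names are made up),
the pattern is: mark everything dirty, flag that initialization is in
progress, let the copy path consult allocation status while that flag is
set, and trim the bitmap as allocation status is interrogated:

#include <stdbool.h>
#include <stdio.h>

#define NUM_CLUSTERS 8

/* Toy allocation map: which clusters of the image hold data. */
static bool allocated[NUM_CLUSTERS] = {true, true, false, false,
                                       true, false, true, true};
static bool dirty[NUM_CLUSTERS];     /* stands in for copy_bitmap */
static bool initializing_bitmap;

static int dirty_count(void)
{
    int n = 0;
    for (int i = 0; i < NUM_CLUSTERS; i++) {
        n += dirty[i];
    }
    return n;
}

/* Clear @cluster's dirty bit if it is unallocated; return its status. */
static bool reset_unallocated(int cluster)
{
    if (!allocated[cluster]) {
        dirty[cluster] = false;
        printf("cluster %d unallocated, work estimate now %d cluster(s)\n",
               cluster, dirty_count());
    }
    return allocated[cluster];
}

/* Stand-in for the copy path reached from the write notifier. */
static void copy_cluster(int cluster)
{
    if (!dirty[cluster]) {
        return;                              /* already handled */
    }
    if (initializing_bitmap && !reset_unallocated(cluster)) {
        return;                              /* unallocated: nothing to copy */
    }
    dirty[cluster] = false;
    printf("copied cluster %d\n", cluster);
}

int main(void)
{
    int i;

    /* Part 1: mark everything dirty; note that initialization is pending. */
    for (i = 0; i < NUM_CLUSTERS; i++) {
        dirty[i] = true;
    }
    initializing_bitmap = true;

    /* Part 3: a guest write arrives before initialization has finished. */
    copy_cluster(5);

    /* Part 2: interrogate allocation status and trim the bitmap lazily. */
    for (i = 0; i < NUM_CLUSTERS; i++) {
        if (dirty[i]) {
            reset_unallocated(i);
        }
    }
    initializing_bitmap = false;

    printf("%d cluster(s) still to copy\n", dirty_count());
    return 0;
}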
As an effect of this patch, the job progress for TOP backups
now behaves like this:
- total progress starts at bdrv_length.
- As allocation status is interrogated, total progress decreases.
- As blocks are copied, current progress increases.
Taken together, the floor and ceiling move to meet each other.
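For illustration with hypothetical numbers: when backing up a 10 GiB image
of which only 3 GiB is allocated, total progress starts at 10 GiB and falls
toward 3 GiB as allocation status is interrogated, while current progress
rises from 0 toward 3 GiB; the two values converge as the job completes.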
Signed-off-by: John Snow <address@hidden>
Message-id: address@hidden
[Remove ret = -ECANCELED change. --js]
[Squash in conflict resolution based on Max's patch --js]
Message-id: address@hidden
Reviewed-by: Max Reitz <address@hidden>
Signed-off-by: John Snow <address@hidden>
---
block/backup.c | 79 ++++++++++++++++++++++++++++++++++++++++------
block/trace-events | 1 +
2 files changed, 71 insertions(+), 9 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index f6bf32c9438..9e1382ec5c6 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -58,6 +58,7 @@ typedef struct BackupBlockJob {
int64_t copy_range_size;
bool serialize_target_writes;
+ bool initializing_bitmap;
} BackupBlockJob;
static const BlockJobDriver backup_job_driver;
@@ -227,6 +228,35 @@ static int backup_is_cluster_allocated(BackupBlockJob *s, int64_t offset,
}
}
+/**
+ * Reset bits in copy_bitmap starting at offset if they represent unallocated
+ * data in the image. May reset subsequent contiguous bits.
+ * @return 0 when the cluster at @offset was unallocated,
+ * 1 otherwise, and -ret on error.
+ */
+static int64_t backup_bitmap_reset_unallocated(BackupBlockJob *s,
+ int64_t offset, int64_t *count)
+{
+ int ret;
+ int64_t clusters, bytes, estimate;
+
+ ret = backup_is_cluster_allocated(s, offset, &clusters);
+ if (ret < 0) {
+ return ret;
+ }
+
+ bytes = clusters * s->cluster_size;
+
+ if (!ret) {
+ bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
+ estimate = bdrv_get_dirty_count(s->copy_bitmap);
+ job_progress_set_remaining(&s->common.job, estimate);
+ }
+
+ *count = bytes;
+ return ret;
+}
+
static int coroutine_fn backup_do_cow(BackupBlockJob *job,
int64_t offset, uint64_t bytes,
bool *error_is_read,
@@ -236,6 +266,7 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
int ret = 0;
int64_t start, end; /* bytes */
void *bounce_buffer = NULL;
+ int64_t status_bytes;
qemu_co_rwlock_rdlock(&job->flush_rwlock);
@@ -262,6 +293,17 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
dirty_end = end;
}
+ if (job->initializing_bitmap) {
+ ret = backup_bitmap_reset_unallocated(job, start, &status_bytes);
+ if (ret == 0) {
+ trace_backup_do_cow_skip_range(job, start, status_bytes);
+ start += status_bytes;
+ continue;
+ }
+ /* Clamp to known allocated region */
+ dirty_end = MIN(dirty_end, start + status_bytes);
+ }
+
trace_backup_do_cow_process(job, start);
if (job->use_copy_range) {
@@ -446,18 +488,9 @@ static int coroutine_fn backup_loop(BackupBlockJob *job)
int64_t offset;
BdrvDirtyBitmapIter *bdbi;
int ret = 0;
- int64_t dummy;
bdbi = bdrv_dirty_iter_new(job->copy_bitmap);
while ((offset = bdrv_dirty_iter_next(bdbi)) != -1) {
- if (job->sync_mode == MIRROR_SYNC_MODE_TOP &&
- !backup_is_cluster_allocated(job, offset, &dummy))
- {
- bdrv_reset_dirty_bitmap(job->copy_bitmap, offset,
- job->cluster_size);
- continue;
- }
-
do {
if (yield_and_check(job)) {
goto out;
@@ -488,6 +521,13 @@ static void backup_init_copy_bitmap(BackupBlockJob *job)
NULL, true);
assert(ret);
} else {
+ if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
+ /*
+ * We can't hog the coroutine to initialize this thoroughly.
+ * Set a flag and resume work when we are able to yield safely.
+ */
+ job->initializing_bitmap = true;
+ }
bdrv_set_dirty_bitmap(job->copy_bitmap, 0, job->len);
}
@@ -509,6 +549,26 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
s->before_write.notify = backup_before_write_notify;
bdrv_add_before_write_notifier(bs, &s->before_write);
+ if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
+ int64_t offset = 0;
+ int64_t count;
+
+ for (offset = 0; offset < s->len; ) {
+ if (yield_and_check(s)) {
+ ret = -ECANCELED;
+ goto out;
+ }
+
+ ret = backup_bitmap_reset_unallocated(s, offset, &count);
+ if (ret < 0) {
+ goto out;
+ }
+
+ offset += count;
+ }
+ s->initializing_bitmap = false;
+ }
+
if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
/* All bits are set in copy_bitmap to allow any cluster to be copied.
* This does not actually require them to be copied. */
@@ -521,6 +581,7 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
ret = backup_loop(s);
}
+ out:
notifier_with_return_remove(&s->before_write);
/* wait until pending backup_do_cow() calls have completed */
diff --git a/block/trace-events b/block/trace-events
index d724df0117d..04209f058d4 100644
--- a/block/trace-events
+++ b/block/trace-events
@@ -41,6 +41,7 @@ mirror_yield_in_flight(void *s, int64_t offset, int in_flight) "s %p offset %" P
backup_do_cow_enter(void *job, int64_t start, int64_t offset, uint64_t bytes) "job %p start %" PRId64 " offset %" PRId64 " bytes %" PRIu64
backup_do_cow_return(void *job, int64_t offset, uint64_t bytes, int ret) "job %p offset %" PRId64 " bytes %" PRIu64 " ret %d"
backup_do_cow_skip(void *job, int64_t start) "job %p start %"PRId64
+backup_do_cow_skip_range(void *job, int64_t start, uint64_t bytes) "job %p start %"PRId64" bytes %"PRId64
backup_do_cow_process(void *job, int64_t start) "job %p start %"PRId64
backup_do_cow_read_fail(void *job, int64_t start, int ret) "job %p start %"PRId64" ret %d"
backup_do_cow_write_fail(void *job, int64_t start, int ret) "job %p start %"PRId64" ret %d"
--
2.21.0