block/block-copy.c

/*
 * block_copy API
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *  Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qapi/error.h"
#include "block/block-copy.h"
#include "block/block_int-io.h"
#include "block/dirty-bitmap.h"
#include "block/reqlist.h"
#include "sysemu/block-backend.h"
#include "qemu/units.h"
#include "qemu/co-shared-resource.h"
#include "qemu/coroutine.h"
#include "qemu/ratelimit.h"
#include "block/aio_task.h"
#include "qemu/error-report.h"
#include "qemu/memalign.h"

#define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB)
#define BLOCK_COPY_MAX_WORKERS 64
#define BLOCK_COPY_SLICE_TIME 100000000ULL /* ns */
#define BLOCK_COPY_CLUSTER_SIZE_DEFAULT (1 << 16)

typedef enum {
    COPY_READ_WRITE_CLUSTER,
    COPY_READ_WRITE,
    COPY_WRITE_ZEROES,
    COPY_RANGE_SMALL,
    COPY_RANGE_FULL
} BlockCopyMethod;
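
/*
 * Method selection and transitions (see block_copy_set_copy_opts() and
 * block_copy_do_copy() below): compression, or a max_transfer smaller than
 * the cluster size, forces COPY_READ_WRITE_CLUSTER. With copy offloading
 * enabled, copying starts as COPY_RANGE_SMALL and is promoted to
 * COPY_RANGE_FULL after the first successful copy_range; a failed copy_range
 * falls back to COPY_READ_WRITE. COPY_WRITE_ZEROES is chosen per task when
 * block-status reports a zeroed region.
 */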

static coroutine_fn int block_copy_task_entry(AioTask *task);

typedef struct BlockCopyCallState {
    /* Fields initialized in block_copy_async() and never changed. */
    BlockCopyState *s;
    int64_t offset;
    int64_t bytes;
    int max_workers;
    int64_t max_chunk;
    bool ignore_ratelimit;
    BlockCopyAsyncCallbackFunc cb;
    void *cb_opaque;
    /* Coroutine where async block-copy is running */
    Coroutine *co;

    /* Fields whose state changes throughout the execution */
    bool finished; /* atomic */
    QemuCoSleep sleep; /* TODO: protect API with a lock */
    bool cancelled; /* atomic */
    /* To reference all call states from BlockCopyState */
    QLIST_ENTRY(BlockCopyCallState) list;

    /*
     * Fields that report information about return values and errors.
     * Protected by lock in BlockCopyState.
     */
    bool error_is_read;
    /*
     * @ret is set concurrently by tasks under mutex. Only set once by the
     * first failed task (and untouched if no task failed).
     * After finishing (call_state->finished is true), it is not modified
     * anymore and may be safely read without mutex.
     */
    int ret;
} BlockCopyCallState;

typedef struct BlockCopyTask {
    AioTask task;

    /*
     * Fields initialized in block_copy_task_create()
     * and never changed.
     */
    BlockCopyState *s;
    BlockCopyCallState *call_state;

    /*
     * @method can also be set again in the while loop of
     * block_copy_dirty_clusters(), but it is never accessed concurrently
     * because the only other function that reads it is
     * block_copy_task_entry() and it is invoked afterwards in the same
     * iteration.
     */
    BlockCopyMethod method;

    /*
     * Generally, req is protected by lock in BlockCopyState. Still, req.offset
     * is only set on task creation, so it may be read concurrently after
     * creation. req.bytes is changed at most once, and needs protection only
     * against a parallel read while @bytes is being updated in
     * block_copy_task_shrink().
     */
    BlockReq req;
} BlockCopyTask;

static int64_t task_end(BlockCopyTask *task)
{
    return task->req.offset + task->req.bytes;
}

typedef struct BlockCopyState {
    /*
     * BdrvChild objects are not owned or managed by block-copy. They are
     * provided by the block-copy user, who is responsible for appropriate
     * permissions on these children.
     */
    BdrvChild *source;
    BdrvChild *target;

    /*
     * Fields initialized in block_copy_state_new()
     * and never changed.
     */
    int64_t cluster_size;
    int64_t max_transfer;
    uint64_t len;
    BdrvRequestFlags write_flags;

    /*
     * Fields whose state changes throughout the execution
     * Protected by lock.
     */
    CoMutex lock;
    int64_t in_flight_bytes;
    BlockCopyMethod method;
    BlockReqList reqs;
    QLIST_HEAD(, BlockCopyCallState) calls;
    /*
     * skip_unallocated:
     *
     * Used by sync=top jobs, which first scan the source node for unallocated
     * areas and clear them in the copy_bitmap. During this process, the bitmap
     * is thus not fully initialized: It may still have bits set for areas that
     * are unallocated and should actually not be copied.
     *
     * This is indicated by skip_unallocated.
     *
     * In this case, block_copy() will query the source's allocation status,
     * skip unallocated regions, clear them in the copy_bitmap, and invoke
     * block_copy_reset_unallocated() every time it does.
     */
    bool skip_unallocated; /* atomic */
    /* State fields that use a thread-safe API */
    BdrvDirtyBitmap *copy_bitmap;
    ProgressMeter *progress;
    SharedResource *mem;
    RateLimit rate_limit;
} BlockCopyState;
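
/*
 * Worked example for block_copy_chunk_size() below, assuming the default
 * 64 KiB cluster size and source/target drivers that impose no max_transfer
 * limit (so s->max_transfer is effectively unbounded):
 * COPY_READ_WRITE_CLUSTER copies 64 KiB per task; COPY_READ_WRITE and
 * COPY_RANGE_SMALL copy MIN(MAX(64 KiB, 1 MiB), max_transfer) = 1 MiB; and
 * COPY_RANGE_FULL copies MIN(MAX(64 KiB, 16 MiB), max_transfer) = 16 MiB.
 */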

/* Called with lock held */
static int64_t block_copy_chunk_size(BlockCopyState *s)
{
    switch (s->method) {
    case COPY_READ_WRITE_CLUSTER:
        return s->cluster_size;
    case COPY_READ_WRITE:
    case COPY_RANGE_SMALL:
        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER),
                   s->max_transfer);
    case COPY_RANGE_FULL:
        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
                   s->max_transfer);
    default:
        /* Cannot have COPY_WRITE_ZEROES here. */
        abort();
    }
}

/*
 * Search for the first dirty area in the offset/bytes range and create a task
 * at the beginning of it.
 */
static coroutine_fn BlockCopyTask *
block_copy_task_create(BlockCopyState *s, BlockCopyCallState *call_state,
                       int64_t offset, int64_t bytes)
{
    BlockCopyTask *task;
    int64_t max_chunk;

    QEMU_LOCK_GUARD(&s->lock);
    max_chunk = MIN_NON_ZERO(block_copy_chunk_size(s), call_state->max_chunk);
    if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
                                           offset, offset + bytes,
                                           max_chunk, &offset, &bytes))
    {
        return NULL;
    }

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    bytes = QEMU_ALIGN_UP(bytes, s->cluster_size);

    /* The region is dirty, so no existing tasks are possible in it. */
    assert(!reqlist_find_conflict(&s->reqs, offset, bytes));

    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
    s->in_flight_bytes += bytes;

    task = g_new(BlockCopyTask, 1);
    *task = (BlockCopyTask) {
        .task.func = block_copy_task_entry,
        .s = s,
        .call_state = call_state,
        .method = s->method,
    };
    reqlist_init_req(&s->reqs, &task->req, offset, bytes);

    return task;
}

/*
 * block_copy_task_shrink
 *
 * Drop the tail of the task, to be handled later. Set dirty bits back and
 * wake up all tasks waiting for us (maybe some of them do not intersect with
 * the shrunk task).
 */
static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
                                                int64_t new_bytes)
{
    QEMU_LOCK_GUARD(&task->s->lock);
    if (new_bytes == task->req.bytes) {
        return;
    }

    assert(new_bytes > 0 && new_bytes < task->req.bytes);

    task->s->in_flight_bytes -= task->req.bytes - new_bytes;
    bdrv_set_dirty_bitmap(task->s->copy_bitmap,
                          task->req.offset + new_bytes,
                          task->req.bytes - new_bytes);

    reqlist_shrink_req(&task->req, new_bytes);
}

static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
{
    QEMU_LOCK_GUARD(&task->s->lock);
    task->s->in_flight_bytes -= task->req.bytes;
    if (ret < 0) {
        bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->req.offset,
                              task->req.bytes);
    }
    if (task->s->progress) {
        progress_set_remaining(task->s->progress,
                               bdrv_get_dirty_count(task->s->copy_bitmap) +
                               task->s->in_flight_bytes);
    }
    reqlist_remove_req(&task->req);
}

void block_copy_state_free(BlockCopyState *s)
{
    if (!s) {
        return;
    }

    ratelimit_destroy(&s->rate_limit);
    bdrv_release_dirty_bitmap(s->copy_bitmap);
    shres_destroy(s->mem);
    g_free(s);
}

static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
{
    return MIN_NON_ZERO(INT_MAX,
                        MIN_NON_ZERO(source->bs->bl.max_transfer,
                                     target->bs->bl.max_transfer));
}

void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
                              bool compress)
{
    /* Keep BDRV_REQ_SERIALISING set (or not set) in block_copy_state_new() */
    s->write_flags = (s->write_flags & BDRV_REQ_SERIALISING) |
        (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);

    if (s->max_transfer < s->cluster_size) {
        /*
         * copy_range does not respect max_transfer. We don't want to bother
         * with requests smaller than the block-copy cluster size, so fall
         * back to buffered copying (read and write respect max_transfer on
         * their own).
         */
        s->method = COPY_READ_WRITE_CLUSTER;
    } else if (compress) {
        /* Compression supports only cluster-size writes and no copy-range. */
        s->method = COPY_READ_WRITE_CLUSTER;
    } else {
        /*
         * If copy range is enabled, start with COPY_RANGE_SMALL, until the
         * first successful copy_range (look at block_copy_do_copy).
         */
        s->method = use_copy_range ? COPY_RANGE_SMALL : COPY_READ_WRITE;
    }
}

static int64_t block_copy_calculate_cluster_size(BlockDriverState *target,
                                                 Error **errp)
{
    int ret;
    BlockDriverInfo bdi;
    bool target_does_cow = bdrv_backing_chain_next(target);

    /*
     * If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible.
     */
    ret = bdrv_get_info(target, &bdi);
    if (ret == -ENOTSUP && !target_does_cow) {
        /* Cluster size is not defined */
        warn_report("The target block device doesn't provide "
                    "information about the block size and it doesn't have a "
                    "backing file. The default block size of %u bytes is "
                    "used. If the actual block size of the target exceeds "
                    "this default, the backup may be unusable",
                    BLOCK_COPY_CLUSTER_SIZE_DEFAULT);
        return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
    } else if (ret < 0 && !target_does_cow) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        return ret;
    } else if (ret < 0 && target_does_cow) {
        /* Not fatal; just trudge on ahead. */
        return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
    }

    return MAX(BLOCK_COPY_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
}

BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                     const BdrvDirtyBitmap *bitmap,
                                     Error **errp)
{
    ERRP_GUARD();
    BlockCopyState *s;
    int64_t cluster_size;
    BdrvDirtyBitmap *copy_bitmap;
    bool is_fleecing;

    cluster_size = block_copy_calculate_cluster_size(target->bs, errp);
    if (cluster_size < 0) {
        return NULL;
    }

    copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
                                           errp);
    if (!copy_bitmap) {
        return NULL;
    }
    bdrv_disable_dirty_bitmap(copy_bitmap);
    if (bitmap) {
        if (!bdrv_merge_dirty_bitmap(copy_bitmap, bitmap, NULL, errp)) {
            error_prepend(errp, "Failed to merge bitmap '%s' to internal "
                          "copy-bitmap: ", bdrv_dirty_bitmap_name(bitmap));
            bdrv_release_dirty_bitmap(copy_bitmap);
            return NULL;
        }
    } else {
        bdrv_set_dirty_bitmap(copy_bitmap, 0,
                              bdrv_dirty_bitmap_size(copy_bitmap));
    }

    /*
     * If source is in the backing chain of target, assume that target is
     * going to be used for "image fleecing", i.e. it should represent a kind
     * of snapshot of the source at the backup-start point in time, and the
     * target is going to be read by somebody (for example, used as an NBD
     * export) during the backup job.
     *
     * In this case, we need to add the BDRV_REQ_SERIALISING write flag to
     * avoid intersection of backup writes and third-party reads from the
     * target; otherwise, when reading from the target, we may occasionally
     * read data already updated by the guest.
     *
     * For more information see commit f8d59dfb40bb and test
     * tests/qemu-iotests/222
     */
    is_fleecing = bdrv_chain_contains(target->bs, source->bs);

    s = g_new(BlockCopyState, 1);
    *s = (BlockCopyState) {
        .source = source,
        .target = target,
        .copy_bitmap = copy_bitmap,
        .cluster_size = cluster_size,
        .len = bdrv_dirty_bitmap_size(copy_bitmap),
        .write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0),
        .mem = shres_create(BLOCK_COPY_MAX_MEM),
        .max_transfer = QEMU_ALIGN_DOWN(
                                    block_copy_max_transfer(source, target),
                                    cluster_size),
    };

    block_copy_set_copy_opts(s, false, false);

    ratelimit_init(&s->rate_limit);
    qemu_co_mutex_init(&s->lock);
    QLIST_INIT(&s->reqs);
    QLIST_INIT(&s->calls);

    return s;
}
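
/*
 * Illustrative usage sketch (a hypothetical coroutine caller, not part of
 * this file; real users such as the backup job own the BdrvChild objects and
 * drive block-copy from coroutine context). @bytes must be cluster-aligned;
 * speed and timeout_ns are caller-chosen values:
 *
 *     BlockCopyState *s;
 *
 *     s = block_copy_state_new(src_child, tgt_child, NULL, errp);
 *     if (!s) {
 *         ... handle failure ...
 *     }
 *     block_copy_set_speed(s, speed);
 *     ret = block_copy(s, 0, bytes, false, timeout_ns, NULL, NULL);
 *     block_copy_state_free(s);
 */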

/* Only set before running the job, no need for locking. */
void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
{
    s->progress = pm;
}

/*
 * Takes ownership of @task
 *
 * If pool is NULL, directly run the task, otherwise schedule it into the pool.
 *
 * Returns: task.func return code if pool is NULL
 *          otherwise -ECANCELED if pool status is bad
 *          otherwise 0 (successfully scheduled)
 */
static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
                                            BlockCopyTask *task)
{
    if (!pool) {
        int ret = task->task.func(&task->task);

        g_free(task);
        return ret;
    }

    aio_task_pool_wait_slot(pool);
    if (aio_task_pool_status(pool) < 0) {
        co_put_to_shres(task->s->mem, task->req.bytes);
        block_copy_task_end(task, -ECANCELED);
        g_free(task);
        return -ECANCELED;
    }

    aio_task_pool_start_task(pool, &task->task);

    return 0;
}

/*
 * block_copy_do_copy
 *
 * Do a copy of a cluster-aligned chunk. The requested region is allowed to
 * exceed s->len only to cover the last cluster when s->len is not aligned to
 * clusters.
 *
 * No sync here: neither bitmap nor intersecting-request handling, only copy.
 *
 * @method is an in-out argument, so that copy_range can be either extended to
 * a full-size buffer or disabled if the copy_range attempt fails. The output
 * value of @method should be used for subsequent tasks.
 * Returns 0 on success.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_do_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
                   BlockCopyMethod *method, bool *error_is_read)
{
    int ret;
    int64_t nbytes = MIN(offset + bytes, s->len) - offset;
    void *bounce_buffer = NULL;

    assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
    assert(offset < s->len);
    assert(offset + bytes <= s->len ||
           offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
    assert(nbytes < INT_MAX);

    switch (*method) {
    case COPY_WRITE_ZEROES:
        ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
                                    ~BDRV_REQ_WRITE_COMPRESSED);
        if (ret < 0) {
            trace_block_copy_write_zeroes_fail(s, offset, ret);
            *error_is_read = false;
        }
        return ret;

    case COPY_RANGE_SMALL:
    case COPY_RANGE_FULL:
        ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
                                 0, s->write_flags);
        if (ret >= 0) {
            /* Successful copy-range, increase chunk size. */
            *method = COPY_RANGE_FULL;
            return 0;
        }

        trace_block_copy_copy_range_fail(s, offset, ret);
        *method = COPY_READ_WRITE;
        /* Fall through to read+write with allocated buffer */

    case COPY_READ_WRITE_CLUSTER:
    case COPY_READ_WRITE:
        /*
         * In case of a failed copy_range request above, we may proceed with
         * a buffered request larger than BLOCK_COPY_MAX_BUFFER.
         * Still, further requests will be properly limited, so don't care too
         * much. Moreover, the most likely case (copy_range is unsupported for
         * the configuration, so the very first copy_range request fails)
         * is handled by setting a large copy_size only after the first
         * successful copy_range.
         */

        bounce_buffer = qemu_blockalign(s->source->bs, nbytes);

        ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
        if (ret < 0) {
            trace_block_copy_read_fail(s, offset, ret);
            *error_is_read = true;
            goto out;
        }

        ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
                             s->write_flags);
        if (ret < 0) {
            trace_block_copy_write_fail(s, offset, ret);
            *error_is_read = false;
            goto out;
        }

    out:
        qemu_vfree(bounce_buffer);
        break;

    default:
        abort();
    }

    return ret;
}

static coroutine_fn int block_copy_task_entry(AioTask *task)
{
    BlockCopyTask *t = container_of(task, BlockCopyTask, task);
    BlockCopyState *s = t->s;
    bool error_is_read = false;
    BlockCopyMethod method = t->method;
    int ret;

    WITH_GRAPH_RDLOCK_GUARD() {
        ret = block_copy_do_copy(s, t->req.offset, t->req.bytes, &method,
                                 &error_is_read);
    }

    WITH_QEMU_LOCK_GUARD(&s->lock) {
        if (s->method == t->method) {
            s->method = method;
        }

        if (ret < 0) {
            if (!t->call_state->ret) {
                t->call_state->ret = ret;
                t->call_state->error_is_read = error_is_read;
            }
        } else if (s->progress) {
            progress_work_done(s->progress, t->req.bytes);
        }
    }
    co_put_to_shres(s->mem, t->req.bytes);
    block_copy_task_end(t, ret);

    return ret;
}

static coroutine_fn GRAPH_RDLOCK
int block_copy_block_status(BlockCopyState *s, int64_t offset, int64_t bytes,
                            int64_t *pnum)
{
    int64_t num;
    BlockDriverState *base;
    int ret;

    if (qatomic_read(&s->skip_unallocated)) {
        base = bdrv_backing_chain_next(s->source->bs);
    } else {
        base = NULL;
    }

    ret = bdrv_co_block_status_above(s->source->bs, base, offset, bytes, &num,
                                     NULL, NULL);
    if (ret < 0 || num < s->cluster_size) {
        /*
         * On error, or if we failed to obtain a large enough chunk, just
         * fall back to copying one cluster.
         */
        num = s->cluster_size;
        ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
    } else if (offset + num == s->len) {
        num = QEMU_ALIGN_UP(num, s->cluster_size);
    } else {
        num = QEMU_ALIGN_DOWN(num, s->cluster_size);
    }

    *pnum = num;
    return ret;
}

/*
 * Check if the cluster starting at offset is allocated or not.
 * Return via pnum the number of contiguous clusters sharing this allocation.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
                                int64_t *pnum)
{
    BlockDriverState *bs = s->source->bs;
    int64_t count, total_count = 0;
    int64_t bytes = s->len - offset;
    int ret;

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));

    while (true) {
        /* protected in backup_run() */
        ret = bdrv_co_is_allocated(bs, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        total_count += count;

        if (ret || count == 0) {
            /*
             * ret: partial segment(s) are considered allocated.
             * otherwise: unallocated tail is treated as an entire segment.
             */
            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
            return ret;
        }

        /* Unallocated segment(s) with uncertain following segment(s) */
        if (total_count >= s->cluster_size) {
            *pnum = total_count / s->cluster_size;
            return 0;
        }

        offset += count;
        bytes -= count;
    }
}

void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes)
{
    QEMU_LOCK_GUARD(&s->lock);

    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
    if (s->progress) {
        progress_set_remaining(s->progress,
                               bdrv_get_dirty_count(s->copy_bitmap) +
                               s->in_flight_bytes);
    }
}

/*
 * Reset bits in copy_bitmap starting at offset if they represent unallocated
 * data in the image. May reset subsequent contiguous bits.
 * @return 0 when the cluster at @offset was unallocated,
 *         1 otherwise, and -ret on error.
 */
int64_t coroutine_fn block_copy_reset_unallocated(BlockCopyState *s,
                                                  int64_t offset,
                                                  int64_t *count)
{
    int ret;
    int64_t clusters, bytes;

    ret = block_copy_is_cluster_allocated(s, offset, &clusters);
    if (ret < 0) {
        return ret;
    }

    bytes = clusters * s->cluster_size;

    if (!ret) {
        block_copy_reset(s, offset, bytes);
    }

    *count = bytes;
    return ret;
}

/*
 * block_copy_dirty_clusters
 *
 * Copy dirty clusters in the @offset/@bytes range.
 * Returns 1 if dirty clusters were found and successfully copied, 0 if no
 * dirty clusters were found, and -errno on failure.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_dirty_clusters(BlockCopyCallState *call_state)
{
    BlockCopyState *s = call_state->s;
    int64_t offset = call_state->offset;
    int64_t bytes = call_state->bytes;

    int ret = 0;
    bool found_dirty = false;
    int64_t end = offset + bytes;
    AioTaskPool *aio = NULL;

    /*
     * The block_copy() user is responsible for keeping source and target in
     * the same aio context.
     */
    assert(bdrv_get_aio_context(s->source->bs) ==
           bdrv_get_aio_context(s->target->bs));

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));

    while (bytes && aio_task_pool_status(aio) == 0 &&
           !qatomic_read(&call_state->cancelled)) {
        BlockCopyTask *task;
        int64_t status_bytes;

        task = block_copy_task_create(s, call_state, offset, bytes);
        if (!task) {
            /* No more dirty bits in the bitmap */
            trace_block_copy_skip_range(s, offset, bytes);
            break;
        }
        if (task->req.offset > offset) {
            trace_block_copy_skip_range(s, offset, task->req.offset - offset);
        }

        found_dirty = true;

        ret = block_copy_block_status(s, task->req.offset, task->req.bytes,
                                      &status_bytes);
        assert(ret >= 0); /* never fail */
        if (status_bytes < task->req.bytes) {
            block_copy_task_shrink(task, status_bytes);
        }
        if (qatomic_read(&s->skip_unallocated) &&
            !(ret & BDRV_BLOCK_ALLOCATED)) {
            block_copy_task_end(task, 0);
            trace_block_copy_skip_range(s, task->req.offset, task->req.bytes);
            offset = task_end(task);
            bytes = end - offset;
            g_free(task);
            continue;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            task->method = COPY_WRITE_ZEROES;
        }

        if (!call_state->ignore_ratelimit) {
            uint64_t ns = ratelimit_calculate_delay(&s->rate_limit, 0);
            if (ns > 0) {
                block_copy_task_end(task, -EAGAIN);
                g_free(task);
                qemu_co_sleep_ns_wakeable(&call_state->sleep,
                                          QEMU_CLOCK_REALTIME, ns);
                continue;
            }
        }

        ratelimit_calculate_delay(&s->rate_limit, task->req.bytes);

        trace_block_copy_process(s, task->req.offset);

        co_get_from_shres(s->mem, task->req.bytes);

        offset = task_end(task);
        bytes = end - offset;

        if (!aio && bytes) {
            aio = aio_task_pool_new(call_state->max_workers);
        }

        ret = block_copy_task_run(aio, task);
        if (ret < 0) {
            goto out;
        }
    }

out:
    if (aio) {
        aio_task_pool_wait_all(aio);

        /*
         * We are not really interested in -ECANCELED returned from
         * block_copy_task_run. If it fails, it means some task already failed
         * for a real reason; let's return the first failure.
         * Still, assert that we don't rewrite failure by success.
         *
         * Note: ret may be positive here because of block-status result.
         */
        assert(ret >= 0 || aio_task_pool_status(aio) < 0);
        ret = aio_task_pool_status(aio);

        aio_task_pool_free(aio);
    }

    return ret < 0 ? ret : found_dirty;
}

void block_copy_kick(BlockCopyCallState *call_state)
{
    qemu_co_sleep_wake(&call_state->sleep);
}

/*
 * block_copy_common
 *
 * Copy the requested region according to the dirty bitmap.
 * Collaborate with parallel block_copy requests: if they succeed, it will
 * help us. If they fail, we will retry not-copied regions. So, if we return
 * an error, it means that some I/O operation failed in the context of _this_
 * block_copy call, not some parallel operation.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_common(BlockCopyCallState *call_state)
{
    int ret;
    BlockCopyState *s = call_state->s;

    qemu_co_mutex_lock(&s->lock);
    QLIST_INSERT_HEAD(&s->calls, call_state, list);
    qemu_co_mutex_unlock(&s->lock);

    do {
        ret = block_copy_dirty_clusters(call_state);

        if (ret == 0 && !qatomic_read(&call_state->cancelled)) {
            WITH_QEMU_LOCK_GUARD(&s->lock) {
                /*
                 * Check that there is no task we still need to
                 * wait to complete
                 */
                ret = reqlist_wait_one(&s->reqs, call_state->offset,
                                       call_state->bytes, &s->lock);
                if (ret == 0) {
                    /*
                     * No pending tasks, but check again the bitmap in this
                     * same critical section, since a task might have failed
                     * between this and the critical section in
                     * block_copy_dirty_clusters().
                     *
                     * A reqlist_wait_one return value of 0 also means that it
                     * didn't release the lock. So, we are still in the same
                     * critical section, not interrupted by any concurrent
                     * access to state.
                     */
                    ret = bdrv_dirty_bitmap_next_dirty(s->copy_bitmap,
                                                       call_state->offset,
                                                       call_state->bytes) >= 0;
                }
            }
        }

        /*
         * We retry in two cases:
         * 1. Some progress was made
         *    Something was copied, which means that there were yield points
         *    and some new dirty bits may have appeared (due to failed parallel
         *    block-copy requests).
         * 2. We have waited for some intersecting block-copy request
         *    It may have failed and produced new dirty bits.
         */
    } while (ret > 0 && !qatomic_read(&call_state->cancelled));

    qatomic_store_release(&call_state->finished, true);

    if (call_state->cb) {
        call_state->cb(call_state->cb_opaque);
    }

    qemu_co_mutex_lock(&s->lock);
    QLIST_REMOVE(call_state, list);
    qemu_co_mutex_unlock(&s->lock);

    return ret;
}

static void coroutine_fn block_copy_async_co_entry(void *opaque)
{
    GRAPH_RDLOCK_GUARD();
    block_copy_common(opaque);
}

int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes,
                            bool ignore_ratelimit, uint64_t timeout_ns,
                            BlockCopyAsyncCallbackFunc cb,
                            void *cb_opaque)
{
    int ret;
    BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);

    *call_state = (BlockCopyCallState) {
        .s = s,
        .offset = start,
        .bytes = bytes,
        .ignore_ratelimit = ignore_ratelimit,
        .max_workers = BLOCK_COPY_MAX_WORKERS,
        .cb = cb,
        .cb_opaque = cb_opaque,
    };

    ret = qemu_co_timeout(block_copy_async_co_entry, call_state, timeout_ns,
                          g_free);
    if (ret < 0) {
        assert(ret == -ETIMEDOUT);
        block_copy_call_cancel(call_state);
        /* call_state will be freed by running coroutine. */
        return ret;
    }

    ret = call_state->ret;
    g_free(call_state);

    return ret;
}

BlockCopyCallState *block_copy_async(BlockCopyState *s,
                                     int64_t offset, int64_t bytes,
                                     int max_workers, int64_t max_chunk,
                                     BlockCopyAsyncCallbackFunc cb,
                                     void *cb_opaque)
{
    BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);

    *call_state = (BlockCopyCallState) {
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .max_workers = max_workers,
        .max_chunk = max_chunk,
        .cb = cb,
        .cb_opaque = cb_opaque,

        .co = qemu_coroutine_create(block_copy_async_co_entry, call_state),
    };

    qemu_coroutine_enter(call_state->co);

    return call_state;
}
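
/*
 * Illustrative sketch of the async API (hypothetical names my_cb/my_opaque;
 * a max_chunk of 0 means "no extra limit", since it is combined via
 * MIN_NON_ZERO() in block_copy_task_create()):
 *
 *     static void my_cb(void *my_opaque)
 *     {
 *         ... runs once the copy finishes, fails, or is cancelled ...
 *     }
 *
 *     BlockCopyCallState *cs = block_copy_async(s, offset, bytes,
 *                                               BLOCK_COPY_MAX_WORKERS, 0,
 *                                               my_cb, my_opaque);
 *     ...
 *     if (block_copy_call_finished(cs)) {
 *         bool error_is_read;
 *         int ret = block_copy_call_status(cs, &error_is_read);
 *         block_copy_call_free(cs);
 *     }
 */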

void block_copy_call_free(BlockCopyCallState *call_state)
{
    if (!call_state) {
        return;
    }

    assert(qatomic_read(&call_state->finished));
    g_free(call_state);
}

bool block_copy_call_finished(BlockCopyCallState *call_state)
{
    return qatomic_read(&call_state->finished);
}

bool block_copy_call_succeeded(BlockCopyCallState *call_state)
{
    return qatomic_load_acquire(&call_state->finished) &&
           !qatomic_read(&call_state->cancelled) &&
           call_state->ret == 0;
}

bool block_copy_call_failed(BlockCopyCallState *call_state)
{
    return qatomic_load_acquire(&call_state->finished) &&
           !qatomic_read(&call_state->cancelled) &&
           call_state->ret < 0;
}

bool block_copy_call_cancelled(BlockCopyCallState *call_state)
{
    return qatomic_read(&call_state->cancelled);
}

int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
{
    assert(qatomic_load_acquire(&call_state->finished));
    if (error_is_read) {
        *error_is_read = call_state->error_is_read;
    }
    return call_state->ret;
}

/*
 * Note that cancelling and finishing are racy.
 * User can cancel a block-copy that is already finished.
 */
void block_copy_call_cancel(BlockCopyCallState *call_state)
{
    qatomic_set(&call_state->cancelled, true);
    block_copy_kick(call_state);
}

BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
{
    return s->copy_bitmap;
}

int64_t block_copy_cluster_size(BlockCopyState *s)
{
    return s->cluster_size;
}

void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
{
    qatomic_set(&s->skip_unallocated, skip);
}

void block_copy_set_speed(BlockCopyState *s, uint64_t speed)
{
    ratelimit_set_speed(&s->rate_limit, speed, BLOCK_COPY_SLICE_TIME);

    /*
     * Note: it would be good to kick all call states from here, but it should
     * be done only from a coroutine, to avoid crashing if the s->calls list
     * changes while entering one call. So for now, the only user of this
     * function kicks its single call_state by hand.
     */
}