/*
 * Copyright (C) 2013 Proxmox Server Solutions
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *  Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"

#include "trace.h"
#include "qapi/error.h"
#include "block/block-copy.h"
#include "sysemu/block-backend.h"
#include "qemu/units.h"
#include "qemu/coroutine.h"
#include "block/aio_task.h"
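/*
 * Tuning constants: BLOCK_COPY_MAX_COPY_RANGE and BLOCK_COPY_MAX_BUFFER cap
 * the chunk size for copy-range and bounce-buffer copying respectively,
 * BLOCK_COPY_MAX_MEM bounds memory held by in-flight requests, and
 * BLOCK_COPY_SLICE_TIME is the accounting slice for rate limiting.
 */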
#define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB)
#define BLOCK_COPY_MAX_WORKERS 64
#define BLOCK_COPY_SLICE_TIME 100000000ULL /* ns */
static coroutine_fn int block_copy_task_entry(AioTask *task);
typedef struct BlockCopyCallState {
    /* IN parameters. Initialized in block_copy_async() and never changed. */
    BlockCopyState *s;
    int64_t offset;
    int64_t bytes;
    int max_workers;
    int64_t max_chunk;
    bool ignore_ratelimit;
    BlockCopyAsyncCallbackFunc cb;
    void *cb_opaque;

    /* Coroutine where async block-copy is running */
    Coroutine *co;

    /* To reference all call states from BlockCopyState */
    QLIST_ENTRY(BlockCopyCallState) list;

    /* State */
    bool finished;
    QemuCoSleep sleep;
    bool cancelled;

    /* OUT parameters */
    bool error_is_read;
    int ret;
} BlockCopyCallState;
typedef struct BlockCopyTask {
    AioTask task;

    BlockCopyState *s;
    BlockCopyCallState *call_state;
    int64_t offset;
    int64_t bytes;
    bool zeroes;
    bool copy_range;
    QLIST_ENTRY(BlockCopyTask) list;
    CoQueue wait_queue; /* coroutines blocked on this task */
} BlockCopyTask;
static int64_t task_end(BlockCopyTask *task)
{
    return task->offset + task->bytes;
}
typedef struct BlockCopyState {
    /*
     * BdrvChild objects are not owned or managed by block-copy. They are
     * provided by the block-copy user, who is responsible for appropriate
     * permissions on these children.
     */
    BdrvChild *source;
    BdrvChild *target;
    BdrvDirtyBitmap *copy_bitmap;
    int64_t in_flight_bytes;
    int64_t cluster_size;
    bool use_copy_range;
    int64_t copy_size;
    uint64_t len;
    QLIST_HEAD(, BlockCopyTask) tasks; /* All tasks from all block-copy calls */
    QLIST_HEAD(, BlockCopyCallState) calls;

    BdrvRequestFlags write_flags;

    /*
     * Used by sync=top jobs, which first scan the source node for unallocated
     * areas and clear them in the copy_bitmap. During this process, the bitmap
     * is thus not fully initialized: it may still have bits set for areas that
     * are unallocated and should actually not be copied.
     *
     * This is indicated by skip_unallocated.
     *
     * In this case, block_copy() will query the source's allocation status,
     * skip unallocated regions, clear them in the copy_bitmap, and invoke
     * block_copy_reset_unallocated() every time it does.
     */
    bool skip_unallocated;

    ProgressMeter *progress;

    SharedResource *mem;

    RateLimit rate_limit;
} BlockCopyState;
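/*
 * Find the first task intersecting [offset, offset + bytes), or return NULL
 * if there is none.
 */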
static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
                                            int64_t offset, int64_t bytes)
{
    BlockCopyTask *t;

    QLIST_FOREACH(t, &s->tasks, list) {
        if (offset + bytes > t->offset && offset < t->offset + t->bytes) {
            return t;
        }
    }

    return NULL;
}
/*
 * If there are no intersecting tasks return false. Otherwise, wait for the
 * first found intersecting task to finish and return true.
 */
static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
                                             int64_t bytes)
{
    BlockCopyTask *task = find_conflicting_task(s, offset, bytes);

    if (!task) {
        return false;
    }

    qemu_co_queue_wait(&task->wait_queue, NULL);

    return true;
}
/*
 * Search for the first dirty area in the offset/bytes range and create a task
 * at the beginning of it.
 */
static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
                                             BlockCopyCallState *call_state,
                                             int64_t offset, int64_t bytes)
{
    BlockCopyTask *task;
    int64_t max_chunk = MIN_NON_ZERO(s->copy_size, call_state->max_chunk);

    if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
                                           offset, offset + bytes,
                                           max_chunk, &offset, &bytes))
    {
        return NULL;
    }

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    bytes = QEMU_ALIGN_UP(bytes, s->cluster_size);

    /* The region is dirty, so no existing tasks are possible in it. */
    assert(!find_conflicting_task(s, offset, bytes));

    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
    s->in_flight_bytes += bytes;

    task = g_new(BlockCopyTask, 1);
    *task = (BlockCopyTask) {
        .task.func = block_copy_task_entry,
        .s = s,
        .call_state = call_state,
        .offset = offset,
        .bytes = bytes,
        .copy_range = s->use_copy_range,
    };
    qemu_co_queue_init(&task->wait_queue);
    QLIST_INSERT_HEAD(&s->tasks, task, list);

    return task;
}
/*
 * block_copy_task_shrink
 *
 * Drop the tail of the task to be handled later. Set dirty bits back and
 * wake up all coroutines waiting on us (some of them may no longer intersect
 * the shrunk task).
 */
static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
                                                int64_t new_bytes)
{
    if (new_bytes == task->bytes) {
        return;
    }

    assert(new_bytes > 0 && new_bytes < task->bytes);

    task->s->in_flight_bytes -= task->bytes - new_bytes;
    bdrv_set_dirty_bitmap(task->s->copy_bitmap,
                          task->offset + new_bytes, task->bytes - new_bytes);

    task->bytes = new_bytes;
    qemu_co_queue_restart_all(&task->wait_queue);
}
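/*
 * Finish the task: drop it from in-flight accounting, set its area dirty
 * again on failure so it will be retried, and wake all waiting coroutines.
 */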
static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
{
    task->s->in_flight_bytes -= task->bytes;
    if (ret < 0) {
        bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->offset, task->bytes);
    }
    QLIST_REMOVE(task, list);
    qemu_co_queue_restart_all(&task->wait_queue);
}
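/* Free a BlockCopyState and the resources it owns. Passing NULL is a no-op. */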
void block_copy_state_free(BlockCopyState *s)
{
    if (!s) {
        return;
    }

    ratelimit_destroy(&s->rate_limit);
    bdrv_release_dirty_bitmap(s->copy_bitmap);
    shres_destroy(s->mem);
    g_free(s);
}
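/*
 * Common max_transfer limit of the source/target pair, clamped to INT_MAX.
 * MIN_NON_ZERO skips limits that are zero (i.e. unlimited).
 */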
static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
{
    return MIN_NON_ZERO(INT_MAX,
                        MIN_NON_ZERO(source->bs->bl.max_transfer,
                                     target->bs->bl.max_transfer));
}
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                     int64_t cluster_size, bool use_copy_range,
                                     BdrvRequestFlags write_flags, Error **errp)
{
    BlockCopyState *s;
    BdrvDirtyBitmap *copy_bitmap;

    copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
                                           errp);
    if (!copy_bitmap) {
        return NULL;
    }
    bdrv_disable_dirty_bitmap(copy_bitmap);

    s = g_new(BlockCopyState, 1);
    *s = (BlockCopyState) {
        .source = source,
        .target = target,
        .copy_bitmap = copy_bitmap,
        .cluster_size = cluster_size,
        .len = bdrv_dirty_bitmap_size(copy_bitmap),
        .write_flags = write_flags,
        .mem = shres_create(BLOCK_COPY_MAX_MEM),
    };

    if (block_copy_max_transfer(source, target) < cluster_size) {
        /*
         * copy_range does not respect max_transfer. We don't want to bother
         * with requests smaller than the block-copy cluster size, so fall
         * back to buffered copying (read and write respect max_transfer on
         * their side).
         */
        s->use_copy_range = false;
        s->copy_size = cluster_size;
    } else if (write_flags & BDRV_REQ_WRITE_COMPRESSED) {
        /* Compression supports only cluster-size writes and no copy-range. */
        s->use_copy_range = false;
        s->copy_size = cluster_size;
    } else {
        /*
         * We enable copy-range, but keep a small copy_size until the first
         * successful copy_range (look at block_copy_do_copy).
         */
        s->use_copy_range = use_copy_range;
        s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
    }

    ratelimit_init(&s->rate_limit);
    QLIST_INIT(&s->tasks);
    QLIST_INIT(&s->calls);

    return s;
}
void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
{
    s->progress = pm;
}
/*
 * Takes ownership of @task
 *
 * If pool is NULL directly run the task, otherwise schedule it into the pool.
 *
 * Returns: task.func return code if pool is NULL
 *          otherwise -ECANCELED if pool status is bad
 *          otherwise 0 (successfully scheduled)
 */
static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
                                            BlockCopyTask *task)
{
    if (!pool) {
        int ret = task->task.func(&task->task);

        g_free(task);
        return ret;
    }

    aio_task_pool_wait_slot(pool);
    if (aio_task_pool_status(pool) < 0) {
        co_put_to_shres(task->s->mem, task->bytes);
        block_copy_task_end(task, -ECANCELED);
        g_free(task);
        return -ECANCELED;
    }

    aio_task_pool_start_task(pool, &task->task);

    return 0;
}
/*
 * Do a copy of a cluster-aligned chunk. The requested region is allowed to
 * exceed s->len only to cover the last cluster when s->len is not aligned to
 * clusters.
 *
 * No synchronization here: neither bitmap nor intersecting-request handling,
 * only the copy itself.
 *
 * @copy_range is an in-out argument: if *copy_range is false, copy_range is not
 * done. If *copy_range is true, copy_range is attempted. If the copy_range
 * attempt fails, the function falls back to the usual read+write and
 * *copy_range is set to false. *copy_range and zeroes must not be true
 * simultaneously.
 *
 * Returns 0 on success.
 */
static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
                                           int64_t offset, int64_t bytes,
                                           bool zeroes, bool *copy_range,
                                           bool *error_is_read)
{
    int ret;
    int64_t nbytes = MIN(offset + bytes, s->len) - offset;
    void *bounce_buffer = NULL;

    assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
    assert(offset < s->len);
    assert(offset + bytes <= s->len ||
           offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
    assert(nbytes < INT_MAX);
    assert(!(*copy_range && zeroes));

    if (zeroes) {
        ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
                                    ~BDRV_REQ_WRITE_COMPRESSED);
        if (ret < 0) {
            trace_block_copy_write_zeroes_fail(s, offset, ret);
            *error_is_read = false;
        }
        return ret;
    }

    if (*copy_range) {
        ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
                                 0, s->write_flags);
        if (ret < 0) {
            trace_block_copy_copy_range_fail(s, offset, ret);
            *copy_range = false;
            /* Fallback to read+write with allocated buffer */
        } else {
            return 0;
        }
    }

    /*
     * In case of a failed copy_range request above, we may proceed with a
     * buffered request larger than BLOCK_COPY_MAX_BUFFER. Still, further
     * requests will be properly limited, so don't care too much. Moreover,
     * the most likely case (copy_range is unsupported for the configuration,
     * so the very first copy_range request fails) is handled by setting a
     * large copy_size only after the first successful copy_range.
     */

    bounce_buffer = qemu_blockalign(s->source->bs, nbytes);

    ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
    if (ret < 0) {
        trace_block_copy_read_fail(s, offset, ret);
        *error_is_read = true;
        goto out;
    }

    ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
                         s->write_flags);
    if (ret < 0) {
        trace_block_copy_write_fail(s, offset, ret);
        *error_is_read = false;
        goto out;
    }

out:
    qemu_vfree(bounce_buffer);

    return ret;
}
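/*
 * Adjust the copy-range strategy after an attempt: grow copy_size after the
 * first success, or disable copy_range entirely after a failure.
 */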
static void block_copy_handle_copy_range_result(BlockCopyState *s,
                                                bool is_success)
{
    if (!s->use_copy_range) {
        /* already disabled */
        return;
    }

    if (is_success) {
        /*
         * Successful copy-range. Now increase copy_size. copy_range
         * does not respect max_transfer (it's a TODO), so we factor
         * that in here.
         */
        s->copy_size =
                MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
                    QEMU_ALIGN_DOWN(block_copy_max_transfer(s->source,
                                                            s->target),
                                    s->cluster_size));
    } else {
        /* Copy-range failed, disable it. */
        s->use_copy_range = false;
        s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
    }
}
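/* AioTask entry point: perform one task's copy and record the result. */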
static coroutine_fn int block_copy_task_entry(AioTask *task)
{
    BlockCopyTask *t = container_of(task, BlockCopyTask, task);
    bool error_is_read = false;
    bool copy_range = t->copy_range;
    int ret;

    ret = block_copy_do_copy(t->s, t->offset, t->bytes, t->zeroes,
                             &copy_range, &error_is_read);
    if (t->copy_range) {
        block_copy_handle_copy_range_result(t->s, copy_range);
    }
    if (ret < 0) {
        if (!t->call_state->ret) {
            t->call_state->ret = ret;
            t->call_state->error_is_read = error_is_read;
        }
    } else {
        progress_work_done(t->s->progress, t->bytes);
    }
    co_put_to_shres(t->s->mem, t->bytes);
    block_copy_task_end(t, ret);

    return ret;
}
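/*
 * Query block status of the copied region, clamping the result to whole
 * clusters. With skip_unallocated set, status is queried only above the
 * backing chain, so areas not allocated in the top image can be detected
 * and skipped.
 */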
static int block_copy_block_status(BlockCopyState *s, int64_t offset,
                                   int64_t bytes, int64_t *pnum)
{
    int64_t num;
    BlockDriverState *base;
    int ret;

    if (s->skip_unallocated) {
        base = bdrv_backing_chain_next(s->source->bs);
    } else {
        base = NULL;
    }

    ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num,
                                  NULL, NULL);
    if (ret < 0 || num < s->cluster_size) {
        /*
         * On error, or if we failed to obtain a large enough chunk, just
         * fall back to copying one cluster.
         */
        num = s->cluster_size;
        ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
    } else if (offset + num == s->len) {
        num = QEMU_ALIGN_UP(num, s->cluster_size);
    } else {
        num = QEMU_ALIGN_DOWN(num, s->cluster_size);
    }

    *pnum = num;
    return ret;
}
/*
 * Check if the cluster starting at offset is allocated or not.
 * Return via @pnum the number of contiguous clusters sharing this allocation.
 */
static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
                                           int64_t *pnum)
{
    BlockDriverState *bs = s->source->bs;
    int64_t count, total_count = 0;
    int64_t bytes = s->len - offset;
    int ret;

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));

    while (true) {
        ret = bdrv_is_allocated(bs, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        total_count += count;

        if (ret || count == 0) {
            /*
             * ret: partial segment(s) are considered allocated.
             * otherwise: unallocated tail is treated as an entire segment.
             */
            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
            return ret;
        }

        /* Unallocated segment(s) with uncertain following segment(s) */
        if (total_count >= s->cluster_size) {
            *pnum = total_count / s->cluster_size;
            return 0;
        }

        offset += count;
        bytes -= count;
    }
}
/*
 * Reset bits in copy_bitmap starting at offset if they represent unallocated
 * data in the image. May reset subsequent contiguous bits.
 * @return 0 when the cluster at @offset was unallocated,
 *         1 otherwise, and -ret on error.
 */
int64_t block_copy_reset_unallocated(BlockCopyState *s,
                                     int64_t offset, int64_t *count)
{
    int ret;
    int64_t clusters, bytes;

    ret = block_copy_is_cluster_allocated(s, offset, &clusters);
    if (ret < 0) {
        return ret;
    }

    bytes = clusters * s->cluster_size;

    if (!ret) {
        bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
        progress_set_remaining(s->progress,
                               bdrv_get_dirty_count(s->copy_bitmap) +
                               s->in_flight_bytes);
    }

    *count = bytes;
    return ret;
}
/*
 * block_copy_dirty_clusters
 *
 * Copy dirty clusters in the @offset/@bytes range.
 * Returns 1 if dirty clusters were found and successfully copied, 0 if no
 * dirty clusters were found, and -errno on failure.
 */
static int coroutine_fn
block_copy_dirty_clusters(BlockCopyCallState *call_state)
{
    BlockCopyState *s = call_state->s;
    int64_t offset = call_state->offset;
    int64_t bytes = call_state->bytes;

    int ret = 0;
    bool found_dirty = false;
    int64_t end = offset + bytes;
    AioTaskPool *aio = NULL;

    /*
     * The block_copy() user is responsible for keeping source and target in
     * the same aio context.
     */
    assert(bdrv_get_aio_context(s->source->bs) ==
           bdrv_get_aio_context(s->target->bs));

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));

    while (bytes && aio_task_pool_status(aio) == 0 && !call_state->cancelled) {
        BlockCopyTask *task;
        int64_t status_bytes;

        task = block_copy_task_create(s, call_state, offset, bytes);
        if (!task) {
            /* No more dirty bits in the bitmap */
            trace_block_copy_skip_range(s, offset, bytes);
            break;
        }
        if (task->offset > offset) {
            trace_block_copy_skip_range(s, offset, task->offset - offset);
        }

        found_dirty = true;

        ret = block_copy_block_status(s, task->offset, task->bytes,
                                      &status_bytes);
        assert(ret >= 0); /* never fail */
        if (status_bytes < task->bytes) {
            block_copy_task_shrink(task, status_bytes);
        }
        if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
            block_copy_task_end(task, 0);
            progress_set_remaining(s->progress,
                                   bdrv_get_dirty_count(s->copy_bitmap) +
                                   s->in_flight_bytes);
            trace_block_copy_skip_range(s, task->offset, task->bytes);
            offset = task_end(task);
            bytes = end - offset;
            g_free(task);
            continue;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            task->zeroes = true;
            task->copy_range = false;
        }

        if (!call_state->ignore_ratelimit) {
            uint64_t ns = ratelimit_calculate_delay(&s->rate_limit, 0);
            if (ns > 0) {
                block_copy_task_end(task, -EAGAIN);
                g_free(task);
                qemu_co_sleep_ns_wakeable(&call_state->sleep,
                                          QEMU_CLOCK_REALTIME, ns);
                continue;
            }
        }

        ratelimit_calculate_delay(&s->rate_limit, task->bytes);

        trace_block_copy_process(s, task->offset);

        co_get_from_shres(s->mem, task->bytes);

        offset = task_end(task);
        bytes = end - offset;

        if (!aio && bytes) {
            aio = aio_task_pool_new(call_state->max_workers);
        }

        ret = block_copy_task_run(aio, task);
        if (ret < 0) {
            goto out;
        }
    }

out:
    if (aio) {
        aio_task_pool_wait_all(aio);

        /*
         * We are not really interested in -ECANCELED returned from
         * block_copy_task_run. If it failed, some task has already failed for
         * a real reason; let's return the first failure.
         * Still, assert that we don't rewrite failure by success.
         *
         * Note: ret may be positive here because of the block-status result.
         */
        assert(ret >= 0 || aio_task_pool_status(aio) < 0);
        ret = aio_task_pool_status(aio);

        aio_task_pool_free(aio);
    }

    return ret < 0 ? ret : found_dirty;
}
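/* Wake an async block-copy call sleeping on the rate limiter. */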
void block_copy_kick(BlockCopyCallState *call_state)
{
    qemu_co_sleep_wake(&call_state->sleep);
}
/*
 * Copy the requested region according to the dirty bitmap.
 * Collaborate with parallel block_copy requests: if they succeed it will help
 * us. If they fail, we will retry not-copied regions. So, if we return an
 * error, it means that some I/O operation failed in the context of _this_
 * block_copy call, not of some parallel operation.
 */
static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
{
    int ret;

    QLIST_INSERT_HEAD(&call_state->s->calls, call_state, list);

    do {
        ret = block_copy_dirty_clusters(call_state);

        if (ret == 0 && !call_state->cancelled) {
            ret = block_copy_wait_one(call_state->s, call_state->offset,
                                      call_state->bytes);
        }

        /*
         * We retry in two cases:
         * 1. Some progress was done
         *    Something was copied, which means that there were yield points
         *    and some new dirty bits may have appeared (due to failed parallel
         *    block-copy requests).
         * 2. We have waited for some intersecting block-copy request
         *    It may have failed and produced new dirty bits.
         */
    } while (ret > 0 && !call_state->cancelled);

    call_state->finished = true;

    if (call_state->cb) {
        call_state->cb(call_state->cb_opaque);
    }

    QLIST_REMOVE(call_state, list);

    return ret;
}
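/*
 * Synchronous wrapper: run one block-copy call in the current coroutine,
 * with the default number of workers and no chunk-size limit.
 */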
int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes,
                            bool ignore_ratelimit)
{
    BlockCopyCallState call_state = {
        .s = s,
        .offset = start,
        .bytes = bytes,
        .ignore_ratelimit = ignore_ratelimit,
        .max_workers = BLOCK_COPY_MAX_WORKERS,
    };

    return block_copy_common(&call_state);
}
static void coroutine_fn block_copy_async_co_entry(void *opaque)
{
    block_copy_common(opaque);
}
BlockCopyCallState *block_copy_async(BlockCopyState *s,
                                     int64_t offset, int64_t bytes,
                                     int max_workers, int64_t max_chunk,
                                     BlockCopyAsyncCallbackFunc cb,
                                     void *cb_opaque)
{
    BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);

    *call_state = (BlockCopyCallState) {
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .max_workers = max_workers,
        .max_chunk = max_chunk,
        .cb = cb,
        .cb_opaque = cb_opaque,

        .co = qemu_coroutine_create(block_copy_async_co_entry, call_state),
    };

    qemu_coroutine_enter(call_state->co);

    return call_state;
}
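/*
 * A rough usage sketch of the async API; the callback and the caller-side
 * names here are hypothetical, not part of this file:
 *
 *     static void on_copy_done(void *opaque)
 *     {
 *         // Runs in the copy coroutine once the call finishes.
 *     }
 *
 *     BlockCopyCallState *cs =
 *         block_copy_async(s, 0, bytes, BLOCK_COPY_MAX_WORKERS, 0,
 *                          on_copy_done, NULL);
 *     ...
 *     if (block_copy_call_finished(cs)) {
 *         bool error_is_read;
 *         int ret = block_copy_call_status(cs, &error_is_read);
 *         block_copy_call_free(cs);
 *     }
 */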
void block_copy_call_free(BlockCopyCallState *call_state)
{
    if (!call_state) {
        return;
    }

    assert(call_state->finished);
    g_free(call_state);
}
bool block_copy_call_finished(BlockCopyCallState *call_state)
{
    return call_state->finished;
}

bool block_copy_call_succeeded(BlockCopyCallState *call_state)
{
    return call_state->finished && !call_state->cancelled &&
        call_state->ret == 0;
}

bool block_copy_call_failed(BlockCopyCallState *call_state)
{
    return call_state->finished && !call_state->cancelled &&
        call_state->ret < 0;
}

bool block_copy_call_cancelled(BlockCopyCallState *call_state)
{
    return call_state->cancelled;
}
int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
{
    assert(call_state->finished);
    if (error_is_read) {
        *error_is_read = call_state->error_is_read;
    }
    return call_state->ret;
}
void block_copy_call_cancel(BlockCopyCallState *call_state)
{
    call_state->cancelled = true;
    block_copy_kick(call_state);
}
BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
{
    return s->copy_bitmap;
}
void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
{
    s->skip_unallocated = skip;
}
void block_copy_set_speed(BlockCopyState *s, uint64_t speed)
{
    ratelimit_set_speed(&s->rate_limit, speed, BLOCK_COPY_SLICE_TIME);

    /*
     * Note: it would be good to kick all call states from here, but that may
     * be done only from a coroutine, to avoid a crash if the s->calls list
     * changes while entering one call. So for now, the only user of this
     * function kicks its single call_state by hand.
     */
}