/*
 * copy-before-write filter driver
 *
 * The driver performs Copy-Before-Write (CBW) operation: it is injected above
 * some node, and before each write it copies _old_ data to the target node.
 *
 * Copyright (c) 2018-2021 Virtuozzo International GmbH.
 *
 * Author:
 *  Sementsov-Ogievskiy Vladimir <vsementsov@virtuozzo.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

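/*
 * Illustrative example of attaching the filter (node names "cbw0", "disk0"
 * and "tmp0" are placeholders): besides the internal bdrv_cbw_append() path
 * at the end of this file, the filter can be created explicitly with QMP
 * blockdev-add, roughly along these lines:
 *
 *   { "execute": "blockdev-add",
 *     "arguments": {
 *         "driver": "copy-before-write",
 *         "node-name": "cbw0",
 *         "file": "disk0",
 *         "target": "tmp0"
 *     } }
 *
 * Here "file" is the node being protected and "target" receives the
 * copied-out old data, matching the options parsed by cbw_open() below.
 */
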
#include "qemu/osdep.h"

#include "qapi/qmp/qjson.h"

#include "sysemu/block-backend.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "block/block_int.h"
#include "block/qdict.h"
#include "block/block-copy.h"
#include "block/dirty-bitmap.h"

#include "block/copy-before-write.h"
#include "block/reqlist.h"

#include "qapi/qapi-visit-block-core.h"

typedef struct BDRVCopyBeforeWriteState {
    BlockCopyState *bcs;
    BdrvChild *target;
    OnCbwError on_cbw_error;
    uint32_t cbw_timeout_ns;

    /*
     * @lock: protects access to @access_bitmap, @done_bitmap and
     * @frozen_read_reqs
     */
    CoMutex lock;

    /*
     * @access_bitmap: represents areas allowed for reading by fleecing user.
     * Reading from non-dirty areas leads to -EACCES.
     */
    BdrvDirtyBitmap *access_bitmap;

    /*
     * @done_bitmap: represents areas that were successfully copied to @target
     * by copy-before-write operations.
     */
    BdrvDirtyBitmap *done_bitmap;

    /*
     * @frozen_read_reqs: current read requests for fleecing user in bs->file
     * node. These areas must not be rewritten by guest.
     */
    BlockReqList frozen_read_reqs;

    /*
     * @snapshot_error is normally zero. But on first copy-before-write failure
     * when @on_cbw_error == ON_CBW_ERROR_BREAK_SNAPSHOT, @snapshot_error takes
     * the value of that error (<0). After that, all in-flight and further
     * snapshot-API requests will fail with the same error.
     */
    int snapshot_error;
} BDRVCopyBeforeWriteState;

static int coroutine_fn GRAPH_RDLOCK
cbw_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
              QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
}

static void block_copy_cb(void *opaque)
{
    BlockDriverState *bs = opaque;

    bdrv_dec_in_flight(bs);
}

/*
 * Do copy-before-write operation.
 *
 * On failure the guest request must be failed too.
 *
 * On success, we also wait for all in-flight fleecing read requests in the
 * source node, and it's guaranteed that after cbw_do_copy_before_write()
 * returns successfully there are no such requests and they will never appear.
 */
static coroutine_fn int cbw_do_copy_before_write(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, BdrvRequestFlags flags)
{
    BDRVCopyBeforeWriteState *s = bs->opaque;
    int ret;
    uint64_t off, end;
    int64_t cluster_size = block_copy_cluster_size(s->bcs);

    if (flags & BDRV_REQ_WRITE_UNCHANGED) {
        return 0;
    }

    if (s->snapshot_error) {
        return 0;
    }

    off = QEMU_ALIGN_DOWN(offset, cluster_size);
    end = QEMU_ALIGN_UP(offset + bytes, cluster_size);

    /*
     * Increase in_flight, so that in case of timed-out block-copy, the
     * remaining background block_copy() request (which can't be immediately
     * cancelled by timeout) is presented in bs->in_flight. This way we are
     * sure that on bs close() we will first wait for all timed-out but still
     * running block_copy calls.
     */
    bdrv_inc_in_flight(bs);
    ret = block_copy(s->bcs, off, end - off, true, s->cbw_timeout_ns,
                     block_copy_cb, bs);
    if (ret < 0 && s->on_cbw_error == ON_CBW_ERROR_BREAK_GUEST_WRITE) {
        return ret;
    }

    WITH_QEMU_LOCK_GUARD(&s->lock) {
        if (ret < 0) {
            assert(s->on_cbw_error == ON_CBW_ERROR_BREAK_SNAPSHOT);
            if (!s->snapshot_error) {
                s->snapshot_error = ret;
            }
        } else {
            bdrv_set_dirty_bitmap(s->done_bitmap, off, end - off);
        }
        reqlist_wait_all(&s->frozen_read_reqs, off, end - off, &s->lock);
    }

    return 0;
}

static int coroutine_fn GRAPH_RDLOCK
cbw_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    int ret = cbw_do_copy_before_write(bs, offset, bytes, 0);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_pdiscard(bs->file, offset, bytes);
}

static int coroutine_fn GRAPH_RDLOCK
cbw_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
                     BdrvRequestFlags flags)
{
    int ret = cbw_do_copy_before_write(bs, offset, bytes, flags);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags);
}

static coroutine_fn GRAPH_RDLOCK
int cbw_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
                   QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    int ret = cbw_do_copy_before_write(bs, offset, bytes, flags);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags);
}

static int coroutine_fn GRAPH_RDLOCK cbw_co_flush(BlockDriverState *bs)
{
    if (!bs->file) {
        return 0;
    }

    return bdrv_co_flush(bs->file->bs);
}

/*
 * If @offset is not accessible, return NULL.
 *
 * Otherwise, set @pnum to the number of bytes that are accessible from @file
 * (@file is set to bs->file or to s->target). Return a newly allocated
 * BlockReq object that should then be passed to cbw_snapshot_read_unlock().
 *
 * It's guaranteed that guest writes will not touch the region until
 * cbw_snapshot_read_unlock() is called.
 */
static BlockReq * coroutine_fn GRAPH_RDLOCK
cbw_snapshot_read_lock(BlockDriverState *bs, int64_t offset, int64_t bytes,
                       int64_t *pnum, BdrvChild **file)
{
    BDRVCopyBeforeWriteState *s = bs->opaque;
    BlockReq *req = g_new(BlockReq, 1);
    bool done;

    QEMU_LOCK_GUARD(&s->lock);

    if (s->snapshot_error) {
        g_free(req);
        return NULL;
    }

    if (bdrv_dirty_bitmap_next_zero(s->access_bitmap, offset, bytes) != -1) {
        g_free(req);
        return NULL;
    }

    done = bdrv_dirty_bitmap_status(s->done_bitmap, offset, bytes, pnum);
    if (done) {
        /*
         * Special invalid BlockReq, that is handled in
         * cbw_snapshot_read_unlock(). We don't need to lock anything to read
         * from s->target.
         */
        *req = (BlockReq) {.offset = -1, .bytes = -1};
        *file = s->target;
    } else {
        reqlist_init_req(&s->frozen_read_reqs, req, offset, bytes);
        *file = bs->file;
    }

    return req;
}

static coroutine_fn void
cbw_snapshot_read_unlock(BlockDriverState *bs, BlockReq *req)
{
    BDRVCopyBeforeWriteState *s = bs->opaque;

    if (req->offset == -1 && req->bytes == -1) {
        g_free(req);
        return;
    }

    QEMU_LOCK_GUARD(&s->lock);

    reqlist_remove_req(req);
}

static int coroutine_fn GRAPH_RDLOCK
cbw_co_preadv_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes,
                       QEMUIOVector *qiov, size_t qiov_offset)
{
    BlockReq *req;
    BdrvChild *file;
    int ret;

    /* TODO: upgrade to async loop using AioTask */
    while (bytes) {
        int64_t cur_bytes;

        req = cbw_snapshot_read_lock(bs, offset, bytes, &cur_bytes, &file);
        if (!req) {
            return -EACCES;
        }

        ret = bdrv_co_preadv_part(file, offset, cur_bytes,
                                  qiov, qiov_offset, 0);
        cbw_snapshot_read_unlock(bs, req);
        if (ret < 0) {
            return ret;
        }

        bytes -= cur_bytes;
        offset += cur_bytes;
        qiov_offset += cur_bytes;
    }

    return 0;
}

static int coroutine_fn GRAPH_RDLOCK
cbw_co_snapshot_block_status(BlockDriverState *bs,
                             bool want_zero, int64_t offset, int64_t bytes,
                             int64_t *pnum, int64_t *map,
                             BlockDriverState **file)
{
    BDRVCopyBeforeWriteState *s = bs->opaque;
    BlockReq *req;
    int ret;
    int64_t cur_bytes;
    BdrvChild *child;

    req = cbw_snapshot_read_lock(bs, offset, bytes, &cur_bytes, &child);
    if (!req) {
        return -EACCES;
    }

    ret = bdrv_co_block_status(child->bs, offset, cur_bytes, pnum, map, file);
    if (child == s->target) {
        /*
         * We refer to s->target only for areas that we have written to it.
         * And we cannot report unallocated blocks in s->target: this would
         * break the generic block-status-above logic, which would descend to
         * the copy-before-write filtered child in this case.
         */
        assert(ret & BDRV_BLOCK_ALLOCATED);
    }

    cbw_snapshot_read_unlock(bs, req);

    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
cbw_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    BDRVCopyBeforeWriteState *s = bs->opaque;

    WITH_QEMU_LOCK_GUARD(&s->lock) {
        bdrv_reset_dirty_bitmap(s->access_bitmap, offset, bytes);
    }

    block_copy_reset(s->bcs, offset, bytes);

    return bdrv_co_pdiscard(s->target, offset, bytes);
}

static void GRAPH_RDLOCK cbw_refresh_filename(BlockDriverState *bs)
{
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->file->bs->filename);
}

static void GRAPH_RDLOCK
cbw_child_perm(BlockDriverState *bs, BdrvChild *c, BdrvChildRole role,
               BlockReopenQueue *reopen_queue,
               uint64_t perm, uint64_t shared,
               uint64_t *nperm, uint64_t *nshared)
{
    if (!(role & BDRV_CHILD_FILTERED)) {
        /*
         * Target child
         *
         * Share write to target (child_file), so as not to interfere with
         * guest writes to its disk, which may be in the target's backing
         * chain. Can't resize during a backup block job because we check the
         * size only upfront.
         */
        *nshared = BLK_PERM_ALL & ~BLK_PERM_RESIZE;
        *nperm = BLK_PERM_WRITE;
    } else {
        /* Source child */
        bdrv_default_perms(bs, c, role, reopen_queue,
                           perm, shared, nperm, nshared);

        if (!QLIST_EMPTY(&bs->parents)) {
            if (perm & BLK_PERM_WRITE) {
                *nperm = *nperm | BLK_PERM_CONSISTENT_READ;
            }
            *nshared &= ~(BLK_PERM_WRITE | BLK_PERM_RESIZE);
        }
    }
}

static BlockdevOptions *cbw_parse_options(QDict *options, Error **errp)
{
    BlockdevOptions *opts = NULL;
    Visitor *v = NULL;

    qdict_put_str(options, "driver", "copy-before-write");

    v = qobject_input_visitor_new_flat_confused(options, errp);
    if (!v) {
        goto out;
    }

    visit_type_BlockdevOptions(v, NULL, &opts, errp);
    if (!opts) {
        goto out;
    }

    /*
     * Delete the options that we have parsed through the BlockdevOptions
     * object from the original options dict.
     */
    qdict_extract_subqdict(options, NULL, "bitmap");
    qdict_del(options, "on-cbw-error");
    qdict_del(options, "cbw-timeout");

out:
    visit_free(v);
    qdict_del(options, "driver");

    return opts;
}

static int cbw_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVCopyBeforeWriteState *s = bs->opaque;
    BdrvDirtyBitmap *bitmap = NULL;
    int64_t cluster_size;
    g_autoptr(BlockdevOptions) full_opts = NULL;
    BlockdevOptionsCbw *opts;
    AioContext *ctx;
    int ret;

    full_opts = cbw_parse_options(options, errp);
    if (!full_opts) {
        return -EINVAL;
    }
    assert(full_opts->driver == BLOCKDEV_DRIVER_COPY_BEFORE_WRITE);
    opts = &full_opts->u.copy_before_write;

    ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
    if (ret < 0) {
        return ret;
    }

    s->target = bdrv_open_child(NULL, options, "target", bs, &child_of_bds,
                                BDRV_CHILD_DATA, false, errp);
    if (!s->target) {
        return -EINVAL;
    }

    GRAPH_RDLOCK_GUARD_MAINLOOP();

    ctx = bdrv_get_aio_context(bs);
    aio_context_acquire(ctx);

    if (opts->bitmap) {
        bitmap = block_dirty_bitmap_lookup(opts->bitmap->node,
                                           opts->bitmap->name, NULL, errp);
        if (!bitmap) {
            ret = -EINVAL;
            goto out;
        }
    }
    s->on_cbw_error = opts->has_on_cbw_error ? opts->on_cbw_error :
            ON_CBW_ERROR_BREAK_GUEST_WRITE;
    s->cbw_timeout_ns = opts->has_cbw_timeout ?
        opts->cbw_timeout * NANOSECONDS_PER_SECOND : 0;

    bs->total_sectors = bs->file->bs->total_sectors;
    bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED |
            (BDRV_REQ_FUA & bs->file->bs->supported_write_flags);
    bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
            ((BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK) &
             bs->file->bs->supported_zero_flags);

    s->bcs = block_copy_state_new(bs->file, s->target, bitmap, errp);
    if (!s->bcs) {
        error_prepend(errp, "Cannot create block-copy-state: ");
        ret = -EINVAL;
        goto out;
    }

    cluster_size = block_copy_cluster_size(s->bcs);

    s->done_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
    if (!s->done_bitmap) {
        ret = -EINVAL;
        goto out;
    }
    bdrv_disable_dirty_bitmap(s->done_bitmap);

    /* s->access_bitmap starts equal to bcs bitmap */
    s->access_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
    if (!s->access_bitmap) {
        ret = -EINVAL;
        goto out;
    }
    bdrv_disable_dirty_bitmap(s->access_bitmap);
    bdrv_dirty_bitmap_merge_internal(s->access_bitmap,
                                     block_copy_dirty_bitmap(s->bcs), NULL,
                                     true);

    qemu_co_mutex_init(&s->lock);
    QLIST_INIT(&s->frozen_read_reqs);

    ret = 0;
out:
    aio_context_release(ctx);
    return ret;
}

static void cbw_close(BlockDriverState *bs)
{
    BDRVCopyBeforeWriteState *s = bs->opaque;

    bdrv_release_dirty_bitmap(s->access_bitmap);
    bdrv_release_dirty_bitmap(s->done_bitmap);

    block_copy_state_free(s->bcs);
    s->bcs = NULL;
}

static BlockDriver bdrv_cbw_filter = {
    .format_name = "copy-before-write",
    .instance_size = sizeof(BDRVCopyBeforeWriteState),

    .bdrv_open                  = cbw_open,
    .bdrv_close                 = cbw_close,

    .bdrv_co_preadv             = cbw_co_preadv,
    .bdrv_co_pwritev            = cbw_co_pwritev,
    .bdrv_co_pwrite_zeroes      = cbw_co_pwrite_zeroes,
    .bdrv_co_pdiscard           = cbw_co_pdiscard,
    .bdrv_co_flush              = cbw_co_flush,

    .bdrv_co_preadv_snapshot       = cbw_co_preadv_snapshot,
    .bdrv_co_pdiscard_snapshot     = cbw_co_pdiscard_snapshot,
    .bdrv_co_snapshot_block_status = cbw_co_snapshot_block_status,

    .bdrv_refresh_filename      = cbw_refresh_filename,

    .bdrv_child_perm            = cbw_child_perm,

    .is_filter = true,
};

BlockDriverState *bdrv_cbw_append(BlockDriverState *source,
                                  BlockDriverState *target,
                                  const char *filter_node_name,
                                  BlockCopyState **bcs,
                                  Error **errp)
{
    BDRVCopyBeforeWriteState *state;
    BlockDriverState *top;
    QDict *opts;

    assert(source->total_sectors == target->total_sectors);
    GLOBAL_STATE_CODE();

    opts = qdict_new();
    qdict_put_str(opts, "driver", "copy-before-write");
    if (filter_node_name) {
        qdict_put_str(opts, "node-name", filter_node_name);
    }
    qdict_put_str(opts, "file", bdrv_get_node_name(source));
    qdict_put_str(opts, "target", bdrv_get_node_name(target));

    top = bdrv_insert_node(source, opts, BDRV_O_RDWR, errp);
    if (!top) {
        return NULL;
    }

    state = top->opaque;
    *bcs = state->bcs;

    return top;
}

void bdrv_cbw_drop(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();
    bdrv_drop_filter(bs, &error_abort);
    bdrv_unref(bs);
}
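
/*
 * Illustrative usage sketch for the two functions above (names and control
 * flow are placeholders; block/backup.c is the in-tree user): the caller
 * appends the filter, keeps the returned BlockCopyState for its copy
 * operations, and drops the filter when done:
 *
 *     BlockCopyState *bcs;
 *     BlockDriverState *cbw;
 *
 *     cbw = bdrv_cbw_append(source, target, "cbw-filter", &bcs, errp);
 *     if (!cbw) {
 *         return;        // error has been set in errp
 *     }
 *     // ... issue copy requests through @bcs ...
 *     bdrv_cbw_drop(cbw);
 */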

static void cbw_init(void)
{
    bdrv_register(&bdrv_cbw_filter);
}

block_init(cbw_init);