/*
 * Live block commit
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Jeff Cody   <jcody@redhat.com>
 *  Based on stream.c by Stefan Hajnoczi
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "trace.h"
#include "block_int.h"
#include "blockjob.h"
#include "qemu/ratelimit.h"

enum {
    /*
     * Size of data buffer for populating the image file.  This should be large
     * enough to process multiple clusters in a single call, so that populating
     * contiguous regions of the image is efficient.
     */
    COMMIT_BUFFER_SIZE = 512 * 1024, /* in bytes */
};

#define SLICE_TIME 100000000ULL /* ns */
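
/* State for one live-commit job: embeds the generic BlockJob, tracks the
 * active/top/base images involved, and remembers the original open flags of
 * base and its overlay so they can be restored when the job ends. */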
typedef struct CommitBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockDriverState *active;
    BlockDriverState *top;
    BlockDriverState *base;
    BlockdevOnError on_error;
    int base_flags;
    int orig_overlay_flags;
} CommitBlockJob;
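
/* Copy @nb_sectors sectors starting at @sector_num from @bs into @base using
 * the caller-provided bounce buffer @buf.  Returns 0 on success or the
 * negative errno of the failing read/write. */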
static int coroutine_fn commit_populate(BlockDriverState *bs,
                                        BlockDriverState *base,
                                        int64_t sector_num, int nb_sectors,
                                        void *buf)
{
    int ret = 0;

    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    if (ret) {
        return ret;
    }

    ret = bdrv_write(base, sector_num, buf, nb_sectors);
    if (ret) {
        return ret;
    }

    return 0;
}
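
/* Coroutine body of the commit job: walk @top in COMMIT_BUFFER_SIZE chunks,
 * copy every range allocated above @base down into @base (honouring the rate
 * limit and cancellation), then drop the now-redundant intermediate images
 * and restore the original open flags. */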
static void coroutine_fn commit_run(void *opaque)
{
    CommitBlockJob *s = opaque;
    BlockDriverState *active = s->active;
    BlockDriverState *top = s->top;
    BlockDriverState *base = s->base;
    BlockDriverState *overlay_bs = NULL;
    int64_t sector_num, end;
    int ret = 0;
    int n = 0;
    void *buf;
    int bytes_written = 0;
    int64_t base_len;

    ret = s->common.len = bdrv_getlength(top);

    if (s->common.len < 0) {
        goto exit_restore_reopen;
    }

    ret = base_len = bdrv_getlength(base);
    if (base_len < 0) {
        goto exit_restore_reopen;
    }

    if (base_len < s->common.len) {
        ret = bdrv_truncate(base, s->common.len);
        if (ret) {
            goto exit_restore_reopen;
        }
    }

    overlay_bs = bdrv_find_overlay(active, top);

    end = s->common.len >> BDRV_SECTOR_BITS;
    buf = qemu_blockalign(top, COMMIT_BUFFER_SIZE);

    for (sector_num = 0; sector_num < end; sector_num += n) {
        uint64_t delay_ns = 0;
        bool copy;

wait:
        /* Note that even when no rate limit is applied we need to yield
         * with no pending I/O here so that qemu_aio_flush() returns.
         */
        block_job_sleep_ns(&s->common, rt_clock, delay_ns);
        if (block_job_is_cancelled(&s->common)) {
            break;
        }
        /* Copy if allocated above the base */
        ret = bdrv_co_is_allocated_above(top, base, sector_num,
                                         COMMIT_BUFFER_SIZE / BDRV_SECTOR_SIZE,
                                         &n);
        copy = (ret == 1);
        trace_commit_one_iteration(s, sector_num, n, ret);
        if (copy) {
            if (s->common.speed) {
                delay_ns = ratelimit_calculate_delay(&s->limit, n);
                if (delay_ns > 0) {
                    goto wait;
                }
            }
            ret = commit_populate(top, base, sector_num, n, buf);
            bytes_written += n * BDRV_SECTOR_SIZE;
        }
        if (ret < 0) {
            if (s->on_error == BLOCKDEV_ON_ERROR_STOP ||
                s->on_error == BLOCKDEV_ON_ERROR_REPORT ||
                (s->on_error == BLOCKDEV_ON_ERROR_ENOSPC && ret == -ENOSPC)) {
                goto exit_free_buf;
            } else {
                n = 0;
                continue;
            }
        }
        /* Publish progress */
        s->common.offset += n * BDRV_SECTOR_SIZE;
    }

    ret = 0;

    if (!block_job_is_cancelled(&s->common) && sector_num == end) {
        /* success */
        ret = bdrv_drop_intermediate(active, top, base);
    }

exit_free_buf:
    qemu_vfree(buf);

exit_restore_reopen:
    /* restore base open flags here if appropriate (e.g., change the base back
     * to r/o). These reopens do not need to be atomic, since we won't abort
     * even on failure here */
    if (s->base_flags != bdrv_get_flags(base)) {
        bdrv_reopen(base, s->base_flags, NULL);
    }
    if (s->orig_overlay_flags != bdrv_get_flags(overlay_bs)) {
        bdrv_reopen(overlay_bs, s->orig_overlay_flags, NULL);
    }

    block_job_completed(&s->common, ret);
}
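
/* .set_speed callback: convert the requested bytes-per-second limit into the
 * sectors-per-slice rate used by the copy loop above. */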
static void commit_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    CommitBlockJob *s = container_of(job, CommitBlockJob, common);

    if (speed < 0) {
        error_set(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}
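
/* Job driver definition handed to block_job_create() below. */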
static BlockJobType commit_job_type = {
    .instance_size = sizeof(CommitBlockJob),
    .job_type      = "commit",
    .set_speed     = commit_set_speed,
};
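
/* Create and start a commit job that merges @top into @base on device @bs.
 * Reopens @base and the overlay of @top read/write if necessary, then enters
 * the job coroutine; failures are reported through @errp. */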
void commit_start(BlockDriverState *bs, BlockDriverState *base,
                  BlockDriverState *top, int64_t speed,
                  BlockdevOnError on_error, BlockDriverCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    CommitBlockJob *s;
    BlockReopenQueue *reopen_queue = NULL;
    int orig_overlay_flags;
    int orig_base_flags;
    BlockDriverState *overlay_bs;
    Error *local_err = NULL;

    if ((on_error == BLOCKDEV_ON_ERROR_STOP ||
         on_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        !bdrv_iostatus_is_enabled(bs)) {
        error_set(errp, QERR_INVALID_PARAMETER_COMBINATION);
        return;
    }

    /* Once we support top == active layer, remove this check */
    if (top == bs) {
        error_setg(errp,
                   "Top image as the active layer is currently unsupported");
        return;
    }

    if (top == base) {
        error_setg(errp, "Invalid files for merge: top and base are the same");
        return;
    }

    overlay_bs = bdrv_find_overlay(bs, top);

    if (overlay_bs == NULL) {
        error_setg(errp, "Could not find overlay image for %s:", top->filename);
        return;
    }

    orig_base_flags    = bdrv_get_flags(base);
    orig_overlay_flags = bdrv_get_flags(overlay_bs);

    /* convert base & overlay_bs to r/w, if necessary */
    if (!(orig_base_flags & BDRV_O_RDWR)) {
        reopen_queue = bdrv_reopen_queue(reopen_queue, base,
                                         orig_base_flags | BDRV_O_RDWR);
    }
    if (!(orig_overlay_flags & BDRV_O_RDWR)) {
        reopen_queue = bdrv_reopen_queue(reopen_queue, overlay_bs,
                                         orig_overlay_flags | BDRV_O_RDWR);
    }
    if (reopen_queue) {
        bdrv_reopen_multiple(reopen_queue, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    s = block_job_create(&commit_job_type, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->base   = base;
    s->top    = top;
    s->active = bs;

    s->base_flags         = orig_base_flags;
    s->orig_overlay_flags = orig_overlay_flags;

    s->on_error = on_error;
    s->common.co = qemu_coroutine_create(commit_run);

    trace_commit_start(bs, base, top, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}
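
/*
 * Example usage (illustrative sketch): a caller such as a monitor command
 * handler might start a commit job roughly as follows, assuming it has
 * already resolved @bs, @base_bs and @top_bs and supplies its own completion
 * callback (commit_cb here is a placeholder name):
 *
 *     Error *local_err = NULL;
 *
 *     commit_start(bs, base_bs, top_bs, speed, BLOCKDEV_ON_ERROR_REPORT,
 *                  commit_cb, bs, &local_err);
 *     if (local_err != NULL) {
 *         error_propagate(errp, local_err);
 *     }
 */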