/*
 * Image streaming
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "sysemu/block-backend.h"

enum {
    /*
     * Size of data buffer for populating the image file.  This should be
     * large enough to process multiple clusters in a single call, so that
     * populating contiguous regions of the image is efficient.
     */
    STREAM_BUFFER_SIZE = 512 * 1024, /* in bytes */
};

typedef struct StreamBlockJob {
    BlockJob common;
    BlockDriverState *base;
    BlockdevOnError on_error;
    char *backing_file_str;
    bool bs_read_only;
} StreamBlockJob;
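
/*
 * Read @bytes bytes at @offset through the BlockBackend with
 * BDRV_REQ_COPY_ON_READ, so that data pulled in from the backing chain is
 * written into the top image as a side effect of the read.
 */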
static int coroutine_fn stream_populate(BlockBackend *blk,
                                        int64_t offset, uint64_t bytes,
                                        void *buf)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    assert(bytes < SIZE_MAX);

    /* Copy-on-read the unallocated clusters */
    return blk_co_preadv(blk, offset, qiov.size, &qiov, BDRV_REQ_COPY_ON_READ);
}
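
/*
 * Called when the job is ready to complete successfully: update the backing
 * file name and format recorded in the image and drop the streamed-over part
 * of the backing chain by pointing bs->backing at @base.
 */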
static int stream_prepare(Job *job)
{
    StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
    BlockJob *bjob = &s->common;
    BlockDriverState *bs = blk_bs(bjob->blk);
    BlockDriverState *base = s->base;
    Error *local_err = NULL;
    int ret = 0;

    if (bs->backing) {
        const char *base_id = NULL, *base_fmt = NULL;
        if (base) {
            base_id = s->backing_file_str;
            if (base->drv) {
                base_fmt = base->drv->format_name;
            }
        }
        ret = bdrv_change_backing_file(bs, base_id, base_fmt);
        bdrv_set_backing_hd(bs, base, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -EPERM;
        }
    }

    return ret;
}
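
/* Release resources and restore the original read-only state of the image. */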
static void stream_clean(Job *job)
{
    StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
    BlockJob *bjob = &s->common;
    BlockDriverState *bs = blk_bs(bjob->blk);

    /* Reopen the image back in read-only mode if necessary */
    if (s->bs_read_only) {
        /* Give up write permissions before making it read-only */
        blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
        bdrv_reopen_set_read_only(bs, true, NULL);
    }

    g_free(s->backing_file_str);
}
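
/*
 * Main copy loop: walk the image from offset 0 to the end and populate every
 * region that is unallocated in the top layer but allocated somewhere in the
 * backing chain above @base.
 */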
static int coroutine_fn stream_run(Job *job, Error **errp)
{
    StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
    BlockBackend *blk = s->common.blk;
    BlockDriverState *bs = blk_bs(blk);
    BlockDriverState *base = s->base;
    int64_t len;
    int64_t offset = 0;
    uint64_t delay_ns = 0;
    int error = 0;
    int ret = 0;
    int64_t n = 0; /* bytes */
    void *buf;

    if (!bs->backing) {
        goto out;
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        ret = len;
        goto out;
    }
    job_progress_set_remaining(&s->common.job, len);

    buf = qemu_blockalign(bs, STREAM_BUFFER_SIZE);

    /* Turn on copy-on-read for the whole block device so that guest read
     * requests help us make progress.  Only do this when copying the entire
     * backing chain since the copy-on-read operation does not take base into
     * account.
     */
    if (!base) {
        bdrv_enable_copy_on_read(bs);
    }

    for ( ; offset < len; offset += n) {
        bool copy;

        /* Note that even when no rate limit is applied we need to yield
         * with no pending I/O here so that bdrv_drain_all() returns.
         */
        job_sleep_ns(&s->common.job, delay_ns);
        if (job_is_cancelled(&s->common.job)) {
            break;
        }

        copy = false;

        ret = bdrv_is_allocated(bs, offset, STREAM_BUFFER_SIZE, &n);
        if (ret == 1) {
            /* Allocated in the top, no need to copy. */
        } else if (ret >= 0) {
            /* Copy if allocated in the intermediate images.  Limit to the
             * known-unallocated area [offset, offset + n). */
            ret = bdrv_is_allocated_above(backing_bs(bs), base,
                                          offset, n, &n);

            /* Finish early if end of backing file has been reached */
            if (ret == 0 && n == 0) {
                n = len - offset;
            }

            copy = (ret == 1);
        }
        trace_stream_one_iteration(s, offset, n, ret);
        if (copy) {
            ret = stream_populate(blk, offset, n, buf);
        }
        if (ret < 0) {
            BlockErrorAction action =
                block_job_error_action(&s->common, s->on_error, true, -ret);
            if (action == BLOCK_ERROR_ACTION_STOP) {
                n = 0;
                continue;
            }
            if (error == 0) {
                error = ret;
            }
            if (action == BLOCK_ERROR_ACTION_REPORT) {
                break;
            }
        }
        ret = 0;

        /* Publish progress */
        job_progress_update(&s->common.job, n);
        if (copy) {
            delay_ns = block_job_ratelimit_get_delay(&s->common, n);
        } else {
            delay_ns = 0;
        }
    }

    if (!base) {
        bdrv_disable_copy_on_read(bs);
    }

    /* Do not remove the backing file if an error was there but ignored. */
    ret = error;

    qemu_vfree(buf);

out:
    /* Modify backing chain and close BDSes in main loop */
    return ret;
}

static const BlockJobDriver stream_job_driver = {
    .job_driver = {
        .instance_size = sizeof(StreamBlockJob),
        .job_type      = JOB_TYPE_STREAM,
        .free          = block_job_free,
        .run           = stream_run,
        .prepare       = stream_prepare,
        .clean         = stream_clean,
        .user_resume   = block_job_user_resume,
        .drain         = block_job_drain,
    },
};
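
/*
 * Create and start the streaming job: temporarily reopen @bs read-write if
 * necessary, block writes and resizes on the intermediate nodes that will go
 * away, and then start stream_run() via job_start().
 */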
void stream_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *base, const char *backing_file_str,
                  int creation_flags, int64_t speed,
                  BlockdevOnError on_error, Error **errp)
{
    StreamBlockJob *s;
    BlockDriverState *iter;
    bool bs_read_only;

    /* Make sure that the image is opened in read-write mode */
    bs_read_only = bdrv_is_read_only(bs);
    if (bs_read_only) {
        if (bdrv_reopen_set_read_only(bs, false, errp) != 0) {
            return;
        }
    }

    /* Prevent concurrent jobs trying to modify the graph structure here, we
     * already have our own plans. Also don't allow resize as the image size is
     * queried only at the job start and then cached. */
    s = block_job_create(job_id, &stream_job_driver, NULL, bs,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_GRAPH_MOD,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE,
                         speed, creation_flags, NULL, NULL, errp);
    if (!s) {
        goto fail;
    }

    /* Block all intermediate nodes between bs and base, because they will
     * disappear from the chain after this operation. The streaming job reads
     * every block only once, assuming that it doesn't change, so block writes
     * and resizes. */
    for (iter = backing_bs(bs); iter && iter != base; iter = backing_bs(iter)) {
        block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                           BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED,
                           &error_abort);
    }

    s->base = base;
    s->backing_file_str = g_strdup(backing_file_str);
    s->bs_read_only = bs_read_only;

    s->on_error = on_error;
    trace_stream_start(bs, base, s);
    job_start(&s->common.job);
    return;

fail:
    if (bs_read_only) {
        bdrv_reopen_set_read_only(bs, true, NULL);
    }
}
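
/*
 * Example usage (illustrative sketch only): a caller such as the QMP
 * block-stream handler could start the job along these lines; the job id,
 * speed and error policy below are arbitrary example values:
 *
 *     stream_start("stream0", bs, base_bs, NULL,
 *                  JOB_DEFAULT, 0, BLOCKDEV_ON_ERROR_REPORT, errp);
 */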