4 * Copyright IBM, Corp. 2011
7 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
9 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
10 * See the COPYING.LIB file in the top-level directory.
14 #include "qemu/osdep.h"
16 #include "block/block_int.h"
17 #include "block/blockjob_int.h"
18 #include "qapi/error.h"
19 #include "qapi/qmp/qerror.h"
20 #include "qemu/ratelimit.h"
21 #include "sysemu/block-backend.h"
25 * Size of data buffer for populating the image file. This should be large
26 * enough to process multiple clusters in a single call, so that populating
27 * contiguous regions of the image is efficient.
/* NOTE(review): the enclosing "enum { ... };" lines appear to have been lost
 * in extraction — STREAM_BUFFER_SIZE reads as an enum constant (trailing
 * comma, "= 512 * 1024"); confirm against the upstream file. */
/* 512 KiB per read: amortizes per-call overhead across several clusters. */
STREAM_BUFFER_SIZE
= 512 * 1024, /* in bytes */
/*
 * Per-job state for the image-streaming block job.
 *
 * NOTE(review): extraction dropped several members that the rest of this
 * file clearly uses — stream_exit/stream_run reference "s->common" (a
 * BlockJob, conventionally the first member) and "s->bs_flags" (original
 * open flags restored on exit) — verify against the upstream struct, and
 * the closing "} StreamBlockJob;" line is also missing here.
 */
32 typedef struct StreamBlockJob
{
34 BlockDriverState
/* Node below which no data is copied; streaming stops at this node. */
*base
;
/* Error policy applied when a read fails during streaming (see stream_run). */
35 BlockdevOnError on_error
;
/* Backing-file string written into the image header on completion
 * (see stream_exit); owned by the job and freed there via g_free(). */
36 char *backing_file_str
;
/*
 * Read @bytes at @offset through @blk with BDRV_REQ_COPY_ON_READ so that
 * the data read from the backing chain is written into the top image —
 * this is how streaming "populates" the image file.
 *
 * Returns the result of blk_co_preadv() (0 on success, negative errno on
 * failure). Must be called in coroutine context (coroutine_fn).
 *
 * NOTE(review): the local "struct iovec iov"/"QEMUIOVector qiov"
 * declarations, the "void *buf" parameter line and the function's braces
 * were dropped by extraction — qiov/iov are referenced below but not
 * visibly declared; confirm against upstream.
 */
40 static int coroutine_fn
stream_populate(BlockBackend
*blk
,
41 int64_t offset
, uint64_t bytes
,
/* qemu_iovec_init_external() takes a size_t length; guard the cast. */
50 assert(bytes
< SIZE_MAX
);
51 qemu_iovec_init_external(&qiov
, &iov
, 1);
53 /* Copy-on-read the unallocated clusters */
54 return blk_co_preadv(blk
, offset
, qiov
.size
, &qiov
, BDRV_REQ_COPY_ON_READ
);
/*
 * Job completion callback: finalize the streaming job.
 *
 * On successful, uncancelled completion this rewrites the image's backing
 * file entry (bdrv_change_backing_file) and detaches the streamed-over part
 * of the chain (bdrv_set_backing_hd), then restores the node's original
 * open flags (read-only reopen) if stream_start had switched it to
 * read-write, and frees the job-owned backing_file_str.
 *
 * NOTE(review): extraction dropped lines here — "ret" is used below but its
 * declaration/initialization (presumably from the job's return value) is not
 * visible, and several closing braces and intermediate statements are
 * missing; confirm against upstream before relying on exact control flow.
 */
57 static void stream_exit(Job
*job
)
59 StreamBlockJob
*s
= container_of(job
, StreamBlockJob
, common
.job
);
60 BlockJob
*bjob
= &s
->common
;
61 BlockDriverState
*bs
= blk_bs(bjob
->blk
);
62 BlockDriverState
*base
= s
->base
;
63 Error
*local_err
= NULL
;
/* Only touch the backing chain on clean success with a backing file. */
66 if (!job_is_cancelled(job
) && bs
->backing
&& ret
== 0) {
67 const char *base_id
= NULL
, *base_fmt
= NULL
;
69 base_id
= s
->backing_file_str
;
71 base_fmt
= base
->drv
->format_name
;
/* Record the new backing file in the image header... */
74 ret
= bdrv_change_backing_file(bs
, base_id
, base_fmt
);
/* ...then detach the intermediate nodes from the live graph. */
75 bdrv_set_backing_hd(bs
, base
, &local_err
);
77 error_report_err(local_err
);
84 /* Reopen the image back in read-only mode if necessary */
85 if (s
->bs_flags
!= bdrv_get_flags(bs
)) {
86 /* Give up write permissions before making it read-only */
87 blk_set_perm(bjob
->blk
, 0, BLK_PERM_ALL
, &error_abort
);
88 bdrv_reopen(bs
, s
->bs_flags
, NULL
);
/* backing_file_str was g_strdup'd in stream_start; the job owns it. */
91 g_free(s
->backing_file_str
);
/*
 * Main coroutine of the streaming job.
 *
 * Walks the image from offset 0 to the device length in (at most)
 * STREAM_BUFFER_SIZE steps. For each chunk it checks allocation: data
 * already allocated in the top image is skipped; data allocated in an
 * intermediate image (above @base) is copy-on-read into the top via
 * stream_populate(). Progress is published to the job infrastructure and
 * the configured rate limit is honoured between iterations.
 *
 * NOTE(review): extraction dropped many lines here — the declarations of
 * "len", "buf", "offset" and "ret", the early-exit checks, the allocation
 * branch bodies, the error/retry handling inside the loop, and the
 * function's cleanup/return tail are not visible; the comments below only
 * describe what the visible statements establish.
 */
95 static int coroutine_fn
stream_run(Job
*job
, Error
**errp
)
97 StreamBlockJob
*s
= container_of(job
, StreamBlockJob
, common
.job
);
98 BlockBackend
*blk
= s
->common
.blk
;
99 BlockDriverState
*bs
= blk_bs(blk
);
100 BlockDriverState
*base
= s
->base
;
103 uint64_t delay_ns
= 0;
106 int64_t n
= 0; /* bytes */
/* Device length is queried once here and cached for the whole run
 * (resize is blocked for the duration of the job — see stream_start). */
113 len
= bdrv_getlength(bs
);
118 job_progress_set_remaining(&s
->common
.job
, len
);
120 buf
= qemu_blockalign(bs
, STREAM_BUFFER_SIZE
);
122 /* Turn on copy-on-read for the whole block device so that guest read
123 * requests help us make progress. Only do this when copying the entire
124 * backing chain since the copy-on-read operation does not take base into
128 bdrv_enable_copy_on_read(bs
);
131 for ( ; offset
< len
; offset
+= n
) {
134 /* Note that even when no rate limit is applied we need to yield
135 * with no pending I/O here so that bdrv_drain_all() returns.
137 job_sleep_ns(&s
->common
.job
, delay_ns
);
138 if (job_is_cancelled(&s
->common
.job
)) {
/* First check the top image itself... */
144 ret
= bdrv_is_allocated(bs
, offset
, STREAM_BUFFER_SIZE
, &n
);
146 /* Allocated in the top, no need to copy. */
147 } else if (ret
>= 0) {
148 /* Copy if allocated in the intermediate images. Limit to the
149 * known-unallocated area [offset, offset+n*BDRV_SECTOR_SIZE). */
150 ret
= bdrv_is_allocated_above(backing_bs(bs
), base
,
153 /* Finish early if end of backing file has been reached */
154 if (ret
== 0 && n
== 0) {
160 trace_stream_one_iteration(s
, offset
, n
, ret
);
/* Copy the chunk into the top image (copy-on-read). */
162 ret
= stream_populate(blk
, offset
, n
, buf
);
/* On error, consult the job's on_error policy: stop, ignore or report. */
165 BlockErrorAction action
=
166 block_job_error_action(&s
->common
, s
->on_error
, true, -ret
);
167 if (action
== BLOCK_ERROR_ACTION_STOP
) {
174 if (action
== BLOCK_ERROR_ACTION_REPORT
) {
180 /* Publish progress */
181 job_progress_update(&s
->common
.job
, n
);
/* Throttle: next iteration sleeps delay_ns per the job's rate limit. */
183 delay_ns
= block_job_ratelimit_get_delay(&s
->common
, n
);
190 bdrv_disable_copy_on_read(bs
);
193 /* Do not remove the backing file if an error was there but ignored. */
199 /* Modify backing chain and close BDSes in main loop */
/*
 * Driver table binding this job type into the generic block-job machinery.
 *
 * NOTE(review): extraction dropped some entries — upstream this table also
 * wires the job's run/exit callbacks (stream_run/stream_exit defined above)
 * inside a nested ".job_driver" initializer; confirm against upstream.
 */
203 static const BlockJobDriver stream_job_driver
= {
205 .instance_size
= sizeof(StreamBlockJob
),
206 .job_type
= JOB_TYPE_STREAM
,
207 .free
= block_job_free
,
210 .user_resume
= block_job_user_resume
,
211 .drain
= block_job_drain
,
/*
 * Create and start an image-streaming job on @bs.
 *
 * @job_id: optional job identifier (passed to block_job_create)
 * @bs: top node to populate
 * @base: node below which data is NOT copied (may stay as backing file)
 * @backing_file_str: string to record as the new backing file on success
 *                    (duplicated here; freed by stream_exit)
 * @speed: rate limit in bytes/s
 * @on_error: error policy applied during streaming
 * @errp: error out-parameter
 *
 * Reopens @bs read-write if needed (restored on failure below and by
 * stream_exit on completion), creates the job with permissions that block
 * graph changes and resize for its duration, blocks the intermediate nodes
 * that will disappear from the chain, then starts the job coroutine.
 *
 * NOTE(review): extraction dropped lines — the declarations of "s" and
 * "orig_bs_flags", the error-return statements inside the reopen/create
 * checks, and the "fail:" label for the trailing cleanup are not visible;
 * confirm against upstream.
 */
215 void stream_start(const char *job_id
, BlockDriverState
*bs
,
216 BlockDriverState
*base
, const char *backing_file_str
,
217 int64_t speed
, BlockdevOnError on_error
, Error
**errp
)
220 BlockDriverState
*iter
;
223 /* Make sure that the image is opened in read-write mode */
224 orig_bs_flags
= bdrv_get_flags(bs
);
225 if (!(orig_bs_flags
& BDRV_O_RDWR
)) {
226 if (bdrv_reopen(bs
, orig_bs_flags
| BDRV_O_RDWR
, errp
) != 0) {
231 /* Prevent concurrent jobs trying to modify the graph structure here, we
232 * already have our own plans. Also don't allow resize as the image size is
233 * queried only at the job start and then cached. */
234 s
= block_job_create(job_id
, &stream_job_driver
, NULL
, bs
,
235 BLK_PERM_CONSISTENT_READ
| BLK_PERM_WRITE_UNCHANGED
|
237 BLK_PERM_CONSISTENT_READ
| BLK_PERM_WRITE_UNCHANGED
|
239 speed
, JOB_DEFAULT
, NULL
, NULL
, errp
);
244 /* Block all intermediate nodes between bs and base, because they will
245 * disappear from the chain after this operation. The streaming job reads
246 * every block only once, assuming that it doesn't change, so block writes
248 for (iter
= backing_bs(bs
); iter
&& iter
!= base
; iter
= backing_bs(iter
)) {
249 block_job_add_bdrv(&s
->common
, "intermediate node", iter
, 0,
250 BLK_PERM_CONSISTENT_READ
| BLK_PERM_WRITE_UNCHANGED
,
/* The job owns this copy; stream_exit frees it. */
255 s
->backing_file_str
= g_strdup(backing_file_str
);
/* Remember the original flags so completion can restore read-only mode. */
256 s
->bs_flags
= orig_bs_flags
;
258 s
->on_error
= on_error
;
259 trace_stream_start(bs
, base
, s
);
260 job_start(&s
->common
.job
);
/* Failure path: undo the read-write reopen done above, if any. */
264 if (orig_bs_flags
!= bdrv_get_flags(bs
)) {
265 bdrv_reopen(bs
, orig_bs_flags
, NULL
);