/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/trace.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qerror.h"
#include "qemu/coroutine.h"
#include "qemu/timer.h"
/*
 * The block job API is composed of two categories of functions.
 *
 * The first includes functions used by the monitor.  The monitor is
 * peculiar in that it accesses the block job list with block_job_get, and
 * therefore needs consistency across block_job_get and the actual operation
 * (e.g. block_job_set_speed).  The consistency is achieved with
 * aio_context_acquire/release.  These functions are declared in blockjob.h.
 *
 * The second includes functions used by the block job drivers and sometimes
 * by the core block layer.  These do not care about locking, because the
 * whole coroutine runs under the AioContext lock, and are declared in
 * blockjob_int.h.
 */
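
/*
 * An illustrative sketch (not part of this file) of the first category:
 * a monitor-side caller pairs block_job_get with aio_context_acquire and
 * aio_context_release around the operation it performs.  The job id "job0"
 * and the local_err variable below are hypothetical:
 *
 *     BlockJob *job = block_job_get("job0");
 *     Error *local_err = NULL;
 *
 *     if (job) {
 *         AioContext *ctx = blk_get_aio_context(job->blk);
 *
 *         aio_context_acquire(ctx);
 *         block_job_set_speed(job, 16 * 1024 * 1024, &local_err);
 *         aio_context_release(ctx);
 *     }
 */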
static bool is_block_job(Job *job)
{
    return job_type(job) == JOB_TYPE_BACKUP ||
           job_type(job) == JOB_TYPE_COMMIT ||
           job_type(job) == JOB_TYPE_MIRROR ||
           job_type(job) == JOB_TYPE_STREAM;
}
BlockJob *block_job_next(BlockJob *bjob)
{
    Job *job = bjob ? &bjob->job : NULL;

    do {
        job = job_next(job);
    } while (job && !is_block_job(job));

    return job ? container_of(job, BlockJob, job) : NULL;
}
BlockJob *block_job_get(const char *id)
{
    Job *job = job_get(id);

    if (job && is_block_job(job)) {
        return container_of(job, BlockJob, job);
    } else {
        return NULL;
    }
}
static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque);
static void block_job_detach_aio_context(void *opaque);
void block_job_free(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    BlockDriverState *bs = blk_bs(bjob->blk);

    bs->job = NULL;
    block_job_remove_all_bdrv(bjob);
    blk_remove_aio_context_notifier(bjob->blk,
                                    block_job_attached_aio_context,
                                    block_job_detach_aio_context, bjob);
    blk_unref(bjob->blk);
    error_free(bjob->blocker);
}
static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque)
{
    BlockJob *job = opaque;
    const JobDriver *drv = job->job.driver;
    BlockJobDriver *bjdrv = container_of(drv, BlockJobDriver, job_driver);

    job->job.aio_context = new_context;
    if (bjdrv->attached_aio_context) {
        bjdrv->attached_aio_context(job, new_context);
    }

    job_resume(&job->job);
}
void block_job_drain(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    const JobDriver *drv = job->driver;
    BlockJobDriver *bjdrv = container_of(drv, BlockJobDriver, job_driver);

    blk_drain(bjob->blk);
    if (bjdrv->drain) {
        bjdrv->drain(bjob);
    }
}
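
/*
 * AioContext notifier callback, invoked when the BlockBackend is about to
 * leave its current AioContext: pause the job and drain it until it is
 * either quiescent or completed, then clear job->job.aio_context before
 * the old context goes away.
 */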
static void block_job_detach_aio_context(void *opaque)
{
    BlockJob *job = opaque;

    /* In case the job terminates during aio_poll()... */
    job_ref(&job->job);

    job_pause(&job->job);

    while (!job->job.paused && !job_is_completed(&job->job)) {
        job_drain(&job->job);
    }

    job->job.aio_context = NULL;
    job_unref(&job->job);
}
static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'", job_type_str(&job->job),
                           job->job.id);
}
static void child_job_drained_begin(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    job_pause(&job->job);
}
static void child_job_drained_end(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    job_resume(&job->job);
}
static const BdrvChildRole child_job = {
    .get_parent_desc    = child_job_get_parent_desc,
    .drained_begin      = child_job_drained_begin,
    .drained_end        = child_job_drained_end,
    .stay_at_node       = true,
};
void block_job_remove_all_bdrv(BlockJob *job)
{
    GSList *l;

    for (l = job->nodes; l; l = l->next) {
        BdrvChild *c = l->data;
        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);
    }
    g_slist_free(job->nodes);
    job->nodes = NULL;
}
int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;

    c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
                               job, errp);
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}
bool block_job_is_internal(BlockJob *job)
{
    return (job->job.id == NULL);
}
const BlockJobDriver *block_job_driver(BlockJob *job)
{
    return container_of(job->job.driver, BlockJobDriver, job_driver);
}
/* Assumes the job_mutex is held */
static bool job_timer_pending(Job *job)
{
    return timer_pending(&job->sleep_timer);
}
void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    int64_t old_speed = job->speed;

    if (job_apply_verb(&job->job, JOB_VERB_SET_SPEED, errp)) {
        return;
    }
    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }

    ratelimit_set_speed(&job->limit, speed, BLOCK_JOB_SLICE_TIME);

    job->speed = speed;
    if (speed && speed <= old_speed) {
        return;
    }

    /* kick only if a timer is pending */
    job_enter_cond(&job->job, job_timer_pending);
}
int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n)
{
    if (!job->speed) {
        return 0;
    }

    return ratelimit_calculate_delay(&job->limit, n);
}
BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }
    info = g_new0(BlockJobInfo, 1);
    info->type      = g_strdup(job_type_str(&job->job));
    info->device    = g_strdup(job->job.id);
    info->busy      = atomic_read(&job->job.busy);
    info->paused    = job->job.pause_count > 0;
    info->offset    = job->job.progress_current;
    info->len       = job->job.progress_total;
    info->speed     = job->speed;
    info->io_status = job->iostatus;
    info->ready     = job_is_ready(&job->job);
    info->status    = job->job.status;
    info->auto_finalize = job->job.auto_finalize;
    info->auto_dismiss  = job->job.auto_dismiss;
    info->has_error = job->job.ret != 0;
    info->error     = job->job.ret ? g_strdup(strerror(-job->job.ret)) : NULL;
    return info;
}
static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
static void block_job_event_cancelled(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_cancelled(job_type(&job->job),
                                        job->job.id,
                                        job->job.progress_total,
                                        job->job.progress_current,
                                        job->speed,
                                        &error_abort);
}
static void block_job_event_completed(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    const char *msg = NULL;

    if (block_job_is_internal(job)) {
        return;
    }

    if (job->job.ret < 0) {
        msg = strerror(-job->job.ret);
    }

    qapi_event_send_block_job_completed(job_type(&job->job),
                                        job->job.id,
                                        job->job.progress_total,
                                        job->job.progress_current,
                                        job->speed,
                                        !!msg,
                                        msg,
                                        &error_abort);
}
static void block_job_event_pending(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_pending(job_type(&job->job),
                                      job->job.id,
                                      &error_abort);
}
static void block_job_event_ready(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_ready(job_type(&job->job),
                                    job->job.id,
                                    job->job.progress_total,
                                    job->job.progress_current,
                                    job->speed, &error_abort);
}
/*
 * API for block job drivers and the block layer.  These functions are
 * declared in blockjob_int.h.
 */
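
/*
 * A minimal sketch (not part of this file) of how a block job driver's
 * coroutine might use this API: throttle with block_job_ratelimit_get_delay
 * and map I/O errors to an action with block_job_error_action.  The
 * copy_chunk() helper and the on_error policy variable below are
 * hypothetical:
 *
 *     for (offset = 0; offset < len; offset += n) {
 *         int64_t delay_ns = block_job_ratelimit_get_delay(job, n);
 *
 *         if (delay_ns) {
 *             job_sleep_ns(&job->job, delay_ns);
 *         }
 *         ret = copy_chunk(job, offset, n);
 *         if (ret < 0 &&
 *             block_job_error_action(job, on_error, false, -ret) ==
 *             BLOCK_ERROR_ACTION_REPORT) {
 *             return ret;
 *         }
 *     }
 */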
void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       JobTxn *txn, BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    int ret;

    if (bs->job) {
        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }

    if (job_id == NULL && !(flags & JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
    }

    blk = blk_new(perm, shared_perm);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    job = job_create(job_id, &driver->job_driver, txn, blk_get_aio_context(blk),
                     flags, cb, opaque, errp);
    if (job == NULL) {
        blk_unref(blk);
        return NULL;
    }

    assert(is_block_job(&job->job));
    assert(job->job.driver->free == &block_job_free);
    assert(job->job.driver->user_resume == &block_job_user_resume);
    assert(job->job.driver->drain == &block_job_drain);

    job->blk = blk;

    job->finalize_cancelled_notifier.notify = block_job_event_cancelled;
    job->finalize_completed_notifier.notify = block_job_event_completed;
    job->pending_notifier.notify = block_job_event_pending;
    job->ready_notifier.notify = block_job_event_ready;

    notifier_list_add(&job->job.on_finalize_cancelled,
                      &job->finalize_cancelled_notifier);
    notifier_list_add(&job->job.on_finalize_completed,
                      &job->finalize_completed_notifier);
    notifier_list_add(&job->job.on_pending, &job->pending_notifier);
    notifier_list_add(&job->job.on_ready, &job->ready_notifier);

    error_setg(&job->blocker, "block device is in use by block job: %s",
               job_type_str(&job->job));
    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
    bs->job = job;

    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
                                 block_job_detach_aio_context, job);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            job_early_fail(&job->job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }

    return job;
}
void block_job_iostatus_reset(BlockJob *job)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->job.user_paused && job->job.pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
void block_job_user_resume(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    block_job_iostatus_reset(bjob);
}
BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->job.id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action, &error_abort);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        job_pause(&job->job);
        /* make the pause user visible, which will be resumed from QMP. */
        job->job.user_paused = true;
        block_job_iostatus_set_err(job, error);
    }
    return action;
}