/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/trace.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qerror.h"
#include "qemu/coroutine.h"
#include "qemu/timer.h"

/*
 * The block job API is composed of two categories of functions.
 *
 * The first includes functions used by the monitor.  The monitor is
 * peculiar in that it accesses the block job list with block_job_get, and
 * therefore needs consistency across block_job_get and the actual operation
 * (e.g. block_job_set_speed).  The consistency is achieved with
 * aio_context_acquire/release.  These functions are declared in blockjob.h.
 *
 * The second includes functions used by the block job drivers and sometimes
 * by the core block layer.  These do not care about locking, because the
 * whole coroutine runs under the AioContext lock, and are declared in
 * blockjob_int.h.
 */

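/*
 * Illustrative sketch (not part of the original file): a monitor-side caller
 * is expected to wrap the lookup and the operation in the job's AioContext
 * lock, roughly like this.  find_block_job() is a hypothetical helper here
 * (blockdev.c has one of this shape that calls block_job_get() and acquires
 * the context before returning):
 *
 *     AioContext *aio_context;
 *     BlockJob *job = find_block_job("job0", &aio_context, errp);
 *
 *     if (job) {
 *         block_job_set_speed(job, speed, errp);
 *         aio_context_release(aio_context);
 *     }
 */
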
static bool is_block_job(Job *job)
{
    return job_type(job) == JOB_TYPE_BACKUP ||
           job_type(job) == JOB_TYPE_COMMIT ||
           job_type(job) == JOB_TYPE_MIRROR ||
           job_type(job) == JOB_TYPE_STREAM;
}

BlockJob *block_job_next(BlockJob *bjob)
{
    Job *job = bjob ? &bjob->job : NULL;

    do {
        job = job_next(job);
    } while (job && !is_block_job(job));

    return job ? container_of(job, BlockJob, job) : NULL;
}

BlockJob *block_job_get(const char *id)
{
    Job *job = job_get(id);

    if (job && is_block_job(job)) {
        return container_of(job, BlockJob, job);
    } else {
        return NULL;
    }
}

void block_job_free(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    BlockDriverState *bs = blk_bs(bjob->blk);

    bs->job = NULL;
    block_job_remove_all_bdrv(bjob);
    blk_unref(bjob->blk);
    error_free(bjob->blocker);
}

void block_job_drain(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    const JobDriver *drv = job->driver;
    BlockJobDriver *bjdrv = container_of(drv, BlockJobDriver, job_driver);

    blk_drain(bjob->blk);
    if (bjdrv->drain) {
        bjdrv->drain(bjob);
    }
}

static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'", job_type_str(&job->job),
                           job->job.id);
}

static void child_job_drained_begin(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    job_pause(&job->job);
}

static bool child_job_drained_poll(BdrvChild *c)
{
    BlockJob *bjob = c->opaque;
    Job *job = &bjob->job;
    const BlockJobDriver *drv = block_job_driver(bjob);

    /* An inactive or completed job doesn't have any pending requests. Jobs
     * with !job->busy are either already paused or have a pause point after
     * being reentered, so no job driver code will run before they pause. */
    if (!job->busy || job_is_completed(job)) {
        return false;
    }

    /* Otherwise, assume that it isn't fully stopped yet, but allow the job to
     * override this assumption. */
    if (drv->drained_poll) {
        return drv->drained_poll(bjob);
    } else {
        return true;
    }
}

static void child_job_drained_end(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    job_resume(&job->job);
}

static bool child_job_can_set_aio_ctx(BdrvChild *c, AioContext *ctx,
                                      GSList **ignore, Error **errp)
{
    BlockJob *job = c->opaque;
    GSList *l;

    for (l = job->nodes; l; l = l->next) {
        BdrvChild *sibling = l->data;
        if (!bdrv_child_can_set_aio_context(sibling, ctx, ignore, errp)) {
            return false;
        }
    }
    return true;
}

static void child_job_set_aio_ctx(BdrvChild *c, AioContext *ctx,
                                  GSList **ignore)
{
    BlockJob *job = c->opaque;
    GSList *l;

    for (l = job->nodes; l; l = l->next) {
        BdrvChild *sibling = l->data;
        if (g_slist_find(*ignore, sibling)) {
            continue;
        }
        *ignore = g_slist_prepend(*ignore, sibling);
        bdrv_set_aio_context_ignore(sibling->bs, ctx, ignore);
    }

    job->job.aio_context = ctx;
}

static const BdrvChildRole child_job = {
    .get_parent_desc    = child_job_get_parent_desc,
    .drained_begin      = child_job_drained_begin,
    .drained_poll       = child_job_drained_poll,
    .drained_end        = child_job_drained_end,
    .can_set_aio_ctx    = child_job_can_set_aio_ctx,
    .set_aio_ctx        = child_job_set_aio_ctx,
    .stay_at_node       = true,
};

void block_job_remove_all_bdrv(BlockJob *job)
{
    GSList *l;

    for (l = job->nodes; l; l = l->next) {
        BdrvChild *c = l->data;
        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);
    }
    g_slist_free(job->nodes);
    job->nodes = NULL;
}

int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;

    if (job->job.aio_context != qemu_get_aio_context()) {
        aio_context_release(job->job.aio_context);
    }
    c = bdrv_root_attach_child(bs, name, &child_job, job->job.aio_context,
                               perm, shared_perm, job, errp);
    if (job->job.aio_context != qemu_get_aio_context()) {
        aio_context_acquire(job->job.aio_context);
    }
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}

static void block_job_on_idle(Notifier *n, void *opaque)
{
    aio_wait_kick();
}

bool block_job_is_internal(BlockJob *job)
{
    return (job->job.id == NULL);
}

const BlockJobDriver *block_job_driver(BlockJob *job)
{
    return container_of(job->job.driver, BlockJobDriver, job_driver);
}

/* Assumes the job_mutex is held */
static bool job_timer_pending(Job *job)
{
    return timer_pending(&job->sleep_timer);
}

void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    int64_t old_speed = job->speed;

    if (job_apply_verb(&job->job, JOB_VERB_SET_SPEED, errp)) {
        return;
    }
    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }

    ratelimit_set_speed(&job->limit, speed, BLOCK_JOB_SLICE_TIME);

    job->speed = speed;
    if (speed && speed <= old_speed) {
        return;
    }

    /* kick only if a timer is pending */
    job_enter_cond(&job->job, job_timer_pending);
}

int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n)
{
    if (!job->speed) {
        return 0;
    }

    return ratelimit_calculate_delay(&job->limit, n);
}

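/*
 * Illustrative sketch (hypothetical names, not part of the original file):
 * a job's main coroutine typically turns the returned delay into a sleep
 * between copied chunks, roughly:
 *
 *     while (offset < len) {
 *         int64_t delay_ns = block_job_ratelimit_get_delay(&s->common, n);
 *         if (delay_ns > 0) {
 *             job_sleep_ns(&s->common.job, delay_ns);
 *         }
 *         ... copy n bytes at offset, then advance offset ...
 *     }
 *
 * s, n, offset and len are placeholders; see the loops in block/stream.c
 * and block/commit.c for the in-tree pattern.
 */
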
BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }
    info = g_new0(BlockJobInfo, 1);
    info->type      = g_strdup(job_type_str(&job->job));
    info->device    = g_strdup(job->job.id);
    info->busy      = atomic_read(&job->job.busy);
    info->paused    = job->job.pause_count > 0;
    info->offset    = job->job.progress_current;
    info->len       = job->job.progress_total;
    info->speed     = job->speed;
    info->io_status = job->iostatus;
    info->ready     = job_is_ready(&job->job);
    info->status    = job->job.status;
    info->auto_finalize = job->job.auto_finalize;
    info->auto_dismiss  = job->job.auto_dismiss;
    info->has_error = job->job.ret != 0;
    info->error     = job->job.ret ? g_strdup(strerror(-job->job.ret)) : NULL;
    return info;
}

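/*
 * Illustrative example (values made up): the fields filled in above are what
 * the QMP query-block-jobs command reports for each job, e.g.:
 *
 *     { "type": "stream", "device": "job0", "busy": false, "paused": false,
 *       "offset": 536870912, "len": 1073741824, "speed": 0,
 *       "io-status": "ok", "ready": false, "status": "running",
 *       "auto-finalize": true, "auto-dismiss": true }
 */
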
static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

static void block_job_event_cancelled(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_cancelled(job_type(&job->job),
                                        job->job.id,
                                        job->job.progress_total,
                                        job->job.progress_current,
                                        job->speed);
}

static void block_job_event_completed(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    const char *msg = NULL;

    if (block_job_is_internal(job)) {
        return;
    }

    if (job->job.ret < 0) {
        msg = strerror(-job->job.ret);
    }

    qapi_event_send_block_job_completed(job_type(&job->job),
                                        job->job.id,
                                        job->job.progress_total,
                                        job->job.progress_current,
                                        job->speed,
                                        !!msg,
                                        msg);
}

static void block_job_event_pending(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_pending(job_type(&job->job),
                                      job->job.id);
}

static void block_job_event_ready(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_ready(job_type(&job->job),
                                    job->job.id,
                                    job->job.progress_total,
                                    job->job.progress_current,
                                    job->speed);
}

/*
 * API for block job drivers and the block layer.  These functions are
 * declared in blockjob_int.h.
 */

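/*
 * Illustrative sketch (hypothetical names, not part of the original file):
 * a block job driver embeds BlockJob at the start of its state struct and
 * creates its job roughly like this:
 *
 *     typedef struct ExampleJob {
 *         BlockJob common;
 *         BlockDriverState *target;
 *     } ExampleJob;
 *
 *     ExampleJob *s = block_job_create("job0", &example_job_driver, NULL, bs,
 *                                      0, BLK_PERM_ALL, 0, JOB_DEFAULT,
 *                                      NULL, NULL, errp);
 *     if (s) {
 *         block_job_add_bdrv(&s->common, "target", s->target,
 *                            0, BLK_PERM_ALL, &error_abort);
 *     }
 *
 * ExampleJob and example_job_driver are made-up names; the in-tree drivers
 * (stream, commit, mirror, backup) in block/ follow this pattern.
 */
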
void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       JobTxn *txn, BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    int ret;

    if (bs->job) {
        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }

    if (job_id == NULL && !(flags & JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
    }

    blk = blk_new(bdrv_get_aio_context(bs), perm, shared_perm);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    job = job_create(job_id, &driver->job_driver, txn, blk_get_aio_context(blk),
                     flags, cb, opaque, errp);
    if (job == NULL) {
        blk_unref(blk);
        return NULL;
    }

    assert(is_block_job(&job->job));
    assert(job->job.driver->free == &block_job_free);
    assert(job->job.driver->user_resume == &block_job_user_resume);
    assert(job->job.driver->drain == &block_job_drain);

    job->blk = blk;

    job->finalize_cancelled_notifier.notify = block_job_event_cancelled;
    job->finalize_completed_notifier.notify = block_job_event_completed;
    job->pending_notifier.notify = block_job_event_pending;
    job->ready_notifier.notify = block_job_event_ready;
    job->idle_notifier.notify = block_job_on_idle;

    notifier_list_add(&job->job.on_finalize_cancelled,
                      &job->finalize_cancelled_notifier);
    notifier_list_add(&job->job.on_finalize_completed,
                      &job->finalize_completed_notifier);
    notifier_list_add(&job->job.on_pending, &job->pending_notifier);
    notifier_list_add(&job->job.on_ready, &job->ready_notifier);
    notifier_list_add(&job->job.on_idle, &job->idle_notifier);

    error_setg(&job->blocker, "block device is in use by block job: %s",
               job_type_str(&job->job));
    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
    bs->job = job;

    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    blk_set_allow_aio_context_change(blk, true);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            job_early_fail(&job->job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }

    return job;
}

void block_job_iostatus_reset(BlockJob *job)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->job.user_paused && job->job.pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

void block_job_user_resume(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    block_job_iostatus_reset(bjob);
}

BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->job.id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        if (!job->job.user_paused) {
            job_pause(&job->job);
            /* make the pause user visible, which will be resumed from QMP. */
            job->job.user_paused = true;
        }
        block_job_iostatus_set_err(job, error);
    }
    return action;
}

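/*
 * Illustrative sketch (hypothetical names, not part of the original file):
 * drivers call this from their I/O error path and honour the returned
 * action, roughly:
 *
 *     if (ret < 0) {
 *         BlockErrorAction action =
 *             block_job_error_action(&s->common, s->on_error, false, -ret);
 *         if (action == BLOCK_ERROR_ACTION_REPORT) {
 *             return ret;
 *         }
 *     }
 *
 * s and s->on_error are placeholders; block/stream.c and block/commit.c
 * contain the in-tree callers.
 */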