/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/trace.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qerror.h"
#include "qemu/coroutine.h"
#include "qemu/id.h"
#include "qemu/timer.h"

/* Right now, this mutex is only needed to synchronize accesses to job->busy
 * and job->sleep_timer, such as concurrent calls to block_job_do_yield and
 * block_job_enter. */
static QemuMutex block_job_mutex;

/* BlockJob State Transition Table */
bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = {
                                          /* U, C, R, P, Y, S, X, E, N */
    /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0},
    /* C: */ [BLOCK_JOB_STATUS_CREATED]   = {0, 0, 1, 0, 0, 0, 1, 0, 1},
    /* R: */ [BLOCK_JOB_STATUS_RUNNING]   = {0, 0, 0, 1, 1, 0, 1, 1, 0},
    /* P: */ [BLOCK_JOB_STATUS_PAUSED]    = {0, 0, 1, 0, 0, 0, 0, 0, 0},
    /* Y: */ [BLOCK_JOB_STATUS_READY]     = {0, 0, 0, 0, 0, 1, 1, 1, 0},
    /* S: */ [BLOCK_JOB_STATUS_STANDBY]   = {0, 0, 0, 0, 1, 0, 0, 0, 0},
    /* X: */ [BLOCK_JOB_STATUS_ABORTING]  = {0, 0, 0, 0, 0, 0, 0, 1, 0},
    /* E: */ [BLOCK_JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 1},
    /* N: */ [BLOCK_JOB_STATUS_NULL]      = {0, 0, 0, 0, 0, 0, 0, 0, 0},
};

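/* Example: a successful job that is started, becomes ready, and is
 * auto-dismissed walks the table above as
 *
 *   UNDEFINED -> CREATED    (block_job_create)
 *   CREATED   -> RUNNING    (block_job_start)
 *   RUNNING   -> READY      (block_job_event_ready)
 *   READY     -> CONCLUDED  (block_job_conclude)
 *   CONCLUDED -> NULL       (block_job_decommission)
 *
 * Any step not marked 1 above, e.g. PAUSED -> READY, trips the assertion in
 * block_job_state_transition().
 */
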
bool BlockJobVerbTable[BLOCK_JOB_VERB__MAX][BLOCK_JOB_STATUS__MAX] = {
                                          /* U, C, R, P, Y, S, X, E, N */
    [BLOCK_JOB_VERB_CANCEL]               = {0, 1, 1, 1, 1, 1, 0, 0, 0},
    [BLOCK_JOB_VERB_PAUSE]                = {0, 1, 1, 1, 1, 1, 0, 0, 0},
    [BLOCK_JOB_VERB_RESUME]               = {0, 1, 1, 1, 1, 1, 0, 0, 0},
    [BLOCK_JOB_VERB_SET_SPEED]            = {0, 1, 1, 1, 1, 1, 0, 0, 0},
    [BLOCK_JOB_VERB_COMPLETE]             = {0, 0, 0, 0, 1, 0, 0, 0, 0},
    [BLOCK_JOB_VERB_DISMISS]              = {0, 0, 0, 0, 0, 0, 0, 1, 0},
};

static void block_job_state_transition(BlockJob *job, BlockJobStatus s1)
{
    BlockJobStatus s0 = job->status;
    assert(s1 >= 0 && s1 <= BLOCK_JOB_STATUS__MAX);
    trace_block_job_state_transition(job, job->ret, BlockJobSTT[s0][s1] ?
                                     "allowed" : "disallowed",
                                     qapi_enum_lookup(&BlockJobStatus_lookup,
                                                      s0),
                                     qapi_enum_lookup(&BlockJobStatus_lookup,
                                                      s1));
    assert(BlockJobSTT[s0][s1]);
    job->status = s1;
}

static int block_job_apply_verb(BlockJob *job, BlockJobVerb bv, Error **errp)
{
    assert(bv >= 0 && bv <= BLOCK_JOB_VERB__MAX);
    trace_block_job_apply_verb(job, qapi_enum_lookup(&BlockJobStatus_lookup,
                                                     job->status),
                               qapi_enum_lookup(&BlockJobVerb_lookup, bv),
                               BlockJobVerbTable[bv][job->status] ?
                               "allowed" : "prohibited");
    if (BlockJobVerbTable[bv][job->status]) {
        return 0;
    }
    error_setg(errp, "Job '%s' in state '%s' cannot accept command verb '%s'",
               job->id, qapi_enum_lookup(&BlockJobStatus_lookup, job->status),
               qapi_enum_lookup(&BlockJobVerb_lookup, bv));
    return -EPERM;
}

static void block_job_lock(void)
{
    qemu_mutex_lock(&block_job_mutex);
}

static void block_job_unlock(void)
{
    qemu_mutex_unlock(&block_job_mutex);
}

static void __attribute__((__constructor__)) block_job_init(void)
{
    qemu_mutex_init(&block_job_mutex);
}

static void block_job_event_cancelled(BlockJob *job);
static void block_job_event_completed(BlockJob *job, const char *msg);
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job));

/* Transactional group of block jobs */
struct BlockJobTxn {

    /* Is this txn being cancelled? */
    bool aborting;

    /* List of jobs */
    QLIST_HEAD(, BlockJob) jobs;

    /* Reference count */
    int refcnt;
};

static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);

/*
 * The block job API is composed of two categories of functions.
 *
 * The first includes functions used by the monitor.  The monitor is
 * peculiar in that it accesses the block job list with block_job_get, and
 * therefore needs consistency across block_job_get and the actual operation
 * (e.g. block_job_set_speed).  The consistency is achieved with
 * aio_context_acquire/release.  These functions are declared in blockjob.h.
 *
 * The second includes functions used by the block job drivers and sometimes
 * by the core block layer.  These do not care about locking, because the
 * whole coroutine runs under the AioContext lock, and are declared in
 * blockjob_int.h.
 */

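/* Example: the monitor-side pattern described above (a sketch; "job0" is a
 * placeholder job ID and error handling is elided):
 *
 *     Error *err = NULL;
 *     BlockJob *job = block_job_get("job0");
 *     if (job) {
 *         AioContext *ctx = blk_get_aio_context(job->blk);
 *         aio_context_acquire(ctx);
 *         block_job_set_speed(job, 10 * 1024 * 1024, &err);
 *         aio_context_release(ctx);
 *     }
 */
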
BlockJob *block_job_next(BlockJob *job)
{
    if (!job) {
        return QLIST_FIRST(&block_jobs);
    }
    return QLIST_NEXT(job, job_list);
}

BlockJob *block_job_get(const char *id)
{
    BlockJob *job;

    QLIST_FOREACH(job, &block_jobs, job_list) {
        if (job->id && !strcmp(id, job->id)) {
            return job;
        }
    }

    return NULL;
}

BlockJobTxn *block_job_txn_new(void)
{
    BlockJobTxn *txn = g_new0(BlockJobTxn, 1);
    QLIST_INIT(&txn->jobs);
    txn->refcnt = 1;
    return txn;
}

static void block_job_txn_ref(BlockJobTxn *txn)
{
    txn->refcnt++;
}

void block_job_txn_unref(BlockJobTxn *txn)
{
    if (txn && --txn->refcnt == 0) {
        g_free(txn);
    }
}

void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
{
    if (!txn) {
        return;
    }

    assert(!job->txn);
    job->txn = txn;

    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
    block_job_txn_ref(txn);
}

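/* Example: grouping jobs so they complete or abort together (a sketch; the
 * member jobs would be created by block_job_create() with @txn passed in,
 * which calls block_job_txn_add_job() internally):
 *
 *     BlockJobTxn *txn = block_job_txn_new();
 *     ... create two or more jobs, passing txn to block_job_create() ...
 *     block_job_txn_unref(txn);    // each job still holds a txn reference
 *
 * If any member fails or is cancelled, block_job_completed_txn_abort()
 * cancels the rest; only when all succeed does
 * block_job_completed_txn_success() commit them together.
 */
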
static void block_job_pause(BlockJob *job)
{
    job->pause_count++;
}

static void block_job_resume(BlockJob *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }
    block_job_enter(job);
}

void block_job_ref(BlockJob *job)
{
    ++job->refcnt;
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque);
static void block_job_detach_aio_context(void *opaque);

void block_job_unref(BlockJob *job)
{
    if (--job->refcnt == 0) {
        assert(job->status == BLOCK_JOB_STATUS_NULL);
        BlockDriverState *bs = blk_bs(job->blk);
        QLIST_REMOVE(job, job_list);
        bs->job = NULL;
        block_job_remove_all_bdrv(job);
        blk_remove_aio_context_notifier(job->blk,
                                        block_job_attached_aio_context,
                                        block_job_detach_aio_context, job);
        blk_unref(job->blk);
        error_free(job->blocker);
        g_free(job->id);
        assert(!timer_pending(&job->sleep_timer));
        g_free(job);
    }
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque)
{
    BlockJob *job = opaque;

    if (job->driver->attached_aio_context) {
        job->driver->attached_aio_context(job, new_context);
    }

    block_job_resume(job);
}

static void block_job_drain(BlockJob *job)
{
    /* If job is !job->busy this kicks it into the next pause point. */
    block_job_enter(job);

    blk_drain(job->blk);
    if (job->driver->drain) {
        job->driver->drain(job);
    }
}

static void block_job_detach_aio_context(void *opaque)
{
    BlockJob *job = opaque;

    /* In case the job terminates during aio_poll()... */
    block_job_ref(job);

    block_job_pause(job);

    while (!job->paused && !job->completed) {
        block_job_drain(job);
    }

    block_job_unref(job);
}

static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'",
                           BlockJobType_str(job->driver->job_type),
                           job->id);
}

static void child_job_drained_begin(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    block_job_pause(job);
}

static void child_job_drained_end(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    block_job_resume(job);
}

static const BdrvChildRole child_job = {
    .get_parent_desc    = child_job_get_parent_desc,
    .drained_begin      = child_job_drained_begin,
    .drained_end        = child_job_drained_end,
    .stay_at_node       = true,
};

void block_job_remove_all_bdrv(BlockJob *job)
{
    GSList *l;
    for (l = job->nodes; l; l = l->next) {
        BdrvChild *c = l->data;
        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);
    }
    g_slist_free(job->nodes);
    job->nodes = NULL;
}

int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;

    c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
                               job, errp);
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_ref(bs);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}

bool block_job_is_internal(BlockJob *job)
{
    return (job->id == NULL);
}

static bool block_job_started(BlockJob *job)
{
    return job->co;
}

/**
 * All jobs must allow a pause point before entering their job proper. This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn block_job_co_entry(void *opaque)
{
    BlockJob *job = opaque;

    assert(job && job->driver && job->driver->start);
    block_job_pause_point(job);
    job->driver->start(job);
}

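/* Example: a hypothetical driver's start function honouring the pause-point
 * contract above (a sketch; ExampleJob, more_work(), do_one_chunk() and
 * example_complete_cb are illustrative only):
 *
 *     static void coroutine_fn example_job_start(BlockJob *job)
 *     {
 *         ExampleJob *s = container_of(job, ExampleJob, common);
 *
 *         while (more_work(s) && !block_job_is_cancelled(job)) {
 *             do_one_chunk(s);
 *             block_job_pause_point(job);
 *         }
 *         block_job_defer_to_main_loop(job, example_complete_cb, NULL);
 *     }
 */
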
static void block_job_sleep_timer_cb(void *opaque)
{
    BlockJob *job = opaque;

    block_job_enter(job);
}

void block_job_start(BlockJob *job)
{
    assert(job && !block_job_started(job) && job->paused &&
           job->driver && job->driver->start);
    job->co = qemu_coroutine_create(block_job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    block_job_state_transition(job, BLOCK_JOB_STATUS_RUNNING);
    bdrv_coroutine_enter(blk_bs(job->blk), job->co);
}

static void block_job_decommission(BlockJob *job)
{
    assert(job);
    job->completed = true;
    job->busy = false;
    job->paused = false;
    job->deferred_to_main_loop = true;
    block_job_state_transition(job, BLOCK_JOB_STATUS_NULL);
    block_job_unref(job);
}

static void block_job_do_dismiss(BlockJob *job)
{
    block_job_decommission(job);
}

static void block_job_conclude(BlockJob *job)
{
    block_job_state_transition(job, BLOCK_JOB_STATUS_CONCLUDED);
    if (job->auto_dismiss || !block_job_started(job)) {
        block_job_do_dismiss(job);
    }
}

static void block_job_completed_single(BlockJob *job)
{
    assert(job->completed);

    if (job->ret || block_job_is_cancelled(job)) {
        block_job_state_transition(job, BLOCK_JOB_STATUS_ABORTING);
    }

    if (!job->ret) {
        if (job->driver->commit) {
            job->driver->commit(job);
        }
    } else {
        if (job->driver->abort) {
            job->driver->abort(job);
        }
    }
    if (job->driver->clean) {
        job->driver->clean(job);
    }

    if (job->cb) {
        job->cb(job->opaque, job->ret);
    }

    /* Emit events only if we actually started */
    if (block_job_started(job)) {
        if (block_job_is_cancelled(job)) {
            block_job_event_cancelled(job);
        } else {
            const char *msg = NULL;
            if (job->ret < 0) {
                msg = strerror(-job->ret);
            }
            block_job_event_completed(job, msg);
        }
    }

    QLIST_REMOVE(job, txn_list);
    block_job_txn_unref(job->txn);
    block_job_conclude(job);
}

static void block_job_cancel_async(BlockJob *job)
{
    if (job->iostatus != BLOCK_DEVICE_IO_STATUS_OK) {
        block_job_iostatus_reset(job);
    }
    if (job->user_paused) {
        /* Do not call block_job_enter here, the caller will handle it. */
        job->user_paused = false;
        job->pause_count--;
    }
    job->cancelled = true;
}

static int block_job_finish_sync(BlockJob *job,
                                 void (*finish)(BlockJob *, Error **errp),
                                 Error **errp)
{
    Error *local_err = NULL;
    int ret;

    assert(blk_bs(job->blk)->job == job);

    block_job_ref(job);

    if (finish) {
        finish(job, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        block_job_unref(job);
        return -EBUSY;
    }
    /* block_job_drain calls block_job_enter, and it should be enough to
     * induce progress until the job completes or moves to the main thread.
     */
    while (!job->deferred_to_main_loop && !job->completed) {
        block_job_drain(job);
    }
    while (!job->completed) {
        aio_poll(qemu_get_aio_context(), true);
    }
    ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
    block_job_unref(job);
    return ret;
}

static void block_job_completed_txn_abort(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    block_job_txn_ref(txn);

    /* We are the first failed job. Cancel other jobs. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
    }

    /* Other jobs are effectively cancelled by us, set the status for
     * them; this job, however, may or may not be cancelled, depending
     * on the caller, so leave it. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job != job) {
            block_job_cancel_async(other_job);
        }
    }
    while (!QLIST_EMPTY(&txn->jobs)) {
        other_job = QLIST_FIRST(&txn->jobs);
        ctx = blk_get_aio_context(other_job->blk);
        if (!other_job->completed) {
            assert(other_job->cancelled);
            block_job_finish_sync(other_job, NULL, NULL);
        }
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }

    block_job_txn_unref(txn);
}

static void block_job_completed_txn_success(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job, *next;
    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!other_job->completed) {
            return;
        }
    }
    /* We are the last completed job, commit the transaction. */
    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
        assert(other_job->ret == 0);
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }
}

/* Assumes the block_job_mutex is held */
static bool block_job_timer_pending(BlockJob *job)
{
    return timer_pending(&job->sleep_timer);
}

void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    Error *local_err = NULL;
    int64_t old_speed = job->speed;

    if (!job->driver->set_speed) {
        error_setg(errp, QERR_UNSUPPORTED);
        return;
    }
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_SET_SPEED, errp)) {
        return;
    }
    job->driver->set_speed(job, speed, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    job->speed = speed;
    if (speed && speed <= old_speed) {
        return;
    }

    /* kick only if a timer is pending */
    block_job_enter_cond(job, block_job_timer_pending);
}

void block_job_complete(BlockJob *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->id);
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_COMPLETE, errp)) {
        return;
    }
    if (job->pause_count || job->cancelled || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}

void block_job_dismiss(BlockJob **jobptr, Error **errp)
{
    BlockJob *job = *jobptr;
    /* similarly to _complete, this is QMP-interface only. */
    assert(job->id);
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_DISMISS, errp)) {
        return;
    }

    block_job_do_dismiss(job);
    *jobptr = NULL;
}

void block_job_user_pause(BlockJob *job, Error **errp)
{
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_PAUSE, errp)) {
        return;
    }
    if (job->user_paused) {
        error_setg(errp, "Job is already paused");
        return;
    }
    job->user_paused = true;
    block_job_pause(job);
}

bool block_job_user_paused(BlockJob *job)
{
    return job->user_paused;
}

void block_job_user_resume(BlockJob *job, Error **errp)
{
    assert(job);
    if (!job->user_paused || job->pause_count <= 0) {
        error_setg(errp, "Can't resume a job that was not paused");
        return;
    }
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_RESUME, errp)) {
        return;
    }
    block_job_iostatus_reset(job);
    job->user_paused = false;
    block_job_resume(job);
}

void block_job_cancel(BlockJob *job)
{
    if (job->status == BLOCK_JOB_STATUS_CONCLUDED) {
        block_job_do_dismiss(job);
    } else if (block_job_started(job)) {
        block_job_cancel_async(job);
        block_job_enter(job);
    } else {
        block_job_completed(job, -ECANCELED);
    }
}

void block_job_user_cancel(BlockJob *job, Error **errp)
{
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_CANCEL, errp)) {
        return;
    }
    block_job_cancel(job);
}

/* A wrapper around block_job_cancel() taking an Error ** parameter so it may
 * be used with block_job_finish_sync() without the need for (rather nasty)
 * function pointer casts there. */
static void block_job_cancel_err(BlockJob *job, Error **errp)
{
    block_job_cancel(job);
}

int block_job_cancel_sync(BlockJob *job)
{
    return block_job_finish_sync(job, &block_job_cancel_err, NULL);
}

void block_job_cancel_sync_all(void)
{
    BlockJob *job;
    AioContext *aio_context;

    while ((job = QLIST_FIRST(&block_jobs))) {
        aio_context = blk_get_aio_context(job->blk);
        aio_context_acquire(aio_context);
        block_job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}

int block_job_complete_sync(BlockJob *job, Error **errp)
{
    return block_job_finish_sync(job, &block_job_complete, errp);
}

BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }
    info = g_new0(BlockJobInfo, 1);
    info->type      = g_strdup(BlockJobType_str(job->driver->job_type));
    info->device    = g_strdup(job->id);
    info->len       = job->len;
    info->busy      = atomic_read(&job->busy);
    info->paused    = job->pause_count > 0;
    info->offset    = job->offset;
    info->speed     = job->speed;
    info->io_status = job->iostatus;
    info->ready     = job->ready;
    info->status    = job->status;
    return info;
}

static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

static void block_job_event_cancelled(BlockJob *job)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_cancelled(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        &error_abort);
}

static void block_job_event_completed(BlockJob *job, const char *msg)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_completed(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        !!msg,
                                        msg,
                                        &error_abort);
}

/*
 * API for block job drivers and the block layer.  These functions are
 * declared in blockjob_int.h.
 */

void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       BlockJobTxn *txn, BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    int ret;

    if (bs->job) {
        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }

    if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
        if (!*job_id) {
            error_setg(errp, "An explicit job ID is required for this node");
            return NULL;
        }
    }

    if (job_id) {
        if (flags & BLOCK_JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal block job");
            return NULL;
        }

        if (!id_wellformed(job_id)) {
            error_setg(errp, "Invalid job ID '%s'", job_id);
            return NULL;
        }

        if (block_job_get(job_id)) {
            error_setg(errp, "Job ID '%s' already in use", job_id);
            return NULL;
        }
    }

    blk = blk_new(perm, shared_perm);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    job = g_malloc0(driver->instance_size);
    job->driver        = driver;
    job->id            = g_strdup(job_id);
    job->blk           = blk;
    job->cb            = cb;
    job->opaque        = opaque;
    job->busy          = false;
    job->paused        = true;
    job->pause_count   = 1;
    job->refcnt        = 1;
    job->auto_dismiss  = !(flags & BLOCK_JOB_MANUAL_DISMISS);
    block_job_state_transition(job, BLOCK_JOB_STATUS_CREATED);
    aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
                   QEMU_CLOCK_REALTIME, SCALE_NS,
                   block_job_sleep_timer_cb, job);

    error_setg(&job->blocker, "block device is in use by block job: %s",
               BlockJobType_str(driver->job_type));
    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
    bs->job = job;

    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    QLIST_INSERT_HEAD(&block_jobs, job, job_list);

    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
                                 block_job_detach_aio_context, job);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            block_job_early_fail(job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }

    /* Single jobs are modeled as single-job transactions for sake of
     * consolidating the job management logic */
    if (!txn) {
        txn = block_job_txn_new();
        block_job_txn_add_job(txn, job);
        block_job_txn_unref(txn);
    } else {
        block_job_txn_add_job(txn, job);
    }

    return job;
}

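/* Example: how a driver typically instantiates itself through
 * block_job_create() (a sketch; ExampleJob and example_job_driver are
 * illustrative only):
 *
 *     ExampleJob *s = block_job_create(job_id, &example_job_driver, txn, bs,
 *                                      BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL,
 *                                      speed, BLOCK_JOB_DEFAULT, cb, opaque,
 *                                      errp);
 *     if (!s) {
 *         return;     // errp has already been set
 *     }
 *     ... driver-specific initialisation ...
 *     block_job_start(&s->common);
 */
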
void block_job_pause_all(void)
{
    BlockJob *job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_ref(job);
        block_job_pause(job);
        aio_context_release(aio_context);
    }
}

void block_job_early_fail(BlockJob *job)
{
    assert(job->status == BLOCK_JOB_STATUS_CREATED);
    block_job_decommission(job);
}

void block_job_completed(BlockJob *job, int ret)
{
    assert(job && job->txn && !job->completed);
    assert(blk_bs(job->blk)->job == job);
    job->completed = true;
    job->ret = ret;
    if (ret < 0 || block_job_is_cancelled(job)) {
        block_job_completed_txn_abort(job);
    } else {
        block_job_completed_txn_success(job);
    }
}

static bool block_job_should_pause(BlockJob *job)
{
    return job->pause_count > 0;
}

/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
 * Reentering the job coroutine with block_job_enter() before the timer has
 * expired is allowed and cancels the timer.
 *
 * If @ns is (uint64_t) -1, no timer is scheduled and block_job_enter() must be
 * called explicitly. */
static void block_job_do_yield(BlockJob *job, uint64_t ns)
{
    block_job_lock();
    if (ns != -1) {
        timer_mod(&job->sleep_timer, ns);
    }
    job->busy = false;
    block_job_unlock();
    qemu_coroutine_yield();

    /* Set by block_job_enter before re-entering the coroutine.  */
    assert(job->busy);
}

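/* Example: a driver loop throttling itself with the sleep machinery above
 * (a sketch; s->common embeds the BlockJob and s->limit is the driver's
 * RateLimit):
 *
 *     while (!block_job_is_cancelled(&s->common)) {
 *         int64_t delay_ns = ratelimit_calculate_delay(&s->limit, n);
 *         block_job_sleep_ns(&s->common, delay_ns);
 *         ... process n bytes ...
 *     }
 *
 * A block_job_enter() from the monitor deletes the pending sleep_timer and
 * wakes the coroutine immediately.
 */
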
void coroutine_fn block_job_pause_point(BlockJob *job)
{
    assert(job && block_job_started(job));

    if (!block_job_should_pause(job)) {
        return;
    }
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
        BlockJobStatus status = job->status;
        block_job_state_transition(job, status == BLOCK_JOB_STATUS_READY ?
                                        BLOCK_JOB_STATUS_STANDBY :
                                        BLOCK_JOB_STATUS_PAUSED);
        job->paused = true;
        block_job_do_yield(job, -1);
        job->paused = false;
        block_job_state_transition(job, status);
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}

void block_job_resume_all(void)
{
    BlockJob *job, *next;

    QLIST_FOREACH_SAFE(job, &block_jobs, job_list, next) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        block_job_unref(job);
        aio_context_release(aio_context);
    }
}

/*
 * Conditionally enter a block_job pending a call to fn() while
 * under the block_job_lock critical section.
 */
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job))
{
    if (!block_job_started(job)) {
        return;
    }
    if (job->deferred_to_main_loop) {
        return;
    }

    block_job_lock();
    if (job->busy) {
        block_job_unlock();
        return;
    }

    if (fn && !fn(job)) {
        block_job_unlock();
        return;
    }

    assert(!job->deferred_to_main_loop);
    timer_del(&job->sleep_timer);
    job->busy = true;
    block_job_unlock();
    aio_co_wake(job->co);
}

void block_job_enter(BlockJob *job)
{
    block_job_enter_cond(job, NULL);
}

bool block_job_is_cancelled(BlockJob *job)
{
    return job->cancelled;
}

void block_job_sleep_ns(BlockJob *job, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too!  */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
    }

    block_job_pause_point(job);
}

void block_job_yield(BlockJob *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too!  */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, -1);
    }

    block_job_pause_point(job);
}

void block_job_iostatus_reset(BlockJob *job)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->user_paused && job->pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

void block_job_event_ready(BlockJob *job)
{
    block_job_state_transition(job, BLOCK_JOB_STATUS_READY);
    job->ready = true;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_ready(job->driver->job_type,
                                    job->id,
                                    job->len,
                                    job->offset,
                                    job->speed, &error_abort);
}

BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action, &error_abort);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        block_job_pause(job);
        /* make the pause user visible, which will be resumed from QMP. */
        job->user_paused = true;
        block_job_iostatus_set_err(job, error);
    }
    return action;
}

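/* Example: a driver consulting the policy mapping above after a failed write
 * (a sketch; on_error is the BlockdevOnError value the job was configured
 * with):
 *
 *     ret = blk_co_pwritev(job->blk, offset, bytes, &qiov, 0);
 *     if (ret < 0) {
 *         BlockErrorAction action =
 *             block_job_error_action(job, on_error, false, -ret);
 *         if (action == BLOCK_ERROR_ACTION_REPORT) {
 *             return ret;    // fatal: the job will abort with this error
 *         }
 *         // STOP leaves the job user-paused at its next pause point;
 *         // IGNORE lets the driver retry the request.
 *     }
 */
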
typedef struct {
    BlockJob *job;
    AioContext *aio_context;
    BlockJobDeferToMainLoopFn *fn;
    void *opaque;
} BlockJobDeferToMainLoopData;

static void block_job_defer_to_main_loop_bh(void *opaque)
{
    BlockJobDeferToMainLoopData *data = opaque;
    AioContext *aio_context;

    /* Prevent race with block_job_defer_to_main_loop() */
    aio_context_acquire(data->aio_context);

    /* Fetch BDS AioContext again, in case it has changed */
    aio_context = blk_get_aio_context(data->job->blk);
    if (aio_context != data->aio_context) {
        aio_context_acquire(aio_context);
    }

    data->fn(data->job, data->opaque);

    if (aio_context != data->aio_context) {
        aio_context_release(aio_context);
    }

    aio_context_release(data->aio_context);

    g_free(data);
}

void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->aio_context = blk_get_aio_context(job->blk);
    data->fn = fn;
    data->opaque = opaque;
    job->deferred_to_main_loop = true;

    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            block_job_defer_to_main_loop_bh, data);
}
