[qemu.git] / blockjob.c
/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "trace.h"
#include "block/block.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qjson.h"
#include "qemu/coroutine.h"
#include "qemu/id.h"
#include "qmp-commands.h"
#include "qemu/timer.h"
#include "qapi-event.h"

/* Transactional group of block jobs */
struct BlockJobTxn {

    /* Is this txn being cancelled? */
    bool aborting;

    /* List of jobs */
    QLIST_HEAD(, BlockJob) jobs;

    /* Reference count */
    int refcnt;
};

static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);

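/*
 * Get the block job that comes after @job in the global job list, or the
 * first job when @job is NULL.  A typical caller walks the list with
 * something like:
 *
 *     for (job = block_job_next(NULL); job; job = block_job_next(job)) {
 *         ...
 *     }
 */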
BlockJob *block_job_next(BlockJob *job)
{
    if (!job) {
        return QLIST_FIRST(&block_jobs);
    }
    return QLIST_NEXT(job, job_list);
}

BlockJob *block_job_get(const char *id)
{
    BlockJob *job;

    QLIST_FOREACH(job, &block_jobs, job_list) {
        if (!strcmp(id, job->id)) {
            return job;
        }
    }

    return NULL;
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque)
{
    BlockJob *job = opaque;

    if (job->driver->attached_aio_context) {
        job->driver->attached_aio_context(job, new_context);
    }

    block_job_resume(job);
}

static void block_job_drain(BlockJob *job)
{
    /* If the job is not busy (its coroutine has yielded), this kicks it
     * into the next pause point. */
    block_job_enter(job);

    blk_drain(job->blk);
    if (job->driver->drain) {
        job->driver->drain(job);
    }
}

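/*
 * Called when the job's BlockBackend is about to be detached from its
 * AioContext.  The job is paused and then drained repeatedly until it has
 * either reached a pause point or completed; the extra reference keeps the
 * BlockJob alive in case it finishes (and would free itself) while we poll.
 */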
static void block_job_detach_aio_context(void *opaque)
{
    BlockJob *job = opaque;

    /* In case the job terminates during aio_poll()... */
    block_job_ref(job);

    block_job_pause(job);

    while (!job->paused && !job->completed) {
        block_job_drain(job);
    }

    block_job_unref(job);
}

void block_job_add_bdrv(BlockJob *job, BlockDriverState *bs)
{
    job->nodes = g_slist_prepend(job->nodes, bs);
    bdrv_ref(bs);
    bdrv_op_block_all(bs, job->blocker);
}

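/*
 * Create a new block job on @bs.  If @job_id is NULL, the device name of
 * @bs is used instead; an ill-formed or already-used ID fails with an
 * error.  On failure, NULL is returned and @errp is set; on success the
 * caller owns the initial reference to the job (refcnt == 1).
 */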
void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       BlockDriverState *bs, int64_t speed,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;

    assert(cb);
    if (bs->job) {
        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }

    if (job_id == NULL) {
        job_id = bdrv_get_device_name(bs);
        if (!*job_id) {
            error_setg(errp, "An explicit job ID is required for this node");
            return NULL;
        }
    }

    if (!id_wellformed(job_id)) {
        error_setg(errp, "Invalid job ID '%s'", job_id);
        return NULL;
    }

    if (block_job_get(job_id)) {
        error_setg(errp, "Job ID '%s' already in use", job_id);
        return NULL;
    }

    blk = blk_new();
    blk_insert_bs(blk, bs);

    job = g_malloc0(driver->instance_size);
    error_setg(&job->blocker, "block device is in use by block job: %s",
               BlockJobType_lookup[driver->job_type]);
    block_job_add_bdrv(job, bs);
    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    job->driver        = driver;
    job->id            = g_strdup(job_id);
    job->blk           = blk;
    job->cb            = cb;
    job->opaque        = opaque;
    job->busy          = true;
    job->refcnt        = 1;
    bs->job = job;

    QLIST_INSERT_HEAD(&block_jobs, job, job_list);

    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
                                 block_job_detach_aio_context, job);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            block_job_unref(job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }
    return job;
}

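/*
 * Illustrative sketch only (not part of this file): a concrete job defines
 * a BlockJobDriver and embeds BlockJob as the first member of its own state
 * struct, roughly like the following.  MyBlockJob, my_job_set_speed and the
 * chosen job_type are hypothetical names used purely for illustration:
 *
 *     typedef struct MyBlockJob {
 *         BlockJob common;
 *         ...job-specific state...
 *     } MyBlockJob;
 *
 *     static const BlockJobDriver my_job_driver = {
 *         .instance_size = sizeof(MyBlockJob),
 *         .job_type      = BLOCK_JOB_TYPE_STREAM,
 *         .set_speed     = my_job_set_speed,
 *     };
 *
 *     MyBlockJob *s = block_job_create(job_id, &my_job_driver, bs, speed,
 *                                      cb, opaque, errp);
 */
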
void block_job_ref(BlockJob *job)
{
    ++job->refcnt;
}

void block_job_unref(BlockJob *job)
{
    if (--job->refcnt == 0) {
        GSList *l;
        BlockDriverState *bs = blk_bs(job->blk);
        bs->job = NULL;
        for (l = job->nodes; l; l = l->next) {
            bs = l->data;
            bdrv_op_unblock_all(bs, job->blocker);
            bdrv_unref(bs);
        }
        g_slist_free(job->nodes);
        blk_remove_aio_context_notifier(job->blk,
                                        block_job_attached_aio_context,
                                        block_job_detach_aio_context, job);
        blk_unref(job->blk);
        error_free(job->blocker);
        g_free(job->id);
        QLIST_REMOVE(job, job_list);
        g_free(job);
    }
}

static void block_job_completed_single(BlockJob *job)
{
    if (!job->ret) {
        if (job->driver->commit) {
            job->driver->commit(job);
        }
    } else {
        if (job->driver->abort) {
            job->driver->abort(job);
        }
    }
    job->cb(job->opaque, job->ret);
    if (job->txn) {
        block_job_txn_unref(job->txn);
    }
    block_job_unref(job);
}

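/*
 * Abort path for a job transaction: the first job to fail marks the
 * transaction as aborting, cancels every other still-running job in it,
 * and finally completes all of them (including itself) via
 * block_job_completed_single().
 */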
static void block_job_completed_txn_abort(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job, *next;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    /* We are the first failed job. Cancel other jobs. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
    }
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job == job || other_job->completed) {
            /* Other jobs are "effectively" cancelled by us, set the status for
             * them; this job, however, may or may not be cancelled, depending
             * on the caller, so leave it. */
            if (other_job != job) {
                other_job->cancelled = true;
            }
            continue;
        }
        block_job_cancel_sync(other_job);
        assert(other_job->completed);
    }
    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        ctx = blk_get_aio_context(other_job->blk);
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }
}

static void block_job_completed_txn_success(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job, *next;
    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!other_job->completed) {
            return;
        }
    }
    /* We are the last completed job, commit the transaction. */
    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
        assert(other_job->ret == 0);
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }
}

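/*
 * Called by the job implementation once it has finished, with result @ret.
 * Jobs outside a transaction complete immediately; within a transaction,
 * failure or cancellation aborts the whole group, while success is held
 * back until every job in the transaction has completed.
 */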
void block_job_completed(BlockJob *job, int ret)
{
    assert(blk_bs(job->blk)->job == job);
    assert(!job->completed);
    job->completed = true;
    job->ret = ret;
    if (!job->txn) {
        block_job_completed_single(job);
    } else if (ret < 0 || block_job_is_cancelled(job)) {
        block_job_completed_txn_abort(job);
    } else {
        block_job_completed_txn_success(job);
    }
}

void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    Error *local_err = NULL;

    if (!job->driver->set_speed) {
        error_setg(errp, QERR_UNSUPPORTED);
        return;
    }
    job->driver->set_speed(job, speed, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    job->speed = speed;
}

void block_job_complete(BlockJob *job, Error **errp)
{
    if (job->pause_count || job->cancelled || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}

void block_job_pause(BlockJob *job)
{
    job->pause_count++;
}

static bool block_job_should_pause(BlockJob *job)
{
    return job->pause_count > 0;
}

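/*
 * Pause point, called from the job coroutine at places where it is safe to
 * stop.  If a pause has been requested (and the job is not cancelled), the
 * driver's pause callback runs, the coroutine yields until
 * block_job_resume() re-enters it, and then the driver's resume callback
 * runs.
 */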
void coroutine_fn block_job_pause_point(BlockJob *job)
{
    if (!block_job_should_pause(job)) {
        return;
    }
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
        job->paused = true;
        job->busy = false;
        qemu_coroutine_yield(); /* wait for block_job_resume() */
        job->busy = true;
        job->paused = false;
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}

void block_job_resume(BlockJob *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }
    block_job_enter(job);
}

void block_job_enter(BlockJob *job)
{
    if (job->co && !job->busy) {
        qemu_coroutine_enter(job->co);
    }
}

void block_job_cancel(BlockJob *job)
{
    job->cancelled = true;
    block_job_iostatus_reset(job);
    block_job_enter(job);
}

bool block_job_is_cancelled(BlockJob *job)
{
    return job->cancelled;
}

void block_job_iostatus_reset(BlockJob *job)
{
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
    if (job->driver->iostatus_reset) {
        job->driver->iostatus_reset(job);
    }
}

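/*
 * Run @finish on @job and then wait synchronously for the job to complete.
 * Returns -EBUSY if @finish fails, -ECANCELED if the job was cancelled
 * without an error of its own, and the job's return value otherwise.
 */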
static int block_job_finish_sync(BlockJob *job,
                                 void (*finish)(BlockJob *, Error **errp),
                                 Error **errp)
{
    Error *local_err = NULL;
    int ret;

    assert(blk_bs(job->blk)->job == job);

    block_job_ref(job);

    finish(job, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        block_job_unref(job);
        return -EBUSY;
    }
    /* block_job_drain calls block_job_enter, and it should be enough to
     * induce progress until the job completes or moves to the main thread.
     */
    while (!job->deferred_to_main_loop && !job->completed) {
        block_job_drain(job);
    }
    while (!job->completed) {
        aio_poll(qemu_get_aio_context(), true);
    }
    ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
    block_job_unref(job);
    return ret;
}

/* A wrapper around block_job_cancel() taking an Error ** parameter so it may be
 * used with block_job_finish_sync() without the need for (rather nasty)
 * function pointer casts there. */
static void block_job_cancel_err(BlockJob *job, Error **errp)
{
    block_job_cancel(job);
}

int block_job_cancel_sync(BlockJob *job)
{
    return block_job_finish_sync(job, &block_job_cancel_err, NULL);
}

void block_job_cancel_sync_all(void)
{
    BlockJob *job;
    AioContext *aio_context;

    while ((job = QLIST_FIRST(&block_jobs))) {
        aio_context = blk_get_aio_context(job->blk);
        aio_context_acquire(aio_context);
        block_job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}

int block_job_complete_sync(BlockJob *job, Error **errp)
{
    return block_job_finish_sync(job, &block_job_complete, errp);
}

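/*
 * Yield the job coroutine for @ns nanoseconds on @type's clock (unless a
 * pause is pending, in which case we go straight to the pause point).  A
 * job's main loop typically interleaves work, throttling and cancellation
 * checks roughly like this sketch, where s, delay_ns and the work step are
 * hypothetical:
 *
 *     for (;;) {
 *         block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
 *         if (block_job_is_cancelled(&s->common)) {
 *             break;
 *         }
 *         ...process one chunk and update s->common.offset...
 *     }
 */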
void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    job->busy = false;
    if (!block_job_should_pause(job)) {
        co_aio_sleep_ns(blk_get_aio_context(job->blk), type, ns);
    }
    job->busy = true;

    block_job_pause_point(job);
}

void block_job_yield(BlockJob *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    job->busy = false;
    if (!block_job_should_pause(job)) {
        qemu_coroutine_yield();
    }
    job->busy = true;

    block_job_pause_point(job);
}

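/*
 * Build the BlockJobInfo structure reported to management (for example via
 * the query-block-jobs QMP command) from the job's current progress fields.
 */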
BlockJobInfo *block_job_query(BlockJob *job)
{
    BlockJobInfo *info = g_new0(BlockJobInfo, 1);
    info->type      = g_strdup(BlockJobType_lookup[job->driver->job_type]);
    info->device    = g_strdup(job->id);
    info->len       = job->len;
    info->busy      = job->busy;
    info->paused    = job->pause_count > 0;
    info->offset    = job->offset;
    info->speed     = job->speed;
    info->io_status = job->iostatus;
    info->ready     = job->ready;
    return info;
}

static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void block_job_event_cancelled(BlockJob *job)
{
    qapi_event_send_block_job_cancelled(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        &error_abort);
}

void block_job_event_completed(BlockJob *job, const char *msg)
{
    qapi_event_send_block_job_completed(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        !!msg,
                                        msg,
                                        &error_abort);
}

void block_job_event_ready(BlockJob *job)
{
    job->ready = true;

    qapi_event_send_block_job_ready(job->driver->job_type,
                                    job->id,
                                    job->len,
                                    job->offset,
                                    job->speed, &error_abort);
}

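/*
 * Map an I/O error to the action configured by @on_err and emit the
 * corresponding BLOCK_JOB_ERROR event.  For the "stop" action the job is
 * paused as a user-visible pause (so it can later be continued from QMP)
 * and the error is recorded in the job's iostatus.
 */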
BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    qapi_event_send_block_job_error(job->id,
                                    is_read ? IO_OPERATION_TYPE_READ :
                                    IO_OPERATION_TYPE_WRITE,
                                    action, &error_abort);
    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* make the pause user visible, which will be resumed from QMP. */
        job->user_paused = true;
        block_job_pause(job);
        block_job_iostatus_set_err(job, error);
    }
    return action;
}

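/*
 * Job completion code often has to run in the main loop, typically because
 * it manipulates the block graph.  block_job_defer_to_main_loop() packages
 * the callback into a bottom half scheduled on the main AioContext; the
 * bottom half then re-acquires the job's (possibly changed) AioContext
 * before invoking the callback.
 */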
typedef struct {
    BlockJob *job;
    AioContext *aio_context;
    BlockJobDeferToMainLoopFn *fn;
    void *opaque;
} BlockJobDeferToMainLoopData;

static void block_job_defer_to_main_loop_bh(void *opaque)
{
    BlockJobDeferToMainLoopData *data = opaque;
    AioContext *aio_context;

    /* Prevent race with block_job_defer_to_main_loop() */
    aio_context_acquire(data->aio_context);

    /* Fetch BDS AioContext again, in case it has changed */
    aio_context = blk_get_aio_context(data->job->blk);
    aio_context_acquire(aio_context);

    data->job->deferred_to_main_loop = false;
    data->fn(data->job, data->opaque);

    aio_context_release(aio_context);

    aio_context_release(data->aio_context);

    g_free(data);
}

void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->aio_context = blk_get_aio_context(job->blk);
    data->fn = fn;
    data->opaque = opaque;
    job->deferred_to_main_loop = true;

    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            block_job_defer_to_main_loop_bh, data);
}

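/*
 * Job transactions: a BlockJobTxn groups jobs so that they either all
 * commit or all abort.  A transaction is created with block_job_txn_new()
 * and jobs are attached with block_job_txn_add_job(); each attached job
 * takes a reference on the transaction, which is dropped again when that
 * job completes, and the transaction is freed when the last reference
 * goes away.
 */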
BlockJobTxn *block_job_txn_new(void)
{
    BlockJobTxn *txn = g_new0(BlockJobTxn, 1);
    QLIST_INIT(&txn->jobs);
    txn->refcnt = 1;
    return txn;
}

static void block_job_txn_ref(BlockJobTxn *txn)
{
    txn->refcnt++;
}

void block_job_txn_unref(BlockJobTxn *txn)
{
    if (txn && --txn->refcnt == 0) {
        g_free(txn);
    }
}

void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
{
    if (!txn) {
        return;
    }

    assert(!job->txn);
    job->txn = txn;

    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
    block_job_txn_ref(txn);
}