test-bdrv-drain: AIO_WAIT_WHILE() in job .commit/.abort
[qemu/ar7.git] / blockjob.c
/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/trace.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qerror.h"
#include "qemu/coroutine.h"
#include "qemu/timer.h"
/*
 * The block job API is composed of two categories of functions.
 *
 * The first includes functions used by the monitor.  The monitor is
 * peculiar in that it accesses the block job list with block_job_get, and
 * therefore needs consistency across block_job_get and the actual operation
 * (e.g. block_job_set_speed).  The consistency is achieved with
 * aio_context_acquire/release.  These functions are declared in blockjob.h.
 *
 * The second includes functions used by the block job drivers and sometimes
 * by the core block layer.  These do not care about locking, because the
 * whole coroutine runs under the AioContext lock, and are declared in
 * blockjob_int.h.
 */
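
/*
 * An illustrative monitor-side sketch of the pattern described above (an
 * assumption for documentation purposes, not a function in this file;
 * qmp_hypothetical_set_speed is a made-up name, while block_job_get,
 * blk_get_aio_context, aio_context_acquire/release and block_job_set_speed
 * are the real APIs being exercised):
 *
 *     void qmp_hypothetical_set_speed(const char *device, int64_t speed,
 *                                     Error **errp)
 *     {
 *         BlockJob *job = block_job_get(device);
 *         AioContext *ctx;
 *
 *         if (!job) {
 *             error_setg(errp, "Block job '%s' not found", device);
 *             return;
 *         }
 *         ctx = blk_get_aio_context(job->blk);
 *         aio_context_acquire(ctx);
 *         block_job_set_speed(job, speed, errp);
 *         aio_context_release(ctx);
 *     }
 */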
static bool is_block_job(Job *job)
{
    return job_type(job) == JOB_TYPE_BACKUP ||
           job_type(job) == JOB_TYPE_COMMIT ||
           job_type(job) == JOB_TYPE_MIRROR ||
           job_type(job) == JOB_TYPE_STREAM;
}

BlockJob *block_job_next(BlockJob *bjob)
{
    Job *job = bjob ? &bjob->job : NULL;

    do {
        job = job_next(job);
    } while (job && !is_block_job(job));

    return job ? container_of(job, BlockJob, job) : NULL;
}
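
/*
 * Illustrative use (an assumption, modelled on how the monitor walks the
 * job list): start from NULL and feed each result back in until
 * block_job_next() returns NULL.
 *
 *     for (job = block_job_next(NULL); job; job = block_job_next(job)) {
 *         ...visit one block job...
 *     }
 */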
BlockJob *block_job_get(const char *id)
{
    Job *job = job_get(id);

    if (job && is_block_job(job)) {
        return container_of(job, BlockJob, job);
    } else {
        return NULL;
    }
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque);
static void block_job_detach_aio_context(void *opaque);
void block_job_free(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    BlockDriverState *bs = blk_bs(bjob->blk);

    bs->job = NULL;
    block_job_remove_all_bdrv(bjob);
    blk_remove_aio_context_notifier(bjob->blk,
                                    block_job_attached_aio_context,
                                    block_job_detach_aio_context, bjob);
    blk_unref(bjob->blk);
    error_free(bjob->blocker);
}
static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque)
{
    BlockJob *job = opaque;
    const JobDriver *drv = job->job.driver;
    BlockJobDriver *bjdrv = container_of(drv, BlockJobDriver, job_driver);

    job->job.aio_context = new_context;
    if (bjdrv->attached_aio_context) {
        bjdrv->attached_aio_context(job, new_context);
    }

    job_resume(&job->job);
}
void block_job_drain(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    const JobDriver *drv = job->driver;
    BlockJobDriver *bjdrv = container_of(drv, BlockJobDriver, job_driver);

    blk_drain(bjob->blk);
    if (bjdrv->drain) {
        bjdrv->drain(bjob);
    }
}
static void block_job_detach_aio_context(void *opaque)
{
    BlockJob *job = opaque;

    /* In case the job terminates during aio_poll()... */
    job_ref(&job->job);

    job_pause(&job->job);

    while (!job->job.paused && !job_is_completed(&job->job)) {
        job_drain(&job->job);
    }

    job->job.aio_context = NULL;
    job_unref(&job->job);
}
static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'", job_type_str(&job->job), job->job.id);
}

static void child_job_drained_begin(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    job_pause(&job->job);
}

static bool child_job_drained_poll(BdrvChild *c)
{
    BlockJob *bjob = c->opaque;
    Job *job = &bjob->job;
    const BlockJobDriver *drv = block_job_driver(bjob);

    /* An inactive or completed job doesn't have any pending requests. Jobs
     * with !job->busy are either already paused or have a pause point after
     * being reentered, so no job driver code will run before they pause. */
    if (!job->busy || job_is_completed(job)) {
        return false;
    }

    /* Otherwise, assume that it isn't fully stopped yet, but allow the job to
     * override this assumption. */
    if (drv->drained_poll) {
        return drv->drained_poll(bjob);
    } else {
        return true;
    }
}

static void child_job_drained_end(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    job_resume(&job->job);
}
static const BdrvChildRole child_job = {
    .get_parent_desc    = child_job_get_parent_desc,
    .drained_begin      = child_job_drained_begin,
    .drained_poll       = child_job_drained_poll,
    .drained_end        = child_job_drained_end,
    .stay_at_node       = true,
};
void block_job_remove_all_bdrv(BlockJob *job)
{
    GSList *l;
    for (l = job->nodes; l; l = l->next) {
        BdrvChild *c = l->data;
        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);
    }
    g_slist_free(job->nodes);
    job->nodes = NULL;
}
int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;

    c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
                               job, errp);
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_ref(bs);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}
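
/*
 * Illustrative driver-side use (an assumption; target_bs and the permission
 * choice are hypothetical, but drivers such as mirror attach their extra
 * nodes this way): add a second node to the job, propagating any permission
 * conflict back to the caller.
 *
 *     ret = block_job_add_bdrv(&s->common, "target", target_bs,
 *                              BLK_PERM_WRITE, BLK_PERM_ALL, errp);
 *     if (ret < 0) {
 *         ...fail job creation...
 *     }
 */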
void block_job_wakeup_all_bdrv(BlockJob *job)
{
    GSList *l;

    for (l = job->nodes; l; l = l->next) {
        BdrvChild *c = l->data;
        bdrv_wakeup(c->bs);
    }
}

static void block_job_on_idle(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    block_job_wakeup_all_bdrv(job);
}

bool block_job_is_internal(BlockJob *job)
{
    return (job->job.id == NULL);
}

const BlockJobDriver *block_job_driver(BlockJob *job)
{
    return container_of(job->job.driver, BlockJobDriver, job_driver);
}

/* Assumes the job_mutex is held */
static bool job_timer_pending(Job *job)
{
    return timer_pending(&job->sleep_timer);
}
void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    int64_t old_speed = job->speed;

    if (job_apply_verb(&job->job, JOB_VERB_SET_SPEED, errp)) {
        return;
    }
    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }

    ratelimit_set_speed(&job->limit, speed, BLOCK_JOB_SLICE_TIME);

    job->speed = speed;
    if (speed && speed <= old_speed) {
        return;
    }

    /* kick only if a timer is pending */
    job_enter_cond(&job->job, job_timer_pending);
}
int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n)
{
    if (!job->speed) {
        return 0;
    }

    return ratelimit_calculate_delay(&job->limit, n);
}
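
/*
 * Illustrative driver-side use (an assumption, following the pattern of the
 * stream and commit jobs): after copying n bytes, ask the rate limiter how
 * long to wait and yield for that long inside the job coroutine.
 *
 *     delay_ns = block_job_ratelimit_get_delay(&s->common, n);
 *     job_sleep_ns(&s->common.job, delay_ns);
 */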
BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }
    info = g_new0(BlockJobInfo, 1);
    info->type      = g_strdup(job_type_str(&job->job));
    info->device    = g_strdup(job->job.id);
    info->busy      = atomic_read(&job->job.busy);
    info->paused    = job->job.pause_count > 0;
    info->offset    = job->job.progress_current;
    info->len       = job->job.progress_total;
    info->speed     = job->speed;
    info->io_status = job->iostatus;
    info->ready     = job_is_ready(&job->job);
    info->status    = job->job.status;
    info->auto_finalize = job->job.auto_finalize;
    info->auto_dismiss  = job->job.auto_dismiss;
    info->has_error = job->job.ret != 0;
    info->error     = job->job.ret ? g_strdup(strerror(-job->job.ret)) : NULL;
    return info;
}
static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
static void block_job_event_cancelled(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_cancelled(job_type(&job->job),
                                        job->job.id,
                                        job->job.progress_total,
                                        job->job.progress_current,
                                        job->speed);
}

static void block_job_event_completed(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    const char *msg = NULL;

    if (block_job_is_internal(job)) {
        return;
    }

    if (job->job.ret < 0) {
        msg = strerror(-job->job.ret);
    }

    qapi_event_send_block_job_completed(job_type(&job->job),
                                        job->job.id,
                                        job->job.progress_total,
                                        job->job.progress_current,
                                        job->speed,
                                        !!msg,
                                        msg);
}

static void block_job_event_pending(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_pending(job_type(&job->job),
                                      job->job.id);
}

static void block_job_event_ready(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_ready(job_type(&job->job),
                                    job->job.id,
                                    job->job.progress_total,
                                    job->job.progress_current,
                                    job->speed);
}
/*
 * API for block job drivers and the block layer.  These functions are
 * declared in blockjob_int.h.
 */
void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       JobTxn *txn, BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    int ret;

    if (bs->job) {
        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }

    if (job_id == NULL && !(flags & JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
    }

    blk = blk_new(perm, shared_perm);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    job = job_create(job_id, &driver->job_driver, txn, blk_get_aio_context(blk),
                     flags, cb, opaque, errp);
    if (job == NULL) {
        blk_unref(blk);
        return NULL;
    }

    assert(is_block_job(&job->job));
    assert(job->job.driver->free == &block_job_free);
    assert(job->job.driver->user_resume == &block_job_user_resume);
    assert(job->job.driver->drain == &block_job_drain);

    job->blk = blk;

    job->finalize_cancelled_notifier.notify = block_job_event_cancelled;
    job->finalize_completed_notifier.notify = block_job_event_completed;
    job->pending_notifier.notify = block_job_event_pending;
    job->ready_notifier.notify = block_job_event_ready;
    job->idle_notifier.notify = block_job_on_idle;

    notifier_list_add(&job->job.on_finalize_cancelled,
                      &job->finalize_cancelled_notifier);
    notifier_list_add(&job->job.on_finalize_completed,
                      &job->finalize_completed_notifier);
    notifier_list_add(&job->job.on_pending, &job->pending_notifier);
    notifier_list_add(&job->job.on_ready, &job->ready_notifier);
    notifier_list_add(&job->job.on_idle, &job->idle_notifier);

    error_setg(&job->blocker, "block device is in use by block job: %s",
               job_type_str(&job->job));
    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
    bs->job = job;

    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
                                 block_job_detach_aio_context, job);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            job_early_fail(&job->job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }

    return job;
}
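
/*
 * Illustrative driver-side use (an assumption; hypothetical_job_driver is a
 * made-up name, but the real stream/commit/mirror/backup jobs create
 * themselves in this shape): allocate the job, bail out on failure, then
 * enter the job coroutine with job_start().
 *
 *     s = block_job_create(job_id, &hypothetical_job_driver, NULL, bs,
 *                          0, BLK_PERM_ALL, speed, JOB_DEFAULT,
 *                          NULL, NULL, errp);
 *     if (!s) {
 *         return;
 *     }
 *     job_start(&s->common.job);
 */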
void block_job_iostatus_reset(BlockJob *job)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->job.user_paused && job->job.pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

void block_job_user_resume(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    block_job_iostatus_reset(bjob);
}
BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->job.id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        job_pause(&job->job);
        /* make the pause user visible, which will be resumed from QMP. */
        job->job.user_paused = true;
        block_job_iostatus_set_err(job, error);
    }
    return action;
}
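
/*
 * Illustrative driver-side use (an assumption, modelled on the backup and
 * stream jobs): map an I/O error through the job's on-error policy; REPORT
 * fails the job, STOP has already paused it inside block_job_error_action,
 * and IGNORE lets the caller retry.
 *
 *     if (ret < 0) {
 *         BlockErrorAction action =
 *             block_job_error_action(&s->common, s->on_error, is_read, -ret);
 *         if (action == BLOCK_ERROR_ACTION_REPORT) {
 *             return ret;
 *         }
 *     }
 */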