/*
 * Blockjob tests
 *
 * Copyright Igalia, S.L. 2016
 *
 * Author:
 *  Alberto Garcia   <berto@igalia.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qdict.h"
#include "iothread.h"
21 static const BlockJobDriver test_block_job_driver
= {
23 .instance_size
= sizeof(BlockJob
),
24 .free
= block_job_free
,
25 .user_resume
= block_job_user_resume
,
29 static void block_job_cb(void *opaque
, int ret
)
33 static BlockJob
*mk_job(BlockBackend
*blk
, const char *id
,
34 const BlockJobDriver
*drv
, bool should_succeed
,
40 job
= block_job_create(id
, drv
, NULL
, blk_bs(blk
),
41 0, BLK_PERM_ALL
, 0, flags
, block_job_cb
,
45 g_assert_nonnull(job
);
47 g_assert_cmpstr(job
->job
.id
, ==, id
);
49 g_assert_cmpstr(job
->job
.id
, ==, blk_name(blk
));
52 error_free_or_abort(&err
);
59 static BlockJob
*do_test_id(BlockBackend
*blk
, const char *id
,
62 return mk_job(blk
, id
, &test_block_job_driver
,
63 should_succeed
, JOB_DEFAULT
);
66 /* This creates a BlockBackend (optionally with a name) with a
67 * BlockDriverState inserted. */
68 static BlockBackend
*create_blk(const char *name
)
70 /* No I/O is performed on this device */
71 BlockBackend
*blk
= blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL
);
74 QDict
*opt
= qdict_new();
75 qdict_put_str(opt
, "file.read-zeroes", "on");
76 bs
= bdrv_open("null-co://", NULL
, opt
, 0, &error_abort
);
79 blk_insert_bs(blk
, bs
, &error_abort
);
84 monitor_add_blk(blk
, name
, &err
);
91 /* This destroys the backend */
92 static void destroy_blk(BlockBackend
*blk
)
94 if (blk_name(blk
)[0] != '\0') {
95 monitor_remove_blk(blk
);
102 static void test_job_ids(void)
104 BlockBackend
*blk
[3];
107 blk
[0] = create_blk(NULL
);
108 blk
[1] = create_blk("drive1");
109 blk
[2] = create_blk("drive2");
111 /* No job ID provided and the block backend has no name */
112 job
[0] = do_test_id(blk
[0], NULL
, false);
114 /* These are all invalid job IDs */
115 job
[0] = do_test_id(blk
[0], "0id", false);
116 job
[0] = do_test_id(blk
[0], "", false);
117 job
[0] = do_test_id(blk
[0], " ", false);
118 job
[0] = do_test_id(blk
[0], "123", false);
119 job
[0] = do_test_id(blk
[0], "_id", false);
120 job
[0] = do_test_id(blk
[0], "-id", false);
121 job
[0] = do_test_id(blk
[0], ".id", false);
122 job
[0] = do_test_id(blk
[0], "#id", false);
124 /* This one is valid */
125 job
[0] = do_test_id(blk
[0], "id0", true);
127 /* We can have two jobs in the same BDS */
128 job
[1] = do_test_id(blk
[0], "id1", true);
129 job_early_fail(&job
[1]->job
);
131 /* Duplicate job IDs are not allowed */
132 job
[1] = do_test_id(blk
[1], "id0", false);
134 /* But once job[0] finishes we can reuse its ID */
135 job_early_fail(&job
[0]->job
);
136 job
[1] = do_test_id(blk
[1], "id0", true);
138 /* No job ID specified, defaults to the backend name ('drive1') */
139 job_early_fail(&job
[1]->job
);
140 job
[1] = do_test_id(blk
[1], NULL
, true);
142 /* Duplicate job ID */
143 job
[2] = do_test_id(blk
[2], "drive1", false);
145 /* The ID of job[2] would default to 'drive2' but it is already in use */
146 job
[0] = do_test_id(blk
[0], "drive2", true);
147 job
[2] = do_test_id(blk
[2], NULL
, false);
149 /* This one is valid */
150 job
[2] = do_test_id(blk
[2], "id_2", true);
152 job_early_fail(&job
[0]->job
);
153 job_early_fail(&job
[1]->job
);
154 job_early_fail(&job
[2]->job
);
161 typedef struct CancelJob
{
164 bool should_converge
;
165 bool should_complete
;
168 static void cancel_job_complete(Job
*job
, Error
**errp
)
170 CancelJob
*s
= container_of(job
, CancelJob
, common
.job
);
171 s
->should_complete
= true;
174 static int coroutine_fn
cancel_job_run(Job
*job
, Error
**errp
)
176 CancelJob
*s
= container_of(job
, CancelJob
, common
.job
);
178 while (!s
->should_complete
) {
179 if (job_is_cancelled(&s
->common
.job
)) {
183 if (!job_is_ready(&s
->common
.job
) && s
->should_converge
) {
184 job_transition_to_ready(&s
->common
.job
);
187 job_sleep_ns(&s
->common
.job
, 100000);
193 static const BlockJobDriver test_cancel_driver
= {
195 .instance_size
= sizeof(CancelJob
),
196 .free
= block_job_free
,
197 .user_resume
= block_job_user_resume
,
198 .run
= cancel_job_run
,
199 .complete
= cancel_job_complete
,
203 static CancelJob
*create_common(Job
**pjob
)
210 blk
= create_blk(NULL
);
211 bjob
= mk_job(blk
, "Steve", &test_cancel_driver
, true,
212 JOB_MANUAL_FINALIZE
| JOB_MANUAL_DISMISS
);
214 WITH_JOB_LOCK_GUARD() {
216 assert(job
->status
== JOB_STATUS_CREATED
);
219 s
= container_of(bjob
, CancelJob
, common
);
226 static void cancel_common(CancelJob
*s
)
228 BlockJob
*job
= &s
->common
;
229 BlockBackend
*blk
= s
->blk
;
230 JobStatus sts
= job
->job
.status
;
231 AioContext
*ctx
= job
->job
.aio_context
;
233 job_cancel_sync(&job
->job
, true);
234 WITH_JOB_LOCK_GUARD() {
235 if (sts
!= JOB_STATUS_CREATED
&& sts
!= JOB_STATUS_CONCLUDED
) {
236 Job
*dummy
= &job
->job
;
237 job_dismiss_locked(&dummy
, &error_abort
);
239 assert(job
->job
.status
== JOB_STATUS_NULL
);
240 job_unref_locked(&job
->job
);
243 aio_context_acquire(ctx
);
245 aio_context_release(ctx
);
249 static void test_cancel_created(void)
254 s
= create_common(&job
);
258 static void assert_job_status_is(Job
*job
, int status
)
260 WITH_JOB_LOCK_GUARD() {
261 assert(job
->status
== status
);
265 static void test_cancel_running(void)
270 s
= create_common(&job
);
273 assert_job_status_is(job
, JOB_STATUS_RUNNING
);
278 static void test_cancel_paused(void)
283 s
= create_common(&job
);
286 WITH_JOB_LOCK_GUARD() {
287 assert(job
->status
== JOB_STATUS_RUNNING
);
288 job_user_pause_locked(job
, &error_abort
);
291 assert_job_status_is(job
, JOB_STATUS_PAUSED
);
296 static void test_cancel_ready(void)
301 s
= create_common(&job
);
304 assert_job_status_is(job
, JOB_STATUS_RUNNING
);
306 s
->should_converge
= true;
308 assert_job_status_is(job
, JOB_STATUS_READY
);
313 static void test_cancel_standby(void)
318 s
= create_common(&job
);
321 assert_job_status_is(job
, JOB_STATUS_RUNNING
);
323 s
->should_converge
= true;
325 WITH_JOB_LOCK_GUARD() {
326 assert(job
->status
== JOB_STATUS_READY
);
327 job_user_pause_locked(job
, &error_abort
);
330 assert_job_status_is(job
, JOB_STATUS_STANDBY
);
335 static void test_cancel_pending(void)
340 s
= create_common(&job
);
343 assert_job_status_is(job
, JOB_STATUS_RUNNING
);
345 s
->should_converge
= true;
347 WITH_JOB_LOCK_GUARD() {
348 assert(job
->status
== JOB_STATUS_READY
);
349 job_complete_locked(job
, &error_abort
);
352 while (!job
->deferred_to_main_loop
) {
353 aio_poll(qemu_get_aio_context(), true);
355 assert_job_status_is(job
, JOB_STATUS_READY
);
356 aio_poll(qemu_get_aio_context(), true);
357 assert_job_status_is(job
, JOB_STATUS_PENDING
);
362 static void test_cancel_concluded(void)
367 s
= create_common(&job
);
370 assert_job_status_is(job
, JOB_STATUS_RUNNING
);
372 s
->should_converge
= true;
374 WITH_JOB_LOCK_GUARD() {
375 assert(job
->status
== JOB_STATUS_READY
);
376 job_complete_locked(job
, &error_abort
);
379 while (!job
->deferred_to_main_loop
) {
380 aio_poll(qemu_get_aio_context(), true);
382 assert_job_status_is(job
, JOB_STATUS_READY
);
383 aio_poll(qemu_get_aio_context(), true);
384 assert_job_status_is(job
, JOB_STATUS_PENDING
);
386 WITH_JOB_LOCK_GUARD() {
387 job_finalize_locked(job
, &error_abort
);
388 assert(job
->status
== JOB_STATUS_CONCLUDED
);
394 /* (See test_yielding_driver for the job description) */
395 typedef struct YieldingJob
{
397 bool should_complete
;
400 static void yielding_job_complete(Job
*job
, Error
**errp
)
402 YieldingJob
*s
= container_of(job
, YieldingJob
, common
.job
);
403 s
->should_complete
= true;
407 static int coroutine_fn
yielding_job_run(Job
*job
, Error
**errp
)
409 YieldingJob
*s
= container_of(job
, YieldingJob
, common
.job
);
411 job_transition_to_ready(job
);
413 while (!s
->should_complete
) {
421 * This job transitions immediately to the READY state, and then
422 * yields until it is to complete.
424 static const BlockJobDriver test_yielding_driver
= {
426 .instance_size
= sizeof(YieldingJob
),
427 .free
= block_job_free
,
428 .user_resume
= block_job_user_resume
,
429 .run
= yielding_job_run
,
430 .complete
= yielding_job_complete
,
435 * Test that job_complete_locked() works even on jobs that are in a paused
436 * state (i.e., STANDBY).
438 * To do this, run YieldingJob in an IO thread, get it into the READY
439 * state, then have a drained section. Before ending the section,
440 * acquire the context so the job will not be entered and will thus
443 * job_complete_locked() should still work without error.
445 * Note that on the QMP interface, it is impossible to lock an IO
446 * thread before a drained section ends. In practice, the
447 * bdrv_drain_all_end() and the aio_context_acquire() will be
448 * reversed. However, that makes for worse reproducibility here:
449 * Sometimes, the job would no longer be in STANDBY then but already
450 * be started. We cannot prevent that, because the IO thread runs
451 * concurrently. We can only prevent it by taking the lock before
452 * ending the drained section, so we do that.
454 * (You can reverse the order of operations and most of the time the
455 * test will pass, but sometimes the assert(status == STANDBY) will
458 static void test_complete_in_standby(void)
466 /* Create a test drive, move it to an IO thread */
467 blk
= create_blk(NULL
);
468 iothread
= iothread_new();
470 ctx
= iothread_get_aio_context(iothread
);
471 blk_set_aio_context(blk
, ctx
, &error_abort
);
473 /* Create our test job */
474 bjob
= mk_job(blk
, "job", &test_yielding_driver
, true,
475 JOB_MANUAL_FINALIZE
| JOB_MANUAL_DISMISS
);
477 assert_job_status_is(job
, JOB_STATUS_CREATED
);
479 /* Wait for the job to become READY */
482 * Here we are waiting for the status to change, so don't bother
483 * protecting the read every time.
485 AIO_WAIT_WHILE_UNLOCKED(ctx
, job
->status
!= JOB_STATUS_READY
);
487 /* Begin the drained section, pausing the job */
488 bdrv_drain_all_begin();
489 assert_job_status_is(job
, JOB_STATUS_STANDBY
);
491 /* Lock the IO thread to prevent the job from being run */
492 aio_context_acquire(ctx
);
493 /* This will schedule the job to resume it */
494 bdrv_drain_all_end();
495 aio_context_release(ctx
);
497 WITH_JOB_LOCK_GUARD() {
498 /* But the job cannot run, so it will remain on standby */
499 assert(job
->status
== JOB_STATUS_STANDBY
);
501 /* Even though the job is on standby, this should work */
502 job_complete_locked(job
, &error_abort
);
504 /* The test is done now, clean up. */
505 job_finish_sync_locked(job
, NULL
, &error_abort
);
506 assert(job
->status
== JOB_STATUS_PENDING
);
508 job_finalize_locked(job
, &error_abort
);
509 assert(job
->status
== JOB_STATUS_CONCLUDED
);
511 job_dismiss_locked(&job
, &error_abort
);
514 aio_context_acquire(ctx
);
516 aio_context_release(ctx
);
517 iothread_join(iothread
);
520 int main(int argc
, char **argv
)
522 qemu_init_main_loop(&error_abort
);
525 g_test_init(&argc
, &argv
, NULL
);
526 g_test_add_func("/blockjob/ids", test_job_ids
);
527 g_test_add_func("/blockjob/cancel/created", test_cancel_created
);
528 g_test_add_func("/blockjob/cancel/running", test_cancel_running
);
529 g_test_add_func("/blockjob/cancel/paused", test_cancel_paused
);
530 g_test_add_func("/blockjob/cancel/ready", test_cancel_ready
);
531 g_test_add_func("/blockjob/cancel/standby", test_cancel_standby
);
532 g_test_add_func("/blockjob/cancel/pending", test_cancel_pending
);
533 g_test_add_func("/blockjob/cancel/concluded", test_cancel_concluded
);
536 * This test is flaky and sometimes fails in CI and otherwise:
537 * don't run unless user opts in via environment variable.
539 if (getenv("QEMU_TEST_FLAKY_TESTS")) {
540 g_test_add_func("/blockjob/complete_in_standby", test_complete_in_standby
);