migration/rdma: Clean up two more harmless signed vs. unsigned issues
[qemu/armbru.git] / tests / unit / test-blockjob.c
bloba130f6fefbae0811cbcd85f39a7ffc7349b49075
/*
 * Blockjob tests
 *
 * Copyright Igalia, S.L. 2016
 *
 * Authors:
 *  Alberto Garcia   <berto@igalia.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
13 #include "qemu/osdep.h"
14 #include "qapi/error.h"
15 #include "qemu/main-loop.h"
16 #include "block/blockjob_int.h"
17 #include "sysemu/block-backend.h"
18 #include "qapi/qmp/qdict.h"
19 #include "iothread.h"
21 static const BlockJobDriver test_block_job_driver = {
22 .job_driver = {
23 .instance_size = sizeof(BlockJob),
24 .free = block_job_free,
25 .user_resume = block_job_user_resume,
29 static void block_job_cb(void *opaque, int ret)
33 static BlockJob *mk_job(BlockBackend *blk, const char *id,
34 const BlockJobDriver *drv, bool should_succeed,
35 int flags)
37 BlockJob *job;
38 Error *err = NULL;
40 job = block_job_create(id, drv, NULL, blk_bs(blk),
41 0, BLK_PERM_ALL, 0, flags, block_job_cb,
42 NULL, &err);
43 if (should_succeed) {
44 g_assert_null(err);
45 g_assert_nonnull(job);
46 if (id) {
47 g_assert_cmpstr(job->job.id, ==, id);
48 } else {
49 g_assert_cmpstr(job->job.id, ==, blk_name(blk));
51 } else {
52 error_free_or_abort(&err);
53 g_assert_null(job);
56 return job;
59 static BlockJob *do_test_id(BlockBackend *blk, const char *id,
60 bool should_succeed)
62 return mk_job(blk, id, &test_block_job_driver,
63 should_succeed, JOB_DEFAULT);
66 /* This creates a BlockBackend (optionally with a name) with a
67 * BlockDriverState inserted. */
68 static BlockBackend *create_blk(const char *name)
70 /* No I/O is performed on this device */
71 BlockBackend *blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
72 BlockDriverState *bs;
74 QDict *opt = qdict_new();
75 qdict_put_str(opt, "file.read-zeroes", "on");
76 bs = bdrv_open("null-co://", NULL, opt, 0, &error_abort);
77 g_assert_nonnull(bs);
79 blk_insert_bs(blk, bs, &error_abort);
80 bdrv_unref(bs);
82 if (name) {
83 Error *err = NULL;
84 monitor_add_blk(blk, name, &err);
85 g_assert_null(err);
88 return blk;
91 /* This destroys the backend */
92 static void destroy_blk(BlockBackend *blk)
94 if (blk_name(blk)[0] != '\0') {
95 monitor_remove_blk(blk);
98 blk_remove_bs(blk);
99 blk_unref(blk);
102 static void test_job_ids(void)
104 BlockBackend *blk[3];
105 BlockJob *job[3];
107 blk[0] = create_blk(NULL);
108 blk[1] = create_blk("drive1");
109 blk[2] = create_blk("drive2");
111 /* No job ID provided and the block backend has no name */
112 job[0] = do_test_id(blk[0], NULL, false);
114 /* These are all invalid job IDs */
115 job[0] = do_test_id(blk[0], "0id", false);
116 job[0] = do_test_id(blk[0], "", false);
117 job[0] = do_test_id(blk[0], " ", false);
118 job[0] = do_test_id(blk[0], "123", false);
119 job[0] = do_test_id(blk[0], "_id", false);
120 job[0] = do_test_id(blk[0], "-id", false);
121 job[0] = do_test_id(blk[0], ".id", false);
122 job[0] = do_test_id(blk[0], "#id", false);
124 /* This one is valid */
125 job[0] = do_test_id(blk[0], "id0", true);
127 /* We can have two jobs in the same BDS */
128 job[1] = do_test_id(blk[0], "id1", true);
129 job_early_fail(&job[1]->job);
131 /* Duplicate job IDs are not allowed */
132 job[1] = do_test_id(blk[1], "id0", false);
134 /* But once job[0] finishes we can reuse its ID */
135 job_early_fail(&job[0]->job);
136 job[1] = do_test_id(blk[1], "id0", true);
138 /* No job ID specified, defaults to the backend name ('drive1') */
139 job_early_fail(&job[1]->job);
140 job[1] = do_test_id(blk[1], NULL, true);
142 /* Duplicate job ID */
143 job[2] = do_test_id(blk[2], "drive1", false);
145 /* The ID of job[2] would default to 'drive2' but it is already in use */
146 job[0] = do_test_id(blk[0], "drive2", true);
147 job[2] = do_test_id(blk[2], NULL, false);
149 /* This one is valid */
150 job[2] = do_test_id(blk[2], "id_2", true);
152 job_early_fail(&job[0]->job);
153 job_early_fail(&job[1]->job);
154 job_early_fail(&job[2]->job);
156 destroy_blk(blk[0]);
157 destroy_blk(blk[1]);
158 destroy_blk(blk[2]);
161 typedef struct CancelJob {
162 BlockJob common;
163 BlockBackend *blk;
164 bool should_converge;
165 bool should_complete;
166 } CancelJob;
168 static void cancel_job_complete(Job *job, Error **errp)
170 CancelJob *s = container_of(job, CancelJob, common.job);
171 s->should_complete = true;
174 static int coroutine_fn cancel_job_run(Job *job, Error **errp)
176 CancelJob *s = container_of(job, CancelJob, common.job);
178 while (!s->should_complete) {
179 if (job_is_cancelled(&s->common.job)) {
180 return 0;
183 if (!job_is_ready(&s->common.job) && s->should_converge) {
184 job_transition_to_ready(&s->common.job);
187 job_sleep_ns(&s->common.job, 100000);
190 return 0;
193 static const BlockJobDriver test_cancel_driver = {
194 .job_driver = {
195 .instance_size = sizeof(CancelJob),
196 .free = block_job_free,
197 .user_resume = block_job_user_resume,
198 .run = cancel_job_run,
199 .complete = cancel_job_complete,
203 static CancelJob *create_common(Job **pjob)
205 BlockBackend *blk;
206 Job *job;
207 BlockJob *bjob;
208 CancelJob *s;
210 blk = create_blk(NULL);
211 bjob = mk_job(blk, "Steve", &test_cancel_driver, true,
212 JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
213 job = &bjob->job;
214 WITH_JOB_LOCK_GUARD() {
215 job_ref_locked(job);
216 assert(job->status == JOB_STATUS_CREATED);
219 s = container_of(bjob, CancelJob, common);
220 s->blk = blk;
222 *pjob = job;
223 return s;
226 static void cancel_common(CancelJob *s)
228 BlockJob *job = &s->common;
229 BlockBackend *blk = s->blk;
230 JobStatus sts = job->job.status;
231 AioContext *ctx = job->job.aio_context;
233 job_cancel_sync(&job->job, true);
234 WITH_JOB_LOCK_GUARD() {
235 if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) {
236 Job *dummy = &job->job;
237 job_dismiss_locked(&dummy, &error_abort);
239 assert(job->job.status == JOB_STATUS_NULL);
240 job_unref_locked(&job->job);
243 aio_context_acquire(ctx);
244 destroy_blk(blk);
245 aio_context_release(ctx);
249 static void test_cancel_created(void)
251 Job *job;
252 CancelJob *s;
254 s = create_common(&job);
255 cancel_common(s);
258 static void assert_job_status_is(Job *job, int status)
260 WITH_JOB_LOCK_GUARD() {
261 assert(job->status == status);
265 static void test_cancel_running(void)
267 Job *job;
268 CancelJob *s;
270 s = create_common(&job);
272 job_start(job);
273 assert_job_status_is(job, JOB_STATUS_RUNNING);
275 cancel_common(s);
278 static void test_cancel_paused(void)
280 Job *job;
281 CancelJob *s;
283 s = create_common(&job);
285 job_start(job);
286 WITH_JOB_LOCK_GUARD() {
287 assert(job->status == JOB_STATUS_RUNNING);
288 job_user_pause_locked(job, &error_abort);
290 job_enter(job);
291 assert_job_status_is(job, JOB_STATUS_PAUSED);
293 cancel_common(s);
296 static void test_cancel_ready(void)
298 Job *job;
299 CancelJob *s;
301 s = create_common(&job);
303 job_start(job);
304 assert_job_status_is(job, JOB_STATUS_RUNNING);
306 s->should_converge = true;
307 job_enter(job);
308 assert_job_status_is(job, JOB_STATUS_READY);
310 cancel_common(s);
313 static void test_cancel_standby(void)
315 Job *job;
316 CancelJob *s;
318 s = create_common(&job);
320 job_start(job);
321 assert_job_status_is(job, JOB_STATUS_RUNNING);
323 s->should_converge = true;
324 job_enter(job);
325 WITH_JOB_LOCK_GUARD() {
326 assert(job->status == JOB_STATUS_READY);
327 job_user_pause_locked(job, &error_abort);
329 job_enter(job);
330 assert_job_status_is(job, JOB_STATUS_STANDBY);
332 cancel_common(s);
335 static void test_cancel_pending(void)
337 Job *job;
338 CancelJob *s;
340 s = create_common(&job);
342 job_start(job);
343 assert_job_status_is(job, JOB_STATUS_RUNNING);
345 s->should_converge = true;
346 job_enter(job);
347 WITH_JOB_LOCK_GUARD() {
348 assert(job->status == JOB_STATUS_READY);
349 job_complete_locked(job, &error_abort);
351 job_enter(job);
352 while (!job->deferred_to_main_loop) {
353 aio_poll(qemu_get_aio_context(), true);
355 assert_job_status_is(job, JOB_STATUS_READY);
356 aio_poll(qemu_get_aio_context(), true);
357 assert_job_status_is(job, JOB_STATUS_PENDING);
359 cancel_common(s);
362 static void test_cancel_concluded(void)
364 Job *job;
365 CancelJob *s;
367 s = create_common(&job);
369 job_start(job);
370 assert_job_status_is(job, JOB_STATUS_RUNNING);
372 s->should_converge = true;
373 job_enter(job);
374 WITH_JOB_LOCK_GUARD() {
375 assert(job->status == JOB_STATUS_READY);
376 job_complete_locked(job, &error_abort);
378 job_enter(job);
379 while (!job->deferred_to_main_loop) {
380 aio_poll(qemu_get_aio_context(), true);
382 assert_job_status_is(job, JOB_STATUS_READY);
383 aio_poll(qemu_get_aio_context(), true);
384 assert_job_status_is(job, JOB_STATUS_PENDING);
386 WITH_JOB_LOCK_GUARD() {
387 job_finalize_locked(job, &error_abort);
388 assert(job->status == JOB_STATUS_CONCLUDED);
391 cancel_common(s);
394 /* (See test_yielding_driver for the job description) */
395 typedef struct YieldingJob {
396 BlockJob common;
397 bool should_complete;
398 } YieldingJob;
400 static void yielding_job_complete(Job *job, Error **errp)
402 YieldingJob *s = container_of(job, YieldingJob, common.job);
403 s->should_complete = true;
404 job_enter(job);
407 static int coroutine_fn yielding_job_run(Job *job, Error **errp)
409 YieldingJob *s = container_of(job, YieldingJob, common.job);
411 job_transition_to_ready(job);
413 while (!s->should_complete) {
414 job_yield(job);
417 return 0;
421 * This job transitions immediately to the READY state, and then
422 * yields until it is to complete.
424 static const BlockJobDriver test_yielding_driver = {
425 .job_driver = {
426 .instance_size = sizeof(YieldingJob),
427 .free = block_job_free,
428 .user_resume = block_job_user_resume,
429 .run = yielding_job_run,
430 .complete = yielding_job_complete,
435 * Test that job_complete_locked() works even on jobs that are in a paused
436 * state (i.e., STANDBY).
438 * To do this, run YieldingJob in an IO thread, get it into the READY
439 * state, then have a drained section. Before ending the section,
440 * acquire the context so the job will not be entered and will thus
441 * remain on STANDBY.
443 * job_complete_locked() should still work without error.
445 * Note that on the QMP interface, it is impossible to lock an IO
446 * thread before a drained section ends. In practice, the
447 * bdrv_drain_all_end() and the aio_context_acquire() will be
448 * reversed. However, that makes for worse reproducibility here:
449 * Sometimes, the job would no longer be in STANDBY then but already
450 * be started. We cannot prevent that, because the IO thread runs
451 * concurrently. We can only prevent it by taking the lock before
452 * ending the drained section, so we do that.
454 * (You can reverse the order of operations and most of the time the
455 * test will pass, but sometimes the assert(status == STANDBY) will
456 * fail.)
458 static void test_complete_in_standby(void)
460 BlockBackend *blk;
461 IOThread *iothread;
462 AioContext *ctx;
463 Job *job;
464 BlockJob *bjob;
466 /* Create a test drive, move it to an IO thread */
467 blk = create_blk(NULL);
468 iothread = iothread_new();
470 ctx = iothread_get_aio_context(iothread);
471 blk_set_aio_context(blk, ctx, &error_abort);
473 /* Create our test job */
474 bjob = mk_job(blk, "job", &test_yielding_driver, true,
475 JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
476 job = &bjob->job;
477 assert_job_status_is(job, JOB_STATUS_CREATED);
479 /* Wait for the job to become READY */
480 job_start(job);
482 * Here we are waiting for the status to change, so don't bother
483 * protecting the read every time.
485 AIO_WAIT_WHILE_UNLOCKED(ctx, job->status != JOB_STATUS_READY);
487 /* Begin the drained section, pausing the job */
488 bdrv_drain_all_begin();
489 assert_job_status_is(job, JOB_STATUS_STANDBY);
491 /* Lock the IO thread to prevent the job from being run */
492 aio_context_acquire(ctx);
493 /* This will schedule the job to resume it */
494 bdrv_drain_all_end();
495 aio_context_release(ctx);
497 WITH_JOB_LOCK_GUARD() {
498 /* But the job cannot run, so it will remain on standby */
499 assert(job->status == JOB_STATUS_STANDBY);
501 /* Even though the job is on standby, this should work */
502 job_complete_locked(job, &error_abort);
504 /* The test is done now, clean up. */
505 job_finish_sync_locked(job, NULL, &error_abort);
506 assert(job->status == JOB_STATUS_PENDING);
508 job_finalize_locked(job, &error_abort);
509 assert(job->status == JOB_STATUS_CONCLUDED);
511 job_dismiss_locked(&job, &error_abort);
514 aio_context_acquire(ctx);
515 destroy_blk(blk);
516 aio_context_release(ctx);
517 iothread_join(iothread);
520 int main(int argc, char **argv)
522 qemu_init_main_loop(&error_abort);
523 bdrv_init();
525 g_test_init(&argc, &argv, NULL);
526 g_test_add_func("/blockjob/ids", test_job_ids);
527 g_test_add_func("/blockjob/cancel/created", test_cancel_created);
528 g_test_add_func("/blockjob/cancel/running", test_cancel_running);
529 g_test_add_func("/blockjob/cancel/paused", test_cancel_paused);
530 g_test_add_func("/blockjob/cancel/ready", test_cancel_ready);
531 g_test_add_func("/blockjob/cancel/standby", test_cancel_standby);
532 g_test_add_func("/blockjob/cancel/pending", test_cancel_pending);
533 g_test_add_func("/blockjob/cancel/concluded", test_cancel_concluded);
536 * This test is flaky and sometimes fails in CI and otherwise:
537 * don't run unless user opts in via environment variable.
539 if (getenv("QEMU_TEST_FLAKY_TESTS")) {
540 g_test_add_func("/blockjob/complete_in_standby", test_complete_in_standby);
542 return g_test_run();