tests/test-block-iothread.c

/*
 * Block tests for iothreads
 *
 * Copyright (c) 2018 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "iothread.h"

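/*
 * Stub driver callbacks: every operation completes immediately and reports
 * success, so the tests below exercise only the generic block layer and its
 * AioContext handling, not any real I/O.
 */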
static int coroutine_fn bdrv_test_co_prwv(BlockDriverState *bs,
                                          uint64_t offset, uint64_t bytes,
                                          QEMUIOVector *qiov, int flags)
{
    return 0;
}

static int coroutine_fn bdrv_test_co_pdiscard(BlockDriverState *bs,
                                              int64_t offset, int bytes)
{
    return 0;
}

static int coroutine_fn
bdrv_test_co_truncate(BlockDriverState *bs, int64_t offset,
                      PreallocMode prealloc, Error **errp)
{
    return 0;
}

static int coroutine_fn bdrv_test_co_block_status(BlockDriverState *bs,
                                                  bool want_zero,
                                                  int64_t offset, int64_t count,
                                                  int64_t *pnum, int64_t *map,
                                                  BlockDriverState **file)
{
    *pnum = count;
    return 0;
}

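/* A minimal BlockDriver wiring up the stub callbacks above */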
static BlockDriver bdrv_test = {
    .format_name            = "test",
    .instance_size          = 1,

    .bdrv_co_preadv         = bdrv_test_co_prwv,
    .bdrv_co_pwritev        = bdrv_test_co_prwv,
    .bdrv_co_pdiscard       = bdrv_test_co_pdiscard,
    .bdrv_co_truncate       = bdrv_test_co_truncate,
    .bdrv_co_block_status   = bdrv_test_co_block_status,
};

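/*
 * Each test_sync_op_*() helper exercises one synchronous block layer
 * function, covering the success path and at least one early error path.
 * test_sync_op() runs them while the node lives in an iothread AioContext,
 * so they check that the synchronous wrappers poll the right context.
 */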
static void test_sync_op_pread(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = bdrv_pread(c, 0, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = bdrv_pread(c, -2, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_pwrite(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = bdrv_pwrite(c, 0, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = bdrv_pwrite(c, -2, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_blk_pread(BlockBackend *blk)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = blk_pread(blk, 0, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = blk_pread(blk, -2, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_blk_pwrite(BlockBackend *blk)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = blk_pwrite(blk, 0, buf, sizeof(buf), 0);
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = blk_pwrite(blk, -2, buf, sizeof(buf), 0);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_load_vmstate(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_load_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}

static void test_sync_op_save_vmstate(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_save_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}

static void test_sync_op_pdiscard(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    c->bs->open_flags |= BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: UNMAP not supported */
    c->bs->open_flags &= ~BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_pdiscard(c, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_blk_pdiscard(BlockBackend *blk)
{
    int ret;

    /* Early success: UNMAP not supported */
    ret = blk_pdiscard(blk, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pdiscard(blk, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_truncate(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_truncate(c, 65536, PREALLOC_MODE_OFF, NULL);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_truncate(c, -2, PREALLOC_MODE_OFF, NULL);
    g_assert_cmpint(ret, ==, -EINVAL);

    /* Error: Read-only image */
    c->bs->read_only = true;
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_truncate(c, 65536, PREALLOC_MODE_OFF, NULL);
    g_assert_cmpint(ret, ==, -EACCES);

    c->bs->read_only = false;
    c->bs->open_flags |= BDRV_O_RDWR;
}

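/*
 * Note: this test clears bdrv_test.bdrv_co_block_status in the shared driver
 * definition and does not restore it, so any later use of bdrv_test sees a
 * driver without block status support.
 */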
static void test_sync_op_block_status(BdrvChild *c)
{
    int ret;
    int64_t n;

    /* Normal success path */
    ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: No driver support */
    bdrv_test.bdrv_co_block_status = NULL;
    ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
    g_assert_cmpint(ret, ==, 1);

    /* Early success: bytes = 0 */
    ret = bdrv_is_allocated(c->bs, 0, 0, &n);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Offset > image size */
    ret = bdrv_is_allocated(c->bs, 0x1000000, 0x1000000, &n);
    g_assert_cmpint(ret, ==, 0);
}

static void test_sync_op_flush(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    c->bs->read_only = true;
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    c->bs->read_only = false;
    c->bs->open_flags |= BDRV_O_RDWR;
}

static void test_sync_op_blk_flush(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    int ret;

    /* Normal success path */
    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    bs->read_only = true;
    bs->open_flags &= ~BDRV_O_RDWR;

    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    bs->read_only = false;
    bs->open_flags |= BDRV_O_RDWR;
}

static void test_sync_op_check(BdrvChild *c)
{
    BdrvCheckResult result;
    int ret;

    /* Error: Driver does not implement check */
    ret = bdrv_check(c->bs, &result, 0);
    g_assert_cmpint(ret, ==, -ENOTSUP);
}

static void test_sync_op_invalidate_cache(BdrvChild *c)
{
    /* Early success: Image is not inactive */
    bdrv_invalidate_cache(c->bs, NULL);
}

typedef struct SyncOpTest {
    const char *name;
    void (*fn)(BdrvChild *c);
    void (*blkfn)(BlockBackend *blk);
} SyncOpTest;

const SyncOpTest sync_op_tests[] = {
    {
        .name   = "/sync-op/pread",
        .fn     = test_sync_op_pread,
        .blkfn  = test_sync_op_blk_pread,
    }, {
        .name   = "/sync-op/pwrite",
        .fn     = test_sync_op_pwrite,
        .blkfn  = test_sync_op_blk_pwrite,
    }, {
        .name   = "/sync-op/load_vmstate",
        .fn     = test_sync_op_load_vmstate,
    }, {
        .name   = "/sync-op/save_vmstate",
        .fn     = test_sync_op_save_vmstate,
    }, {
        .name   = "/sync-op/pdiscard",
        .fn     = test_sync_op_pdiscard,
        .blkfn  = test_sync_op_blk_pdiscard,
    }, {
        .name   = "/sync-op/truncate",
        .fn     = test_sync_op_truncate,
    }, {
        .name   = "/sync-op/block_status",
        .fn     = test_sync_op_block_status,
    }, {
        .name   = "/sync-op/flush",
        .fn     = test_sync_op_flush,
        .blkfn  = test_sync_op_blk_flush,
    }, {
        .name   = "/sync-op/check",
        .fn     = test_sync_op_check,
    }, {
        .name   = "/sync-op/invalidate_cache",
        .fn     = test_sync_op_invalidate_cache,
    },
};

/* Test synchronous operations that run in a different iothread, so we have to
 * poll for the coroutine there to return. */
static void test_sync_op(const void *opaque)
{
    const SyncOpTest *t = opaque;
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    BdrvChild *c;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
    blk_insert_bs(blk, bs, &error_abort);
    c = QLIST_FIRST(&bs->parents);

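    /*
     * Move the node into the iothread's AioContext and run the operations
     * there; the context is acquired around the calls because the
     * synchronous wrappers poll it from this (the main) thread.
     */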
    blk_set_aio_context(blk, ctx, &error_abort);
    aio_context_acquire(ctx);
    t->fn(c);
    if (t->blkfn) {
        t->blkfn(blk);
    }
    aio_context_release(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);

    bdrv_unref(bs);
    blk_unref(blk);
}

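/*
 * TestBlockJob loops until it is told to complete, bumping ->n on every
 * iteration so that the tests can observe the job making progress in
 * whichever AioContext it currently runs in.
 */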
typedef struct TestBlockJob {
    BlockJob common;
    bool should_complete;
    int n;
} TestBlockJob;

static int test_job_prepare(Job *job)
{
    g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    return 0;
}

static int coroutine_fn test_job_run(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    job_transition_to_ready(&s->common.job);
    while (!s->should_complete) {
        s->n++;
        g_assert(qemu_get_current_aio_context() == job->aio_context);

        /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
         * emulate some actual activity (probably some I/O) here so that the
         * drain involved in AioContext switches has to wait for this activity
         * to stop. */
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);

        job_pause_point(&s->common.job);
    }

    g_assert(qemu_get_current_aio_context() == job->aio_context);
    return 0;
}

static void test_job_complete(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);
    s->should_complete = true;
}

BlockJobDriver test_job_driver = {
    .job_driver = {
        .instance_size  = sizeof(TestBlockJob),
        .free           = block_job_free,
        .user_resume    = block_job_user_resume,
        .drain          = block_job_drain,
        .run            = test_job_run,
        .complete       = test_job_complete,
        .prepare        = test_job_prepare,
    },
};

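/*
 * Test that a block job follows its node between AioContexts: start the job
 * in the main context, switch the node to an iothread and back, and after
 * each switch poll until the job has visibly made progress again.
 */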
static void test_attach_blockjob(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    TestBlockJob *tjob;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    tjob = block_job_create("job0", &test_job_driver, NULL, bs,
                            0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
    job_start(&tjob->common.job);

    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    aio_context_acquire(ctx);
    job_complete_sync(&tjob->common.job, &error_abort);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    bdrv_unref(bs);
    blk_unref(blk);
}

/*
 * Test that changing the AioContext for one node in a tree (here through blk)
 * changes all other nodes as well:
 *
 *  blk
 *   |
 *   |  bs_verify [blkverify]
 *   |   /           \
 *   |  /             \
 *  bs_a [bdrv_test]   bs_b [bdrv_test]
 */
static void test_propagate_basic(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_verify;
    QDict *options;

    /* Create bs_a and its BlockBackend */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs_a, &error_abort);

    /* Create bs_b */
    bs_b = bdrv_new_open_driver(&bdrv_test, "bs_b", BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_a and bs_b */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_a");
    qdict_put_str(options, "raw", "bs_b");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Switch the AioContext */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);

    /* Switch the AioContext back */
    ctx = qemu_get_aio_context();
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);

    bdrv_unref(bs_verify);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
    blk_unref(blk);
}

/*
 * Test that diamonds in the graph don't lead to endless recursion:
 *
 *              blk
 *               |
 *      bs_verify [blkverify]
 *         /    \
 *        /      \
 *  bs_b [raw]    bs_c [raw]
 *        \      /
 *         \    /
 *     bs_a [bdrv_test]
 */
static void test_propagate_diamond(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_c, *bs_verify;
    QDict *options;

    /* Create bs_a */
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);

    /* Create bs_b and bs_c */
    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_b");
    bs_b = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_c");
    bs_c = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_b and bs_c */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_b");
    qdict_put_str(options, "raw", "bs_c");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk, bs_verify, &error_abort);

    /* Switch the AioContext */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);
    g_assert(bdrv_get_aio_context(bs_c) == ctx);

    /* Switch the AioContext back */
    ctx = qemu_get_aio_context();
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);
    g_assert(bdrv_get_aio_context(bs_c) == ctx);

    blk_unref(blk);
    bdrv_unref(bs_verify);
    bdrv_unref(bs_c);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
}

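/*
 * Test that AioContext switches propagate through a mirror job: moving the
 * source node also moves the target, the job's filter node and the job
 * itself, and a BlockBackend attached to the tree blocks the switch until
 * context changes are explicitly allowed for it.
 */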
static void test_propagate_mirror(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockDriverState *src, *target, *filter;
    BlockBackend *blk;
    Job *job;
    Error *local_err = NULL;

    /* Create src and target */
    src = bdrv_new_open_driver(&bdrv_test, "src", BDRV_O_RDWR, &error_abort);
    target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
                                  &error_abort);

    /* Start a mirror job */
    mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0,
                 MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN,
                 BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
                 false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
                 &error_abort);
    job = job_get("job0");
    filter = bdrv_find_node("filter_node");

    /* Change the AioContext of src */
    bdrv_try_set_aio_context(src, ctx, &error_abort);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);
    g_assert(job->aio_context == ctx);

    /* Change the AioContext of target */
    aio_context_acquire(ctx);
    bdrv_try_set_aio_context(target, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* With a BlockBackend on src, changing target must fail */
    blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
    blk_insert_bs(blk, src, &error_abort);

    bdrv_try_set_aio_context(target, ctx, &local_err);
    g_assert(local_err);
    error_free(local_err);

    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* ...unless we explicitly allow it */
    aio_context_acquire(ctx);
    blk_set_allow_aio_context_change(blk, true);
    bdrv_try_set_aio_context(target, ctx, &error_abort);
    aio_context_release(ctx);

    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    job_cancel_sync_all();

    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    bdrv_try_set_aio_context(target, main_ctx, &error_abort);
    aio_context_release(ctx);

    blk_unref(blk);
    bdrv_unref(src);
    bdrv_unref(target);
}

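/*
 * Test that a node opened on top of a child that already lives in an
 * iothread inherits that AioContext, and follows it back to the main
 * context when the BlockBackend is switched.
 */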
static void test_attach_second_node(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockBackend *blk;
    BlockDriverState *bs, *filter;
    QDict *options;

    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "base");

    filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    blk_set_aio_context(blk, main_ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    bdrv_unref(filter);
    bdrv_unref(bs);
    blk_unref(blk);
}

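/*
 * Test that removing a node from a BlockBackend that sits in an iothread
 * leaves the BlockBackend in that AioContext while the node falls back to
 * the main context, and that re-attaching moves the node back into the
 * iothread.
 */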
static void test_attach_preserve_blk_ctx(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;

    /* Add node to BlockBackend that has an iothread context assigned */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    /* Remove the node again */
    blk_remove_bs(blk);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context());

    /* Re-attach the node */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    bdrv_unref(bs);
    blk_unref(blk);
}

int main(int argc, char **argv)
{
    int i;

    bdrv_init();
    qemu_init_main_loop(&error_abort);

    g_test_init(&argc, &argv, NULL);

    for (i = 0; i < ARRAY_SIZE(sync_op_tests); i++) {
        const SyncOpTest *t = &sync_op_tests[i];
        g_test_add_data_func(t->name, t, test_sync_op);
    }

    g_test_add_func("/attach/blockjob", test_attach_blockjob);
    g_test_add_func("/attach/second_node", test_attach_second_node);
    g_test_add_func("/attach/preserve_blk_ctx", test_attach_preserve_blk_ctx);
    g_test_add_func("/propagate/basic", test_propagate_basic);
    g_test_add_func("/propagate/diamond", test_propagate_diamond);
    g_test_add_func("/propagate/mirror", test_propagate_mirror);

    return g_test_run();
}