/*
 * Block tests for iothreads
 *
 * Copyright (c) 2018 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qemu/main-loop.h"
34 static int coroutine_fn
bdrv_test_co_prwv(BlockDriverState
*bs
,
35 uint64_t offset
, uint64_t bytes
,
36 QEMUIOVector
*qiov
, int flags
)
41 static int coroutine_fn
bdrv_test_co_pdiscard(BlockDriverState
*bs
,
42 int64_t offset
, int bytes
)
47 static int coroutine_fn
48 bdrv_test_co_truncate(BlockDriverState
*bs
, int64_t offset
,
49 PreallocMode prealloc
, Error
**errp
)
54 static int coroutine_fn
bdrv_test_co_block_status(BlockDriverState
*bs
,
56 int64_t offset
, int64_t count
,
57 int64_t *pnum
, int64_t *map
,
58 BlockDriverState
**file
)
64 static BlockDriver bdrv_test
= {
65 .format_name
= "test",
68 .bdrv_co_preadv
= bdrv_test_co_prwv
,
69 .bdrv_co_pwritev
= bdrv_test_co_prwv
,
70 .bdrv_co_pdiscard
= bdrv_test_co_pdiscard
,
71 .bdrv_co_truncate
= bdrv_test_co_truncate
,
72 .bdrv_co_block_status
= bdrv_test_co_block_status
,
75 static void test_sync_op_pread(BdrvChild
*c
)
81 ret
= bdrv_pread(c
, 0, buf
, sizeof(buf
));
82 g_assert_cmpint(ret
, ==, 512);
84 /* Early error: Negative offset */
85 ret
= bdrv_pread(c
, -2, buf
, sizeof(buf
));
86 g_assert_cmpint(ret
, ==, -EIO
);
89 static void test_sync_op_pwrite(BdrvChild
*c
)
95 ret
= bdrv_pwrite(c
, 0, buf
, sizeof(buf
));
96 g_assert_cmpint(ret
, ==, 512);
98 /* Early error: Negative offset */
99 ret
= bdrv_pwrite(c
, -2, buf
, sizeof(buf
));
100 g_assert_cmpint(ret
, ==, -EIO
);
103 static void test_sync_op_blk_pread(BlockBackend
*blk
)
109 ret
= blk_pread(blk
, 0, buf
, sizeof(buf
));
110 g_assert_cmpint(ret
, ==, 512);
112 /* Early error: Negative offset */
113 ret
= blk_pread(blk
, -2, buf
, sizeof(buf
));
114 g_assert_cmpint(ret
, ==, -EIO
);
117 static void test_sync_op_blk_pwrite(BlockBackend
*blk
)
123 ret
= blk_pwrite(blk
, 0, buf
, sizeof(buf
), 0);
124 g_assert_cmpint(ret
, ==, 512);
126 /* Early error: Negative offset */
127 ret
= blk_pwrite(blk
, -2, buf
, sizeof(buf
), 0);
128 g_assert_cmpint(ret
, ==, -EIO
);
131 static void test_sync_op_load_vmstate(BdrvChild
*c
)
136 /* Error: Driver does not support snapshots */
137 ret
= bdrv_load_vmstate(c
->bs
, buf
, 0, sizeof(buf
));
138 g_assert_cmpint(ret
, ==, -ENOTSUP
);
141 static void test_sync_op_save_vmstate(BdrvChild
*c
)
146 /* Error: Driver does not support snapshots */
147 ret
= bdrv_save_vmstate(c
->bs
, buf
, 0, sizeof(buf
));
148 g_assert_cmpint(ret
, ==, -ENOTSUP
);
151 static void test_sync_op_pdiscard(BdrvChild
*c
)
155 /* Normal success path */
156 c
->bs
->open_flags
|= BDRV_O_UNMAP
;
157 ret
= bdrv_pdiscard(c
, 0, 512);
158 g_assert_cmpint(ret
, ==, 0);
160 /* Early success: UNMAP not supported */
161 c
->bs
->open_flags
&= ~BDRV_O_UNMAP
;
162 ret
= bdrv_pdiscard(c
, 0, 512);
163 g_assert_cmpint(ret
, ==, 0);
165 /* Early error: Negative offset */
166 ret
= bdrv_pdiscard(c
, -2, 512);
167 g_assert_cmpint(ret
, ==, -EIO
);
170 static void test_sync_op_blk_pdiscard(BlockBackend
*blk
)
174 /* Early success: UNMAP not supported */
175 ret
= blk_pdiscard(blk
, 0, 512);
176 g_assert_cmpint(ret
, ==, 0);
178 /* Early error: Negative offset */
179 ret
= blk_pdiscard(blk
, -2, 512);
180 g_assert_cmpint(ret
, ==, -EIO
);
183 static void test_sync_op_truncate(BdrvChild
*c
)
187 /* Normal success path */
188 ret
= bdrv_truncate(c
, 65536, PREALLOC_MODE_OFF
, NULL
);
189 g_assert_cmpint(ret
, ==, 0);
191 /* Early error: Negative offset */
192 ret
= bdrv_truncate(c
, -2, PREALLOC_MODE_OFF
, NULL
);
193 g_assert_cmpint(ret
, ==, -EINVAL
);
195 /* Error: Read-only image */
196 c
->bs
->read_only
= true;
197 c
->bs
->open_flags
&= ~BDRV_O_RDWR
;
199 ret
= bdrv_truncate(c
, 65536, PREALLOC_MODE_OFF
, NULL
);
200 g_assert_cmpint(ret
, ==, -EACCES
);
202 c
->bs
->read_only
= false;
203 c
->bs
->open_flags
|= BDRV_O_RDWR
;
206 static void test_sync_op_block_status(BdrvChild
*c
)
211 /* Normal success path */
212 ret
= bdrv_is_allocated(c
->bs
, 0, 65536, &n
);
213 g_assert_cmpint(ret
, ==, 0);
215 /* Early success: No driver support */
216 bdrv_test
.bdrv_co_block_status
= NULL
;
217 ret
= bdrv_is_allocated(c
->bs
, 0, 65536, &n
);
218 g_assert_cmpint(ret
, ==, 1);
220 /* Early success: bytes = 0 */
221 ret
= bdrv_is_allocated(c
->bs
, 0, 0, &n
);
222 g_assert_cmpint(ret
, ==, 0);
224 /* Early success: Offset > image size*/
225 ret
= bdrv_is_allocated(c
->bs
, 0x1000000, 0x1000000, &n
);
226 g_assert_cmpint(ret
, ==, 0);
229 static void test_sync_op_flush(BdrvChild
*c
)
233 /* Normal success path */
234 ret
= bdrv_flush(c
->bs
);
235 g_assert_cmpint(ret
, ==, 0);
237 /* Early success: Read-only image */
238 c
->bs
->read_only
= true;
239 c
->bs
->open_flags
&= ~BDRV_O_RDWR
;
241 ret
= bdrv_flush(c
->bs
);
242 g_assert_cmpint(ret
, ==, 0);
244 c
->bs
->read_only
= false;
245 c
->bs
->open_flags
|= BDRV_O_RDWR
;
248 static void test_sync_op_blk_flush(BlockBackend
*blk
)
250 BlockDriverState
*bs
= blk_bs(blk
);
253 /* Normal success path */
254 ret
= blk_flush(blk
);
255 g_assert_cmpint(ret
, ==, 0);
257 /* Early success: Read-only image */
258 bs
->read_only
= true;
259 bs
->open_flags
&= ~BDRV_O_RDWR
;
261 ret
= blk_flush(blk
);
262 g_assert_cmpint(ret
, ==, 0);
264 bs
->read_only
= false;
265 bs
->open_flags
|= BDRV_O_RDWR
;
268 static void test_sync_op_check(BdrvChild
*c
)
270 BdrvCheckResult result
;
273 /* Error: Driver does not implement check */
274 ret
= bdrv_check(c
->bs
, &result
, 0);
275 g_assert_cmpint(ret
, ==, -ENOTSUP
);
278 static void test_sync_op_invalidate_cache(BdrvChild
*c
)
280 /* Early success: Image is not inactive */
281 bdrv_invalidate_cache(c
->bs
, NULL
);
285 typedef struct SyncOpTest
{
287 void (*fn
)(BdrvChild
*c
);
288 void (*blkfn
)(BlockBackend
*blk
);
291 const SyncOpTest sync_op_tests
[] = {
293 .name
= "/sync-op/pread",
294 .fn
= test_sync_op_pread
,
295 .blkfn
= test_sync_op_blk_pread
,
297 .name
= "/sync-op/pwrite",
298 .fn
= test_sync_op_pwrite
,
299 .blkfn
= test_sync_op_blk_pwrite
,
301 .name
= "/sync-op/load_vmstate",
302 .fn
= test_sync_op_load_vmstate
,
304 .name
= "/sync-op/save_vmstate",
305 .fn
= test_sync_op_save_vmstate
,
307 .name
= "/sync-op/pdiscard",
308 .fn
= test_sync_op_pdiscard
,
309 .blkfn
= test_sync_op_blk_pdiscard
,
311 .name
= "/sync-op/truncate",
312 .fn
= test_sync_op_truncate
,
314 .name
= "/sync-op/block_status",
315 .fn
= test_sync_op_block_status
,
317 .name
= "/sync-op/flush",
318 .fn
= test_sync_op_flush
,
319 .blkfn
= test_sync_op_blk_flush
,
321 .name
= "/sync-op/check",
322 .fn
= test_sync_op_check
,
324 .name
= "/sync-op/invalidate_cache",
325 .fn
= test_sync_op_invalidate_cache
,
329 /* Test synchronous operations that run in a different iothread, so we have to
330 * poll for the coroutine there to return. */
331 static void test_sync_op(const void *opaque
)
333 const SyncOpTest
*t
= opaque
;
334 IOThread
*iothread
= iothread_new();
335 AioContext
*ctx
= iothread_get_aio_context(iothread
);
337 BlockDriverState
*bs
;
340 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
, BLK_PERM_ALL
);
341 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
342 bs
->total_sectors
= 65536 / BDRV_SECTOR_SIZE
;
343 blk_insert_bs(blk
, bs
, &error_abort
);
344 c
= QLIST_FIRST(&bs
->parents
);
346 blk_set_aio_context(blk
, ctx
, &error_abort
);
347 aio_context_acquire(ctx
);
352 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
353 aio_context_release(ctx
);
359 typedef struct TestBlockJob
{
361 bool should_complete
;
365 static int test_job_prepare(Job
*job
)
367 g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
371 static int coroutine_fn
test_job_run(Job
*job
, Error
**errp
)
373 TestBlockJob
*s
= container_of(job
, TestBlockJob
, common
.job
);
375 job_transition_to_ready(&s
->common
.job
);
376 while (!s
->should_complete
) {
378 g_assert(qemu_get_current_aio_context() == job
->aio_context
);
380 /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
381 * emulate some actual activity (probably some I/O) here so that the
382 * drain involved in AioContext switches has to wait for this activity
384 qemu_co_sleep_ns(QEMU_CLOCK_REALTIME
, 1000000);
386 job_pause_point(&s
->common
.job
);
389 g_assert(qemu_get_current_aio_context() == job
->aio_context
);
393 static void test_job_complete(Job
*job
, Error
**errp
)
395 TestBlockJob
*s
= container_of(job
, TestBlockJob
, common
.job
);
396 s
->should_complete
= true;
399 BlockJobDriver test_job_driver
= {
401 .instance_size
= sizeof(TestBlockJob
),
402 .free
= block_job_free
,
403 .user_resume
= block_job_user_resume
,
404 .drain
= block_job_drain
,
406 .complete
= test_job_complete
,
407 .prepare
= test_job_prepare
,
411 static void test_attach_blockjob(void)
413 IOThread
*iothread
= iothread_new();
414 AioContext
*ctx
= iothread_get_aio_context(iothread
);
416 BlockDriverState
*bs
;
419 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
, BLK_PERM_ALL
);
420 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
421 blk_insert_bs(blk
, bs
, &error_abort
);
423 tjob
= block_job_create("job0", &test_job_driver
, NULL
, bs
,
425 0, 0, NULL
, NULL
, &error_abort
);
426 job_start(&tjob
->common
.job
);
428 while (tjob
->n
== 0) {
429 aio_poll(qemu_get_aio_context(), false);
432 blk_set_aio_context(blk
, ctx
, &error_abort
);
435 while (tjob
->n
== 0) {
436 aio_poll(qemu_get_aio_context(), false);
439 aio_context_acquire(ctx
);
440 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
441 aio_context_release(ctx
);
444 while (tjob
->n
== 0) {
445 aio_poll(qemu_get_aio_context(), false);
448 blk_set_aio_context(blk
, ctx
, &error_abort
);
451 while (tjob
->n
== 0) {
452 aio_poll(qemu_get_aio_context(), false);
455 aio_context_acquire(ctx
);
456 job_complete_sync(&tjob
->common
.job
, &error_abort
);
457 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
458 aio_context_release(ctx
);
/*
 * Test that changing the AioContext for one node in a tree (here through blk)
 * changes all other nodes as well:
 *
 *     blk
 *      |
 *      |   bs_verify [blkverify]
 *      |    /               \
 *  bs_a [bdrv_test]   bs_b [bdrv_test]
 */
476 static void test_propagate_basic(void)
478 IOThread
*iothread
= iothread_new();
479 AioContext
*ctx
= iothread_get_aio_context(iothread
);
480 AioContext
*main_ctx
;
482 BlockDriverState
*bs_a
, *bs_b
, *bs_verify
;
485 /* Create bs_a and its BlockBackend */
486 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
, BLK_PERM_ALL
);
487 bs_a
= bdrv_new_open_driver(&bdrv_test
, "bs_a", BDRV_O_RDWR
, &error_abort
);
488 blk_insert_bs(blk
, bs_a
, &error_abort
);
491 bs_b
= bdrv_new_open_driver(&bdrv_test
, "bs_b", BDRV_O_RDWR
, &error_abort
);
493 /* Create blkverify filter that references both bs_a and bs_b */
494 options
= qdict_new();
495 qdict_put_str(options
, "driver", "blkverify");
496 qdict_put_str(options
, "test", "bs_a");
497 qdict_put_str(options
, "raw", "bs_b");
499 bs_verify
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
501 /* Switch the AioContext */
502 blk_set_aio_context(blk
, ctx
, &error_abort
);
503 g_assert(blk_get_aio_context(blk
) == ctx
);
504 g_assert(bdrv_get_aio_context(bs_a
) == ctx
);
505 g_assert(bdrv_get_aio_context(bs_verify
) == ctx
);
506 g_assert(bdrv_get_aio_context(bs_b
) == ctx
);
508 /* Switch the AioContext back */
509 main_ctx
= qemu_get_aio_context();
510 aio_context_acquire(ctx
);
511 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
512 aio_context_release(ctx
);
513 g_assert(blk_get_aio_context(blk
) == main_ctx
);
514 g_assert(bdrv_get_aio_context(bs_a
) == main_ctx
);
515 g_assert(bdrv_get_aio_context(bs_verify
) == main_ctx
);
516 g_assert(bdrv_get_aio_context(bs_b
) == main_ctx
);
518 bdrv_unref(bs_verify
);
/*
 * Test that diamonds in the graph don't lead to endless recursion:
 *
 *              blk
 *               |
 *      bs_verify [blkverify]
 *       /              \
 *  bs_b [raw]      bs_c [raw]
 *       \              /
 *       bs_a [bdrv_test]
 */
537 static void test_propagate_diamond(void)
539 IOThread
*iothread
= iothread_new();
540 AioContext
*ctx
= iothread_get_aio_context(iothread
);
541 AioContext
*main_ctx
;
543 BlockDriverState
*bs_a
, *bs_b
, *bs_c
, *bs_verify
;
547 bs_a
= bdrv_new_open_driver(&bdrv_test
, "bs_a", BDRV_O_RDWR
, &error_abort
);
549 /* Create bs_b and bc_c */
550 options
= qdict_new();
551 qdict_put_str(options
, "driver", "raw");
552 qdict_put_str(options
, "file", "bs_a");
553 qdict_put_str(options
, "node-name", "bs_b");
554 bs_b
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
556 options
= qdict_new();
557 qdict_put_str(options
, "driver", "raw");
558 qdict_put_str(options
, "file", "bs_a");
559 qdict_put_str(options
, "node-name", "bs_c");
560 bs_c
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
562 /* Create blkverify filter that references both bs_b and bs_c */
563 options
= qdict_new();
564 qdict_put_str(options
, "driver", "blkverify");
565 qdict_put_str(options
, "test", "bs_b");
566 qdict_put_str(options
, "raw", "bs_c");
568 bs_verify
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
569 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
, BLK_PERM_ALL
);
570 blk_insert_bs(blk
, bs_verify
, &error_abort
);
572 /* Switch the AioContext */
573 blk_set_aio_context(blk
, ctx
, &error_abort
);
574 g_assert(blk_get_aio_context(blk
) == ctx
);
575 g_assert(bdrv_get_aio_context(bs_verify
) == ctx
);
576 g_assert(bdrv_get_aio_context(bs_a
) == ctx
);
577 g_assert(bdrv_get_aio_context(bs_b
) == ctx
);
578 g_assert(bdrv_get_aio_context(bs_c
) == ctx
);
580 /* Switch the AioContext back */
581 main_ctx
= qemu_get_aio_context();
582 aio_context_acquire(ctx
);
583 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
584 aio_context_release(ctx
);
585 g_assert(blk_get_aio_context(blk
) == main_ctx
);
586 g_assert(bdrv_get_aio_context(bs_verify
) == main_ctx
);
587 g_assert(bdrv_get_aio_context(bs_a
) == main_ctx
);
588 g_assert(bdrv_get_aio_context(bs_b
) == main_ctx
);
589 g_assert(bdrv_get_aio_context(bs_c
) == main_ctx
);
592 bdrv_unref(bs_verify
);
598 static void test_propagate_mirror(void)
600 IOThread
*iothread
= iothread_new();
601 AioContext
*ctx
= iothread_get_aio_context(iothread
);
602 AioContext
*main_ctx
= qemu_get_aio_context();
603 BlockDriverState
*src
, *target
, *filter
;
606 Error
*local_err
= NULL
;
608 /* Create src and target*/
609 src
= bdrv_new_open_driver(&bdrv_test
, "src", BDRV_O_RDWR
, &error_abort
);
610 target
= bdrv_new_open_driver(&bdrv_test
, "target", BDRV_O_RDWR
,
613 /* Start a mirror job */
614 mirror_start("job0", src
, target
, NULL
, JOB_DEFAULT
, 0, 0, 0,
615 MIRROR_SYNC_MODE_NONE
, MIRROR_OPEN_BACKING_CHAIN
,
616 BLOCKDEV_ON_ERROR_REPORT
, BLOCKDEV_ON_ERROR_REPORT
,
617 false, "filter_node", MIRROR_COPY_MODE_BACKGROUND
,
619 job
= job_get("job0");
620 filter
= bdrv_find_node("filter_node");
622 /* Change the AioContext of src */
623 bdrv_try_set_aio_context(src
, ctx
, &error_abort
);
624 g_assert(bdrv_get_aio_context(src
) == ctx
);
625 g_assert(bdrv_get_aio_context(target
) == ctx
);
626 g_assert(bdrv_get_aio_context(filter
) == ctx
);
627 g_assert(job
->aio_context
== ctx
);
629 /* Change the AioContext of target */
630 aio_context_acquire(ctx
);
631 bdrv_try_set_aio_context(target
, main_ctx
, &error_abort
);
632 aio_context_release(ctx
);
633 g_assert(bdrv_get_aio_context(src
) == main_ctx
);
634 g_assert(bdrv_get_aio_context(target
) == main_ctx
);
635 g_assert(bdrv_get_aio_context(filter
) == main_ctx
);
637 /* With a BlockBackend on src, changing target must fail */
638 blk
= blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL
);
639 blk_insert_bs(blk
, src
, &error_abort
);
641 bdrv_try_set_aio_context(target
, ctx
, &local_err
);
643 error_free(local_err
);
645 g_assert(blk_get_aio_context(blk
) == main_ctx
);
646 g_assert(bdrv_get_aio_context(src
) == main_ctx
);
647 g_assert(bdrv_get_aio_context(target
) == main_ctx
);
648 g_assert(bdrv_get_aio_context(filter
) == main_ctx
);
650 /* ...unless we explicitly allow it */
651 aio_context_acquire(ctx
);
652 blk_set_allow_aio_context_change(blk
, true);
653 bdrv_try_set_aio_context(target
, ctx
, &error_abort
);
654 aio_context_release(ctx
);
656 g_assert(blk_get_aio_context(blk
) == ctx
);
657 g_assert(bdrv_get_aio_context(src
) == ctx
);
658 g_assert(bdrv_get_aio_context(target
) == ctx
);
659 g_assert(bdrv_get_aio_context(filter
) == ctx
);
661 job_cancel_sync_all();
663 aio_context_acquire(ctx
);
664 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
665 bdrv_try_set_aio_context(target
, main_ctx
, &error_abort
);
666 aio_context_release(ctx
);
673 static void test_attach_second_node(void)
675 IOThread
*iothread
= iothread_new();
676 AioContext
*ctx
= iothread_get_aio_context(iothread
);
677 AioContext
*main_ctx
= qemu_get_aio_context();
679 BlockDriverState
*bs
, *filter
;
682 blk
= blk_new(ctx
, BLK_PERM_ALL
, BLK_PERM_ALL
);
683 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
684 blk_insert_bs(blk
, bs
, &error_abort
);
686 options
= qdict_new();
687 qdict_put_str(options
, "driver", "raw");
688 qdict_put_str(options
, "file", "base");
690 filter
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
691 g_assert(blk_get_aio_context(blk
) == ctx
);
692 g_assert(bdrv_get_aio_context(bs
) == ctx
);
693 g_assert(bdrv_get_aio_context(filter
) == ctx
);
695 aio_context_acquire(ctx
);
696 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
697 aio_context_release(ctx
);
698 g_assert(blk_get_aio_context(blk
) == main_ctx
);
699 g_assert(bdrv_get_aio_context(bs
) == main_ctx
);
700 g_assert(bdrv_get_aio_context(filter
) == main_ctx
);
707 static void test_attach_preserve_blk_ctx(void)
709 IOThread
*iothread
= iothread_new();
710 AioContext
*ctx
= iothread_get_aio_context(iothread
);
712 BlockDriverState
*bs
;
714 blk
= blk_new(ctx
, BLK_PERM_ALL
, BLK_PERM_ALL
);
715 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
716 bs
->total_sectors
= 65536 / BDRV_SECTOR_SIZE
;
718 /* Add node to BlockBackend that has an iothread context assigned */
719 blk_insert_bs(blk
, bs
, &error_abort
);
720 g_assert(blk_get_aio_context(blk
) == ctx
);
721 g_assert(bdrv_get_aio_context(bs
) == ctx
);
723 /* Remove the node again */
724 aio_context_acquire(ctx
);
726 aio_context_release(ctx
);
727 g_assert(blk_get_aio_context(blk
) == ctx
);
728 g_assert(bdrv_get_aio_context(bs
) == qemu_get_aio_context());
730 /* Re-attach the node */
731 blk_insert_bs(blk
, bs
, &error_abort
);
732 g_assert(blk_get_aio_context(blk
) == ctx
);
733 g_assert(bdrv_get_aio_context(bs
) == ctx
);
735 aio_context_acquire(ctx
);
736 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
737 aio_context_release(ctx
);
742 int main(int argc
, char **argv
)
747 qemu_init_main_loop(&error_abort
);
749 g_test_init(&argc
, &argv
, NULL
);
751 for (i
= 0; i
< ARRAY_SIZE(sync_op_tests
); i
++) {
752 const SyncOpTest
*t
= &sync_op_tests
[i
];
753 g_test_add_data_func(t
->name
, t
, test_sync_op
);
756 g_test_add_func("/attach/blockjob", test_attach_blockjob
);
757 g_test_add_func("/attach/second_node", test_attach_second_node
);
758 g_test_add_func("/attach/preserve_blk_ctx", test_attach_preserve_blk_ctx
);
759 g_test_add_func("/propagate/basic", test_propagate_basic
);
760 g_test_add_func("/propagate/diamond", test_propagate_diamond
);
761 g_test_add_func("/propagate/mirror", test_propagate_mirror
);