/*
 * Block tests for iothreads
 *
 * Copyright (c) 2018 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
25 #include "qemu/osdep.h"
26 #include "block/block.h"
27 #include "block/blockjob_int.h"
28 #include "sysemu/block-backend.h"
29 #include "qapi/error.h"
30 #include "qapi/qmp/qdict.h"
31 #include "qemu/main-loop.h"
34 static int coroutine_fn
bdrv_test_co_prwv(BlockDriverState
*bs
,
35 uint64_t offset
, uint64_t bytes
,
36 QEMUIOVector
*qiov
, int flags
)
41 static int coroutine_fn
bdrv_test_co_pdiscard(BlockDriverState
*bs
,
42 int64_t offset
, int bytes
)
47 static int coroutine_fn
48 bdrv_test_co_truncate(BlockDriverState
*bs
, int64_t offset
, bool exact
,
49 PreallocMode prealloc
, BdrvRequestFlags flags
,
55 static int coroutine_fn
bdrv_test_co_block_status(BlockDriverState
*bs
,
57 int64_t offset
, int64_t count
,
58 int64_t *pnum
, int64_t *map
,
59 BlockDriverState
**file
)
65 static BlockDriver bdrv_test
= {
66 .format_name
= "test",
69 .bdrv_co_preadv
= bdrv_test_co_prwv
,
70 .bdrv_co_pwritev
= bdrv_test_co_prwv
,
71 .bdrv_co_pdiscard
= bdrv_test_co_pdiscard
,
72 .bdrv_co_truncate
= bdrv_test_co_truncate
,
73 .bdrv_co_block_status
= bdrv_test_co_block_status
,
76 static void test_sync_op_pread(BdrvChild
*c
)
82 ret
= bdrv_pread(c
, 0, buf
, sizeof(buf
));
83 g_assert_cmpint(ret
, ==, 512);
85 /* Early error: Negative offset */
86 ret
= bdrv_pread(c
, -2, buf
, sizeof(buf
));
87 g_assert_cmpint(ret
, ==, -EIO
);
90 static void test_sync_op_pwrite(BdrvChild
*c
)
92 uint8_t buf
[512] = { 0 };
96 ret
= bdrv_pwrite(c
, 0, buf
, sizeof(buf
));
97 g_assert_cmpint(ret
, ==, 512);
99 /* Early error: Negative offset */
100 ret
= bdrv_pwrite(c
, -2, buf
, sizeof(buf
));
101 g_assert_cmpint(ret
, ==, -EIO
);
104 static void test_sync_op_blk_pread(BlockBackend
*blk
)
110 ret
= blk_pread(blk
, 0, buf
, sizeof(buf
));
111 g_assert_cmpint(ret
, ==, 512);
113 /* Early error: Negative offset */
114 ret
= blk_pread(blk
, -2, buf
, sizeof(buf
));
115 g_assert_cmpint(ret
, ==, -EIO
);
118 static void test_sync_op_blk_pwrite(BlockBackend
*blk
)
120 uint8_t buf
[512] = { 0 };
124 ret
= blk_pwrite(blk
, 0, buf
, sizeof(buf
), 0);
125 g_assert_cmpint(ret
, ==, 512);
127 /* Early error: Negative offset */
128 ret
= blk_pwrite(blk
, -2, buf
, sizeof(buf
), 0);
129 g_assert_cmpint(ret
, ==, -EIO
);
132 static void test_sync_op_load_vmstate(BdrvChild
*c
)
137 /* Error: Driver does not support snapshots */
138 ret
= bdrv_load_vmstate(c
->bs
, buf
, 0, sizeof(buf
));
139 g_assert_cmpint(ret
, ==, -ENOTSUP
);
142 static void test_sync_op_save_vmstate(BdrvChild
*c
)
144 uint8_t buf
[512] = { 0 };
147 /* Error: Driver does not support snapshots */
148 ret
= bdrv_save_vmstate(c
->bs
, buf
, 0, sizeof(buf
));
149 g_assert_cmpint(ret
, ==, -ENOTSUP
);
152 static void test_sync_op_pdiscard(BdrvChild
*c
)
156 /* Normal success path */
157 c
->bs
->open_flags
|= BDRV_O_UNMAP
;
158 ret
= bdrv_pdiscard(c
, 0, 512);
159 g_assert_cmpint(ret
, ==, 0);
161 /* Early success: UNMAP not supported */
162 c
->bs
->open_flags
&= ~BDRV_O_UNMAP
;
163 ret
= bdrv_pdiscard(c
, 0, 512);
164 g_assert_cmpint(ret
, ==, 0);
166 /* Early error: Negative offset */
167 ret
= bdrv_pdiscard(c
, -2, 512);
168 g_assert_cmpint(ret
, ==, -EIO
);
171 static void test_sync_op_blk_pdiscard(BlockBackend
*blk
)
175 /* Early success: UNMAP not supported */
176 ret
= blk_pdiscard(blk
, 0, 512);
177 g_assert_cmpint(ret
, ==, 0);
179 /* Early error: Negative offset */
180 ret
= blk_pdiscard(blk
, -2, 512);
181 g_assert_cmpint(ret
, ==, -EIO
);
184 static void test_sync_op_truncate(BdrvChild
*c
)
188 /* Normal success path */
189 ret
= bdrv_truncate(c
, 65536, false, PREALLOC_MODE_OFF
, 0, NULL
);
190 g_assert_cmpint(ret
, ==, 0);
192 /* Early error: Negative offset */
193 ret
= bdrv_truncate(c
, -2, false, PREALLOC_MODE_OFF
, 0, NULL
);
194 g_assert_cmpint(ret
, ==, -EINVAL
);
196 /* Error: Read-only image */
197 c
->bs
->open_flags
&= ~BDRV_O_RDWR
;
199 ret
= bdrv_truncate(c
, 65536, false, PREALLOC_MODE_OFF
, 0, NULL
);
200 g_assert_cmpint(ret
, ==, -EACCES
);
202 c
->bs
->open_flags
|= BDRV_O_RDWR
;
205 static void test_sync_op_block_status(BdrvChild
*c
)
210 /* Normal success path */
211 ret
= bdrv_is_allocated(c
->bs
, 0, 65536, &n
);
212 g_assert_cmpint(ret
, ==, 0);
214 /* Early success: No driver support */
215 bdrv_test
.bdrv_co_block_status
= NULL
;
216 ret
= bdrv_is_allocated(c
->bs
, 0, 65536, &n
);
217 g_assert_cmpint(ret
, ==, 1);
219 /* Early success: bytes = 0 */
220 ret
= bdrv_is_allocated(c
->bs
, 0, 0, &n
);
221 g_assert_cmpint(ret
, ==, 0);
223 /* Early success: Offset > image size*/
224 ret
= bdrv_is_allocated(c
->bs
, 0x1000000, 0x1000000, &n
);
225 g_assert_cmpint(ret
, ==, 0);
228 static void test_sync_op_flush(BdrvChild
*c
)
232 /* Normal success path */
233 ret
= bdrv_flush(c
->bs
);
234 g_assert_cmpint(ret
, ==, 0);
236 /* Early success: Read-only image */
237 c
->bs
->open_flags
&= ~BDRV_O_RDWR
;
239 ret
= bdrv_flush(c
->bs
);
240 g_assert_cmpint(ret
, ==, 0);
242 c
->bs
->open_flags
|= BDRV_O_RDWR
;
245 static void test_sync_op_blk_flush(BlockBackend
*blk
)
247 BlockDriverState
*bs
= blk_bs(blk
);
250 /* Normal success path */
251 ret
= blk_flush(blk
);
252 g_assert_cmpint(ret
, ==, 0);
254 /* Early success: Read-only image */
255 bs
->open_flags
&= ~BDRV_O_RDWR
;
257 ret
= blk_flush(blk
);
258 g_assert_cmpint(ret
, ==, 0);
260 bs
->open_flags
|= BDRV_O_RDWR
;
263 static void test_sync_op_check(BdrvChild
*c
)
265 BdrvCheckResult result
;
268 /* Error: Driver does not implement check */
269 ret
= bdrv_check(c
->bs
, &result
, 0);
270 g_assert_cmpint(ret
, ==, -ENOTSUP
);
273 static void test_sync_op_invalidate_cache(BdrvChild
*c
)
275 /* Early success: Image is not inactive */
276 bdrv_invalidate_cache(c
->bs
, NULL
);
280 typedef struct SyncOpTest
{
282 void (*fn
)(BdrvChild
*c
);
283 void (*blkfn
)(BlockBackend
*blk
);
286 const SyncOpTest sync_op_tests
[] = {
288 .name
= "/sync-op/pread",
289 .fn
= test_sync_op_pread
,
290 .blkfn
= test_sync_op_blk_pread
,
292 .name
= "/sync-op/pwrite",
293 .fn
= test_sync_op_pwrite
,
294 .blkfn
= test_sync_op_blk_pwrite
,
296 .name
= "/sync-op/load_vmstate",
297 .fn
= test_sync_op_load_vmstate
,
299 .name
= "/sync-op/save_vmstate",
300 .fn
= test_sync_op_save_vmstate
,
302 .name
= "/sync-op/pdiscard",
303 .fn
= test_sync_op_pdiscard
,
304 .blkfn
= test_sync_op_blk_pdiscard
,
306 .name
= "/sync-op/truncate",
307 .fn
= test_sync_op_truncate
,
309 .name
= "/sync-op/block_status",
310 .fn
= test_sync_op_block_status
,
312 .name
= "/sync-op/flush",
313 .fn
= test_sync_op_flush
,
314 .blkfn
= test_sync_op_blk_flush
,
316 .name
= "/sync-op/check",
317 .fn
= test_sync_op_check
,
319 .name
= "/sync-op/invalidate_cache",
320 .fn
= test_sync_op_invalidate_cache
,
324 /* Test synchronous operations that run in a different iothread, so we have to
325 * poll for the coroutine there to return. */
326 static void test_sync_op(const void *opaque
)
328 const SyncOpTest
*t
= opaque
;
329 IOThread
*iothread
= iothread_new();
330 AioContext
*ctx
= iothread_get_aio_context(iothread
);
332 BlockDriverState
*bs
;
335 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
, BLK_PERM_ALL
);
336 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
337 bs
->total_sectors
= 65536 / BDRV_SECTOR_SIZE
;
338 blk_insert_bs(blk
, bs
, &error_abort
);
339 c
= QLIST_FIRST(&bs
->parents
);
341 blk_set_aio_context(blk
, ctx
, &error_abort
);
342 aio_context_acquire(ctx
);
347 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
348 aio_context_release(ctx
);
354 typedef struct TestBlockJob
{
356 bool should_complete
;
360 static int test_job_prepare(Job
*job
)
362 g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
366 static int coroutine_fn
test_job_run(Job
*job
, Error
**errp
)
368 TestBlockJob
*s
= container_of(job
, TestBlockJob
, common
.job
);
370 job_transition_to_ready(&s
->common
.job
);
371 while (!s
->should_complete
) {
373 g_assert(qemu_get_current_aio_context() == job
->aio_context
);
375 /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
376 * emulate some actual activity (probably some I/O) here so that the
377 * drain involved in AioContext switches has to wait for this activity
379 qemu_co_sleep_ns(QEMU_CLOCK_REALTIME
, 1000000);
381 job_pause_point(&s
->common
.job
);
384 g_assert(qemu_get_current_aio_context() == job
->aio_context
);
388 static void test_job_complete(Job
*job
, Error
**errp
)
390 TestBlockJob
*s
= container_of(job
, TestBlockJob
, common
.job
);
391 s
->should_complete
= true;
394 BlockJobDriver test_job_driver
= {
396 .instance_size
= sizeof(TestBlockJob
),
397 .free
= block_job_free
,
398 .user_resume
= block_job_user_resume
,
400 .complete
= test_job_complete
,
401 .prepare
= test_job_prepare
,
405 static void test_attach_blockjob(void)
407 IOThread
*iothread
= iothread_new();
408 AioContext
*ctx
= iothread_get_aio_context(iothread
);
410 BlockDriverState
*bs
;
413 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
, BLK_PERM_ALL
);
414 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
415 blk_insert_bs(blk
, bs
, &error_abort
);
417 tjob
= block_job_create("job0", &test_job_driver
, NULL
, bs
,
419 0, 0, NULL
, NULL
, &error_abort
);
420 job_start(&tjob
->common
.job
);
422 while (tjob
->n
== 0) {
423 aio_poll(qemu_get_aio_context(), false);
426 blk_set_aio_context(blk
, ctx
, &error_abort
);
429 while (tjob
->n
== 0) {
430 aio_poll(qemu_get_aio_context(), false);
433 aio_context_acquire(ctx
);
434 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
435 aio_context_release(ctx
);
438 while (tjob
->n
== 0) {
439 aio_poll(qemu_get_aio_context(), false);
442 blk_set_aio_context(blk
, ctx
, &error_abort
);
445 while (tjob
->n
== 0) {
446 aio_poll(qemu_get_aio_context(), false);
449 aio_context_acquire(ctx
);
450 job_complete_sync(&tjob
->common
.job
, &error_abort
);
451 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
452 aio_context_release(ctx
);
459 * Test that changing the AioContext for one node in a tree (here through blk)
460 * changes all other nodes as well:
464 * | bs_verify [blkverify]
467 * bs_a [bdrv_test] bs_b [bdrv_test]
470 static void test_propagate_basic(void)
472 IOThread
*iothread
= iothread_new();
473 AioContext
*ctx
= iothread_get_aio_context(iothread
);
474 AioContext
*main_ctx
;
476 BlockDriverState
*bs_a
, *bs_b
, *bs_verify
;
480 * Create bs_a and its BlockBackend. We cannot take the RESIZE
481 * permission because blkverify will not share it on the test
484 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
& ~BLK_PERM_RESIZE
,
486 bs_a
= bdrv_new_open_driver(&bdrv_test
, "bs_a", BDRV_O_RDWR
, &error_abort
);
487 blk_insert_bs(blk
, bs_a
, &error_abort
);
490 bs_b
= bdrv_new_open_driver(&bdrv_test
, "bs_b", BDRV_O_RDWR
, &error_abort
);
492 /* Create blkverify filter that references both bs_a and bs_b */
493 options
= qdict_new();
494 qdict_put_str(options
, "driver", "blkverify");
495 qdict_put_str(options
, "test", "bs_a");
496 qdict_put_str(options
, "raw", "bs_b");
498 bs_verify
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
500 /* Switch the AioContext */
501 blk_set_aio_context(blk
, ctx
, &error_abort
);
502 g_assert(blk_get_aio_context(blk
) == ctx
);
503 g_assert(bdrv_get_aio_context(bs_a
) == ctx
);
504 g_assert(bdrv_get_aio_context(bs_verify
) == ctx
);
505 g_assert(bdrv_get_aio_context(bs_b
) == ctx
);
507 /* Switch the AioContext back */
508 main_ctx
= qemu_get_aio_context();
509 aio_context_acquire(ctx
);
510 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
511 aio_context_release(ctx
);
512 g_assert(blk_get_aio_context(blk
) == main_ctx
);
513 g_assert(bdrv_get_aio_context(bs_a
) == main_ctx
);
514 g_assert(bdrv_get_aio_context(bs_verify
) == main_ctx
);
515 g_assert(bdrv_get_aio_context(bs_b
) == main_ctx
);
517 bdrv_unref(bs_verify
);
524 * Test that diamonds in the graph don't lead to endless recursion:
528 * bs_verify [blkverify]
531 * bs_b [raw] bs_c[raw]
536 static void test_propagate_diamond(void)
538 IOThread
*iothread
= iothread_new();
539 AioContext
*ctx
= iothread_get_aio_context(iothread
);
540 AioContext
*main_ctx
;
542 BlockDriverState
*bs_a
, *bs_b
, *bs_c
, *bs_verify
;
546 bs_a
= bdrv_new_open_driver(&bdrv_test
, "bs_a", BDRV_O_RDWR
, &error_abort
);
548 /* Create bs_b and bc_c */
549 options
= qdict_new();
550 qdict_put_str(options
, "driver", "raw");
551 qdict_put_str(options
, "file", "bs_a");
552 qdict_put_str(options
, "node-name", "bs_b");
553 bs_b
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
555 options
= qdict_new();
556 qdict_put_str(options
, "driver", "raw");
557 qdict_put_str(options
, "file", "bs_a");
558 qdict_put_str(options
, "node-name", "bs_c");
559 bs_c
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
561 /* Create blkverify filter that references both bs_b and bs_c */
562 options
= qdict_new();
563 qdict_put_str(options
, "driver", "blkverify");
564 qdict_put_str(options
, "test", "bs_b");
565 qdict_put_str(options
, "raw", "bs_c");
567 bs_verify
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
569 * Do not take the RESIZE permission: This would require the same
570 * from bs_c and thus from bs_a; however, blkverify will not share
571 * it on bs_b, and thus it will not be available for bs_a.
573 blk
= blk_new(qemu_get_aio_context(), BLK_PERM_ALL
& ~BLK_PERM_RESIZE
,
575 blk_insert_bs(blk
, bs_verify
, &error_abort
);
577 /* Switch the AioContext */
578 blk_set_aio_context(blk
, ctx
, &error_abort
);
579 g_assert(blk_get_aio_context(blk
) == ctx
);
580 g_assert(bdrv_get_aio_context(bs_verify
) == ctx
);
581 g_assert(bdrv_get_aio_context(bs_a
) == ctx
);
582 g_assert(bdrv_get_aio_context(bs_b
) == ctx
);
583 g_assert(bdrv_get_aio_context(bs_c
) == ctx
);
585 /* Switch the AioContext back */
586 main_ctx
= qemu_get_aio_context();
587 aio_context_acquire(ctx
);
588 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
589 aio_context_release(ctx
);
590 g_assert(blk_get_aio_context(blk
) == main_ctx
);
591 g_assert(bdrv_get_aio_context(bs_verify
) == main_ctx
);
592 g_assert(bdrv_get_aio_context(bs_a
) == main_ctx
);
593 g_assert(bdrv_get_aio_context(bs_b
) == main_ctx
);
594 g_assert(bdrv_get_aio_context(bs_c
) == main_ctx
);
597 bdrv_unref(bs_verify
);
603 static void test_propagate_mirror(void)
605 IOThread
*iothread
= iothread_new();
606 AioContext
*ctx
= iothread_get_aio_context(iothread
);
607 AioContext
*main_ctx
= qemu_get_aio_context();
608 BlockDriverState
*src
, *target
, *filter
;
611 Error
*local_err
= NULL
;
613 /* Create src and target*/
614 src
= bdrv_new_open_driver(&bdrv_test
, "src", BDRV_O_RDWR
, &error_abort
);
615 target
= bdrv_new_open_driver(&bdrv_test
, "target", BDRV_O_RDWR
,
618 /* Start a mirror job */
619 mirror_start("job0", src
, target
, NULL
, JOB_DEFAULT
, 0, 0, 0,
620 MIRROR_SYNC_MODE_NONE
, MIRROR_OPEN_BACKING_CHAIN
, false,
621 BLOCKDEV_ON_ERROR_REPORT
, BLOCKDEV_ON_ERROR_REPORT
,
622 false, "filter_node", MIRROR_COPY_MODE_BACKGROUND
,
624 job
= job_get("job0");
625 filter
= bdrv_find_node("filter_node");
627 /* Change the AioContext of src */
628 bdrv_try_set_aio_context(src
, ctx
, &error_abort
);
629 g_assert(bdrv_get_aio_context(src
) == ctx
);
630 g_assert(bdrv_get_aio_context(target
) == ctx
);
631 g_assert(bdrv_get_aio_context(filter
) == ctx
);
632 g_assert(job
->aio_context
== ctx
);
634 /* Change the AioContext of target */
635 aio_context_acquire(ctx
);
636 bdrv_try_set_aio_context(target
, main_ctx
, &error_abort
);
637 aio_context_release(ctx
);
638 g_assert(bdrv_get_aio_context(src
) == main_ctx
);
639 g_assert(bdrv_get_aio_context(target
) == main_ctx
);
640 g_assert(bdrv_get_aio_context(filter
) == main_ctx
);
642 /* With a BlockBackend on src, changing target must fail */
643 blk
= blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL
);
644 blk_insert_bs(blk
, src
, &error_abort
);
646 bdrv_try_set_aio_context(target
, ctx
, &local_err
);
647 error_free_or_abort(&local_err
);
649 g_assert(blk_get_aio_context(blk
) == main_ctx
);
650 g_assert(bdrv_get_aio_context(src
) == main_ctx
);
651 g_assert(bdrv_get_aio_context(target
) == main_ctx
);
652 g_assert(bdrv_get_aio_context(filter
) == main_ctx
);
654 /* ...unless we explicitly allow it */
655 aio_context_acquire(ctx
);
656 blk_set_allow_aio_context_change(blk
, true);
657 bdrv_try_set_aio_context(target
, ctx
, &error_abort
);
658 aio_context_release(ctx
);
660 g_assert(blk_get_aio_context(blk
) == ctx
);
661 g_assert(bdrv_get_aio_context(src
) == ctx
);
662 g_assert(bdrv_get_aio_context(target
) == ctx
);
663 g_assert(bdrv_get_aio_context(filter
) == ctx
);
665 job_cancel_sync_all();
667 aio_context_acquire(ctx
);
668 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
669 bdrv_try_set_aio_context(target
, main_ctx
, &error_abort
);
670 aio_context_release(ctx
);
677 static void test_attach_second_node(void)
679 IOThread
*iothread
= iothread_new();
680 AioContext
*ctx
= iothread_get_aio_context(iothread
);
681 AioContext
*main_ctx
= qemu_get_aio_context();
683 BlockDriverState
*bs
, *filter
;
686 blk
= blk_new(ctx
, BLK_PERM_ALL
, BLK_PERM_ALL
);
687 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
688 blk_insert_bs(blk
, bs
, &error_abort
);
690 options
= qdict_new();
691 qdict_put_str(options
, "driver", "raw");
692 qdict_put_str(options
, "file", "base");
694 filter
= bdrv_open(NULL
, NULL
, options
, BDRV_O_RDWR
, &error_abort
);
695 g_assert(blk_get_aio_context(blk
) == ctx
);
696 g_assert(bdrv_get_aio_context(bs
) == ctx
);
697 g_assert(bdrv_get_aio_context(filter
) == ctx
);
699 aio_context_acquire(ctx
);
700 blk_set_aio_context(blk
, main_ctx
, &error_abort
);
701 aio_context_release(ctx
);
702 g_assert(blk_get_aio_context(blk
) == main_ctx
);
703 g_assert(bdrv_get_aio_context(bs
) == main_ctx
);
704 g_assert(bdrv_get_aio_context(filter
) == main_ctx
);
711 static void test_attach_preserve_blk_ctx(void)
713 IOThread
*iothread
= iothread_new();
714 AioContext
*ctx
= iothread_get_aio_context(iothread
);
716 BlockDriverState
*bs
;
718 blk
= blk_new(ctx
, BLK_PERM_ALL
, BLK_PERM_ALL
);
719 bs
= bdrv_new_open_driver(&bdrv_test
, "base", BDRV_O_RDWR
, &error_abort
);
720 bs
->total_sectors
= 65536 / BDRV_SECTOR_SIZE
;
722 /* Add node to BlockBackend that has an iothread context assigned */
723 blk_insert_bs(blk
, bs
, &error_abort
);
724 g_assert(blk_get_aio_context(blk
) == ctx
);
725 g_assert(bdrv_get_aio_context(bs
) == ctx
);
727 /* Remove the node again */
728 aio_context_acquire(ctx
);
730 aio_context_release(ctx
);
731 g_assert(blk_get_aio_context(blk
) == ctx
);
732 g_assert(bdrv_get_aio_context(bs
) == qemu_get_aio_context());
734 /* Re-attach the node */
735 blk_insert_bs(blk
, bs
, &error_abort
);
736 g_assert(blk_get_aio_context(blk
) == ctx
);
737 g_assert(bdrv_get_aio_context(bs
) == ctx
);
739 aio_context_acquire(ctx
);
740 blk_set_aio_context(blk
, qemu_get_aio_context(), &error_abort
);
741 aio_context_release(ctx
);
746 int main(int argc
, char **argv
)
751 qemu_init_main_loop(&error_abort
);
753 g_test_init(&argc
, &argv
, NULL
);
755 for (i
= 0; i
< ARRAY_SIZE(sync_op_tests
); i
++) {
756 const SyncOpTest
*t
= &sync_op_tests
[i
];
757 g_test_add_data_func(t
->name
, t
, test_sync_op
);
760 g_test_add_func("/attach/blockjob", test_attach_blockjob
);
761 g_test_add_func("/attach/second_node", test_attach_second_node
);
762 g_test_add_func("/attach/preserve_blk_ctx", test_attach_preserve_blk_ctx
);
763 g_test_add_func("/propagate/basic", test_propagate_basic
);
764 g_test_add_func("/propagate/diamond", test_propagate_diamond
);
765 g_test_add_func("/propagate/mirror", test_propagate_mirror
);