/*
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "block/aio.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/sockets.h"
#include "qemu/error-report.h"
#include "qemu/coroutine-core.h"
#include "qemu/main-loop.h"
static AioContext *ctx;
typedef struct {
    EventNotifier e;
    int n;
    int active;
    bool auto_set;
} EventNotifierTestData;
/* Wait until event notifier becomes inactive */
static void wait_until_inactive(EventNotifierTestData *data)
{
    while (data->active > 0) {
        aio_poll(ctx, true);
    }
}
/* Simple callbacks for testing. */

typedef struct {
    QEMUBH *bh;
    int n;
    int max;
} BHTestData;

typedef struct {
    QEMUTimer timer;
    QEMUClockType clock_type;
    int n;
    int max;
    int64_t ns;
    AioContext *ctx;
} TimerTestData;
static void bh_test_cb(void *opaque)
{
    BHTestData *data = opaque;
    if (++data->n < data->max) {
        qemu_bh_schedule(data->bh);
    }
}
static void timer_test_cb(void *opaque)
{
    TimerTestData *data = opaque;
    if (++data->n < data->max) {
        timer_mod(&data->timer,
                  qemu_clock_get_ns(data->clock_type) + data->ns);
    }
}
static void dummy_io_handler_read(EventNotifier *e)
{
}
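
/* Like bh_test_cb, but once data->max invocations are reached the callback
 * deletes its own bottom half and clears data->bh. */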
static void bh_delete_cb(void *opaque)
{
    BHTestData *data = opaque;
    if (++data->n < data->max) {
        qemu_bh_schedule(data->bh);
    } else {
        qemu_bh_delete(data->bh);
        data->bh = NULL;
    }
}
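
/* Clears the notifier, counts the invocation in data->n, decrements
 * data->active while it is positive, and re-arms the notifier when
 * auto_set is requested and events remain. */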
static void event_ready_cb(EventNotifier *e)
{
    EventNotifierTestData *data = container_of(e, EventNotifierTestData, e);
    g_assert(event_notifier_test_and_clear(e));
    data->n++;
    if (data->active > 0) {
        data->active--;
    }
    if (data->auto_set && data->active) {
        event_notifier_set(e);
    }
}
/* Tests using aio_*. */

typedef struct {
    QemuMutex start_lock;
    EventNotifier notifier;
    bool thread_acquired;
} AcquireTestData;
static void *test_acquire_thread(void *opaque)
{
    AcquireTestData *data = opaque;

    /* Wait for other thread to let us start */
    qemu_mutex_lock(&data->start_lock);
    qemu_mutex_unlock(&data->start_lock);

    /* event_notifier_set might be called either before or after
     * the main thread's call to poll().  The test case's outcome
     * should be the same in either case.
     */
    event_notifier_set(&data->notifier);
    aio_context_acquire(ctx);
    aio_context_release(ctx);

    data->thread_acquired = true; /* success, we got here */

    return NULL;
}
static void set_event_notifier(AioContext *nctx, EventNotifier *notifier,
                               EventNotifierHandler *handler)
{
    aio_set_event_notifier(nctx, notifier, handler, NULL, NULL);
}
static void dummy_notifier_read(EventNotifier *n)
{
    event_notifier_test_and_clear(n);
}
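
/* Verify that aio_context_acquire() in another thread blocks while the main
 * thread holds the context inside aio_poll(), and only succeeds once the
 * main thread calls aio_context_release(). */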
static void test_acquire(void)
{
    QemuThread thread;
    AcquireTestData data;

    /* Dummy event notifier ensures aio_poll() will block */
    event_notifier_init(&data.notifier, false);
    set_event_notifier(ctx, &data.notifier, dummy_notifier_read);
    g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */

    qemu_mutex_init(&data.start_lock);
    qemu_mutex_lock(&data.start_lock);
    data.thread_acquired = false;

    qemu_thread_create(&thread, "test_acquire_thread",
                       test_acquire_thread,
                       &data, QEMU_THREAD_JOINABLE);

    /* Block in aio_poll(), let other thread kick us and acquire context */
    aio_context_acquire(ctx);
    qemu_mutex_unlock(&data.start_lock); /* let the thread run */
    g_assert(aio_poll(ctx, true));
    g_assert(!data.thread_acquired);
    aio_context_release(ctx);

    qemu_thread_join(&thread);
    set_event_notifier(ctx, &data.notifier, NULL);
    event_notifier_cleanup(&data.notifier);

    g_assert(data.thread_acquired);
}
static void test_bh_schedule(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}
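
/* The bottom half reschedules itself, so each aio_poll() call runs it at
 * most once; repeated polls are needed to reach data.max. */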
static void test_bh_schedule10(void)
{
    BHTestData data = { .n = 0, .max = 10 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 2);

    while (data.n < 10) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data.n, ==, 10);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 10);
    qemu_bh_delete(data.bh);
}
static void test_bh_cancel(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_cancel(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    qemu_bh_delete(data.bh);
}
static void test_bh_delete(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_delete(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
}
static void test_bh_delete_from_cb(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);

    qemu_bh_schedule(data1.bh);
    g_assert_cmpint(data1.n, ==, 0);

    while (data1.n < data1.max) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert(data1.bh == NULL);

    g_assert(!aio_poll(ctx, false));
}
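
/* Four self-deleting bottom halves with different iteration counts; the
 * first poll runs each of them exactly once. */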
static void test_bh_delete_from_cb_many(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };
    BHTestData data2 = { .n = 0, .max = 3 };
    BHTestData data3 = { .n = 0, .max = 2 };
    BHTestData data4 = { .n = 0, .max = 4 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
    data2.bh = aio_bh_new(ctx, bh_delete_cb, &data2);
    data3.bh = aio_bh_new(ctx, bh_delete_cb, &data3);
    data4.bh = aio_bh_new(ctx, bh_delete_cb, &data4);

    qemu_bh_schedule(data1.bh);
    qemu_bh_schedule(data2.bh);
    qemu_bh_schedule(data3.bh);
    qemu_bh_schedule(data4.bh);
    g_assert_cmpint(data1.n, ==, 0);
    g_assert_cmpint(data2.n, ==, 0);
    g_assert_cmpint(data3.n, ==, 0);
    g_assert_cmpint(data4.n, ==, 0);

    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data1.n, ==, 1);
    g_assert_cmpint(data2.n, ==, 1);
    g_assert_cmpint(data3.n, ==, 1);
    g_assert_cmpint(data4.n, ==, 1);
    g_assert(data1.bh == NULL);

    while (data1.n < data1.max ||
           data2.n < data2.max ||
           data3.n < data3.max ||
           data4.n < data4.max) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert_cmpint(data2.n, ==, data2.max);
    g_assert_cmpint(data3.n, ==, data3.max);
    g_assert_cmpint(data4.n, ==, data4.max);
    g_assert(data1.bh == NULL);
    g_assert(data2.bh == NULL);
    g_assert(data3.bh == NULL);
    g_assert(data4.bh == NULL);
}
static void test_bh_flush(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}
static void test_set_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 0 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    event_notifier_cleanup(&data.e);
}
static void test_wait_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 1 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 1);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_cleanup(&data.e);
}
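
/* With auto_set, event_ready_cb keeps re-arming the notifier until
 * data.active drains from 10 to 0. */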
static void test_flush_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 10);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 9);
    g_assert(aio_poll(ctx, false));

    wait_until_inactive(&data);
    g_assert_cmpint(data.n, ==, 10);
    g_assert_cmpint(data.active, ==, 0);
    g_assert(!aio_poll(ctx, false));

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    event_notifier_cleanup(&data.e);
}
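
/* data.active is 0, so only the dummy notifier (with active == 1) forces
 * aio_poll() to keep dispatching event notifier callbacks. */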
static void test_wait_event_notifier_noflush(void)
{
    EventNotifierTestData data = { .n = 0 };
    EventNotifierTestData dummy = { .n = 0, .active = 1 };

    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    /* Until there is an active descriptor, aio_poll may or may not call
     * event_ready_cb. Still, it must not block. */
    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, true));
    data.n = 0;

    /* An active event notifier forces aio_poll to look at EventNotifiers. */
    event_notifier_init(&dummy.e, false);
    set_event_notifier(ctx, &dummy.e, event_ready_cb);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_set(&dummy.e);
    wait_until_inactive(&dummy);
    g_assert_cmpint(data.n, ==, 2);
    g_assert_cmpint(dummy.n, ==, 1);
    g_assert_cmpint(dummy.active, ==, 0);

    set_event_notifier(ctx, &dummy.e, NULL);
    event_notifier_cleanup(&dummy.e);

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_cleanup(&data.e);
}
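
/* Arm a 750 ms timer (at most two expirations) and check that aio_poll()
 * only reports progress once the timer has actually fired. */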
static void test_timer_schedule(void)
{
    TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
                           .max = 2,
                           .clock_type = QEMU_CLOCK_REALTIME };
    EventNotifier e;

    /* aio_poll will not block to wait for timers to complete unless it has
     * an fd to wait on. Fixing this breaks other tests. So create a dummy one.
     */
    event_notifier_init(&e, false);
    set_event_notifier(ctx, &e, dummy_io_handler_read);
    aio_poll(ctx, false);

    aio_timer_init(ctx, &data.timer, data.clock_type,
                   SCALE_NS, timer_test_cb, &data);
    timer_mod(&data.timer,
              qemu_clock_get_ns(data.clock_type) +
              data.ns);

    g_assert_cmpint(data.n, ==, 0);

    /* timer_mod may well cause an event notifier to have gone off,
     * so clear that
     */
    do {} while (aio_poll(ctx, false));

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    g_usleep(1 * G_USEC_PER_SEC);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    /* timer_mod called by our callback */
    do {} while (aio_poll(ctx, false));

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 2);

    /* As max is now 2, an event notifier should not have gone off */

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    set_event_notifier(ctx, &e, NULL);
    event_notifier_cleanup(&e);

    timer_del(&data.timer);
}
/* Now the same tests, using the context as a GSource. They are
 * very similar to the ones above, with g_main_context_iteration
 * replacing aio_poll. However:
 * - sometimes both the AioContext and the glib main loop wake
 *   themselves up. Hence, some "g_assert(!aio_poll(ctx, false));"
 *   are replaced by "while (g_main_context_iteration(NULL, false));".
 * - there is no exact replacement for a blocking wait.
 *   "while (g_main_context_iteration(NULL, true))" seems to work,
 *   but it is not documented _why_ it works. For these tests a
 *   non-blocking loop like "while (g_main_context_iteration(NULL, false))"
 *   works well, and that's what I am using.
 */
static void test_source_flush(void)
{
    g_assert(!g_main_context_iteration(NULL, false));
    aio_notify(ctx);
    while (g_main_context_iteration(NULL, false));
    g_assert(!g_main_context_iteration(NULL, false));
}
static void test_source_bh_schedule(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}
static void test_source_bh_schedule10(void)
{
    BHTestData data = { .n = 0, .max = 10 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 2);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 10);

    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 10);
    qemu_bh_delete(data.bh);
}
static void test_source_bh_cancel(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_cancel(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    qemu_bh_delete(data.bh);
}
static void test_source_bh_delete(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_delete(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
}
static void test_source_bh_delete_from_cb(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);

    qemu_bh_schedule(data1.bh);
    g_assert_cmpint(data1.n, ==, 0);

    g_main_context_iteration(NULL, true);
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert(data1.bh == NULL);

    assert(g_main_context_iteration(NULL, false));
    assert(!g_main_context_iteration(NULL, false));
}
static void test_source_bh_delete_from_cb_many(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };
    BHTestData data2 = { .n = 0, .max = 3 };
    BHTestData data3 = { .n = 0, .max = 2 };
    BHTestData data4 = { .n = 0, .max = 4 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
    data2.bh = aio_bh_new(ctx, bh_delete_cb, &data2);
    data3.bh = aio_bh_new(ctx, bh_delete_cb, &data3);
    data4.bh = aio_bh_new(ctx, bh_delete_cb, &data4);

    qemu_bh_schedule(data1.bh);
    qemu_bh_schedule(data2.bh);
    qemu_bh_schedule(data3.bh);
    qemu_bh_schedule(data4.bh);
    g_assert_cmpint(data1.n, ==, 0);
    g_assert_cmpint(data2.n, ==, 0);
    g_assert_cmpint(data3.n, ==, 0);
    g_assert_cmpint(data4.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data1.n, ==, 1);
    g_assert_cmpint(data2.n, ==, 1);
    g_assert_cmpint(data3.n, ==, 1);
    g_assert_cmpint(data4.n, ==, 1);
    g_assert(data1.bh == NULL);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert_cmpint(data2.n, ==, data2.max);
    g_assert_cmpint(data3.n, ==, data3.max);
    g_assert_cmpint(data4.n, ==, data4.max);
    g_assert(data1.bh == NULL);
    g_assert(data2.bh == NULL);
    g_assert(data3.bh == NULL);
    g_assert(data4.bh == NULL);
}
static void test_source_bh_flush(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}
static void test_source_set_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 0 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    event_notifier_cleanup(&data.e);
}
static void test_source_wait_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 1 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 1);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_cleanup(&data.e);
}
static void test_source_flush_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 10);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 9);
    g_assert(g_main_context_iteration(NULL, false));

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 10);
    g_assert_cmpint(data.active, ==, 0);
    g_assert(!g_main_context_iteration(NULL, false));

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    event_notifier_cleanup(&data.e);
}
static void test_source_wait_event_notifier_noflush(void)
{
    EventNotifierTestData data = { .n = 0 };
    EventNotifierTestData dummy = { .n = 0, .active = 1 };

    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);

    /* Until there is an active descriptor, glib may or may not call
     * event_ready_cb. Still, it must not block. */
    event_notifier_set(&data.e);
    g_main_context_iteration(NULL, true);
    data.n = 0;

    /* An active event notifier forces aio_poll to look at EventNotifiers. */
    event_notifier_init(&dummy.e, false);
    set_event_notifier(ctx, &dummy.e, event_ready_cb);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);
    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_set(&dummy.e);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);
    g_assert_cmpint(dummy.n, ==, 1);
    g_assert_cmpint(dummy.active, ==, 0);

    set_event_notifier(ctx, &dummy.e, NULL);
    event_notifier_cleanup(&dummy.e);

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_cleanup(&data.e);
}
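
/* Same timer test as test_timer_schedule, but driven through the GLib main
 * loop via g_main_context_iteration(). */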
static void test_source_timer_schedule(void)
{
    TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
                           .max = 2,
                           .clock_type = QEMU_CLOCK_REALTIME };
    EventNotifier e;
    int64_t expiry;

    /* aio_poll will not block to wait for timers to complete unless it has
     * an fd to wait on. Fixing this breaks other tests. So create a dummy one.
     */
    event_notifier_init(&e, false);
    set_event_notifier(ctx, &e, dummy_io_handler_read);
    do {} while (g_main_context_iteration(NULL, false));

    aio_timer_init(ctx, &data.timer, data.clock_type,
                   SCALE_NS, timer_test_cb, &data);
    expiry = qemu_clock_get_ns(data.clock_type) +
             data.ns;
    timer_mod(&data.timer, expiry);

    g_assert_cmpint(data.n, ==, 0);

    g_usleep(1 * G_USEC_PER_SEC);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 1);
    expiry += data.ns;

    while (data.n < 2) {
        g_main_context_iteration(NULL, true);
    }

    g_assert_cmpint(data.n, ==, 2);
    g_assert(qemu_clock_get_ns(data.clock_type) > expiry);

    set_event_notifier(ctx, &e, NULL);
    event_notifier_cleanup(&e);

    timer_del(&data.timer);
}
/*
 * Check that aio_co_enter() can chain many times
 *
 * Two coroutines should be able to invoke each other via aio_co_enter() many
 * times without hitting a limit like stack exhaustion. In other words, the
 * calls should be chained instead of nested.
 */

typedef struct {
    Coroutine *other;
    int i;
    int max;
} ChainData;

static void coroutine_fn chain(void *opaque)
{
    ChainData *data = opaque;

    for (data->i = 0; data->i < data->max; data->i++) {
        /* Queue up the other coroutine... */
        aio_co_enter(ctx, data->other);

        /* ...and give control to it */
        qemu_coroutine_yield();
    }
}
static void test_queue_chaining(void)
{
    /* This number of iterations hit stack exhaustion in the past: */
    ChainData data_a = { .max = 25000 };
    ChainData data_b = { .max = 25000 };

    data_b.other = qemu_coroutine_create(chain, &data_a);
    data_a.other = qemu_coroutine_create(chain, &data_b);

    qemu_coroutine_enter(data_b.other);

    g_assert_cmpint(data_a.i, ==, data_a.max);
    g_assert_cmpint(data_b.i, ==, data_b.max - 1);

    /* Allow the second coroutine to terminate */
    qemu_coroutine_enter(data_a.other);

    g_assert_cmpint(data_b.i, ==, data_b.max);
}
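
/* Coroutine body: it must run in the main (BQL) thread even though it is
 * entered from a worker thread via aio_co_enter(). */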
static void co_check_current_thread(void *opaque)
{
    QemuThread *main_thread = opaque;
    assert(qemu_thread_is_self(main_thread));
}
static void *test_aio_co_enter(void *co)
{
    /*
     * qemu_get_current_aio_context() should not be the main thread
     * AioContext, because this is a worker thread that has not taken
     * the BQL. So aio_co_enter will schedule the coroutine in the
     * main thread AioContext.
     */
    aio_co_enter(qemu_get_aio_context(), co);
    return NULL;
}
static void test_worker_thread_co_enter(void)
{
    QemuThread this_thread, worker_thread;
    Coroutine *co;

    qemu_thread_get_self(&this_thread);
    co = qemu_coroutine_create(co_check_current_thread, &this_thread);

    qemu_thread_create(&worker_thread, "test_acquire_thread",
                       test_aio_co_enter,
                       co, QEMU_THREAD_JOINABLE);

    /* Test aio_co_enter from a worker thread. */
    qemu_thread_join(&worker_thread);
    g_assert(aio_poll(ctx, true));
    g_assert(!aio_poll(ctx, false));
}
int main(int argc, char **argv)
{
    qemu_init_main_loop(&error_fatal);
    ctx = qemu_get_aio_context();

    while (g_main_context_iteration(NULL, false));

    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/aio/acquire", test_acquire);
    g_test_add_func("/aio/bh/schedule", test_bh_schedule);
    g_test_add_func("/aio/bh/schedule10", test_bh_schedule10);
    g_test_add_func("/aio/bh/cancel", test_bh_cancel);
    g_test_add_func("/aio/bh/delete", test_bh_delete);
    g_test_add_func("/aio/bh/callback-delete/one", test_bh_delete_from_cb);
    g_test_add_func("/aio/bh/callback-delete/many", test_bh_delete_from_cb_many);
    g_test_add_func("/aio/bh/flush", test_bh_flush);
    g_test_add_func("/aio/event/add-remove", test_set_event_notifier);
    g_test_add_func("/aio/event/wait", test_wait_event_notifier);
    g_test_add_func("/aio/event/wait/no-flush-cb", test_wait_event_notifier_noflush);
    g_test_add_func("/aio/event/flush", test_flush_event_notifier);
    g_test_add_func("/aio/timer/schedule", test_timer_schedule);

    g_test_add_func("/aio/coroutine/queue-chaining", test_queue_chaining);
    g_test_add_func("/aio/coroutine/worker-thread-co-enter", test_worker_thread_co_enter);

    g_test_add_func("/aio-gsource/flush", test_source_flush);
    g_test_add_func("/aio-gsource/bh/schedule", test_source_bh_schedule);
    g_test_add_func("/aio-gsource/bh/schedule10", test_source_bh_schedule10);
    g_test_add_func("/aio-gsource/bh/cancel", test_source_bh_cancel);
    g_test_add_func("/aio-gsource/bh/delete", test_source_bh_delete);
    g_test_add_func("/aio-gsource/bh/callback-delete/one", test_source_bh_delete_from_cb);
    g_test_add_func("/aio-gsource/bh/callback-delete/many", test_source_bh_delete_from_cb_many);
    g_test_add_func("/aio-gsource/bh/flush", test_source_bh_flush);
    g_test_add_func("/aio-gsource/event/add-remove", test_source_set_event_notifier);
    g_test_add_func("/aio-gsource/event/wait", test_source_wait_event_notifier);
    g_test_add_func("/aio-gsource/event/wait/no-flush-cb", test_source_wait_event_notifier_noflush);
    g_test_add_func("/aio-gsource/event/flush", test_source_flush_event_notifier);
    g_test_add_func("/aio-gsource/timer/schedule", test_source_timer_schedule);
    return g_test_run();
}