qemu/armbru.git: tests/unit/test-aio.c

/*
 * AioContext tests
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini    <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "block/aio.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/sockets.h"
#include "qemu/error-report.h"
#include "qemu/coroutine-core.h"
#include "qemu/main-loop.h"

static AioContext *ctx;
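
/*
 * Shared state for the event-notifier tests: @e is the notifier under test,
 * @n counts how many times event_ready_cb() ran, @active is the number of
 * firings the test still expects, and @auto_set makes the callback re-arm
 * the notifier while it is still active.
 */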
typedef struct {
    EventNotifier e;
    int n;
    int active;
    bool auto_set;
} EventNotifierTestData;

/* Wait until event notifier becomes inactive */
static void wait_until_inactive(EventNotifierTestData *data)
{
    while (data->active > 0) {
        aio_poll(ctx, true);
    }
}

/* Simple callbacks for testing.  */

typedef struct {
    QEMUBH *bh;
    int n;
    int max;
} BHTestData;

typedef struct {
    QEMUTimer timer;
    QEMUClockType clock_type;
    int n;
    int max;
    int64_t ns;
    AioContext *ctx;
} TimerTestData;
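
/*
 * bh_test_cb() and timer_test_cb() count their invocations in ->n and
 * re-schedule themselves until ->max invocations have happened.
 */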
static void bh_test_cb(void *opaque)
{
    BHTestData *data = opaque;
    if (++data->n < data->max) {
        qemu_bh_schedule(data->bh);
    }
}

static void timer_test_cb(void *opaque)
{
    TimerTestData *data = opaque;
    if (++data->n < data->max) {
        timer_mod(&data->timer,
                  qemu_clock_get_ns(data->clock_type) + data->ns);
    }
}

static void dummy_io_handler_read(EventNotifier *e)
{
}

static void bh_delete_cb(void *opaque)
{
    BHTestData *data = opaque;
    if (++data->n < data->max) {
        qemu_bh_schedule(data->bh);
    } else {
        qemu_bh_delete(data->bh);
        data->bh = NULL;
    }
}

static void event_ready_cb(EventNotifier *e)
{
    EventNotifierTestData *data = container_of(e, EventNotifierTestData, e);
    g_assert(event_notifier_test_and_clear(e));
    data->n++;
    if (data->active > 0) {
        data->active--;
    }
    if (data->auto_set && data->active) {
        event_notifier_set(e);
    }
}

/* Tests using aio_*.  */

typedef struct {
    QemuMutex start_lock;
    EventNotifier notifier;
    bool thread_acquired;
} AcquireTestData;

static void *test_acquire_thread(void *opaque)
{
    AcquireTestData *data = opaque;

    /* Wait for other thread to let us start */
    qemu_mutex_lock(&data->start_lock);
    qemu_mutex_unlock(&data->start_lock);

    /* event_notifier_set might be called either before or after
     * the main thread's call to poll().  The test case's outcome
     * should be the same in either case.
     */
    event_notifier_set(&data->notifier);
    aio_context_acquire(ctx);
    aio_context_release(ctx);

    data->thread_acquired = true; /* success, we got here */

    return NULL;
}

static void set_event_notifier(AioContext *ctx, EventNotifier *notifier,
                               EventNotifierHandler *handler)
{
    aio_set_event_notifier(ctx, notifier, false, handler, NULL, NULL);
}

static void dummy_notifier_read(EventNotifier *n)
{
    event_notifier_test_and_clear(n);
}
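
/*
 * While the main thread is blocked in aio_poll(), a second thread kicks the
 * context with an event notifier and then takes and releases the AioContext
 * lock; it must only succeed after the main thread has released the lock.
 */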
static void test_acquire(void)
{
    QemuThread thread;
    AcquireTestData data;

    /* Dummy event notifier ensures aio_poll() will block */
    event_notifier_init(&data.notifier, false);
    set_event_notifier(ctx, &data.notifier, dummy_notifier_read);
    g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */

    qemu_mutex_init(&data.start_lock);
    qemu_mutex_lock(&data.start_lock);
    data.thread_acquired = false;

    qemu_thread_create(&thread, "test_acquire_thread",
                       test_acquire_thread,
                       &data, QEMU_THREAD_JOINABLE);

    /* Block in aio_poll(), let other thread kick us and acquire context */
    aio_context_acquire(ctx);
    qemu_mutex_unlock(&data.start_lock); /* let the thread run */
    g_assert(aio_poll(ctx, true));
    g_assert(!data.thread_acquired);
    aio_context_release(ctx);

    qemu_thread_join(&thread);
    set_event_notifier(ctx, &data.notifier, NULL);
    event_notifier_cleanup(&data.notifier);

    g_assert(data.thread_acquired);
}
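
/*
 * Bottom half tests: scheduling, repeated re-scheduling, cancellation,
 * deletion, deletion from within the callback itself, and flushing.
 */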
static void test_bh_schedule(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}

static void test_bh_schedule10(void)
{
    BHTestData data = { .n = 0, .max = 10 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 2);

    while (data.n < 10) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data.n, ==, 10);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 10);
    qemu_bh_delete(data.bh);
}

static void test_bh_cancel(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_cancel(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    qemu_bh_delete(data.bh);
}

static void test_bh_delete(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_delete(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
}

static void test_bh_delete_from_cb(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);

    qemu_bh_schedule(data1.bh);
    g_assert_cmpint(data1.n, ==, 0);

    while (data1.n < data1.max) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert(data1.bh == NULL);

    g_assert(!aio_poll(ctx, false));
}

static void test_bh_delete_from_cb_many(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };
    BHTestData data2 = { .n = 0, .max = 3 };
    BHTestData data3 = { .n = 0, .max = 2 };
    BHTestData data4 = { .n = 0, .max = 4 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
    data2.bh = aio_bh_new(ctx, bh_delete_cb, &data2);
    data3.bh = aio_bh_new(ctx, bh_delete_cb, &data3);
    data4.bh = aio_bh_new(ctx, bh_delete_cb, &data4);

    qemu_bh_schedule(data1.bh);
    qemu_bh_schedule(data2.bh);
    qemu_bh_schedule(data3.bh);
    qemu_bh_schedule(data4.bh);
    g_assert_cmpint(data1.n, ==, 0);
    g_assert_cmpint(data2.n, ==, 0);
    g_assert_cmpint(data3.n, ==, 0);
    g_assert_cmpint(data4.n, ==, 0);

    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data1.n, ==, 1);
    g_assert_cmpint(data2.n, ==, 1);
    g_assert_cmpint(data3.n, ==, 1);
    g_assert_cmpint(data4.n, ==, 1);
    g_assert(data1.bh == NULL);

    while (data1.n < data1.max ||
           data2.n < data2.max ||
           data3.n < data3.max ||
           data4.n < data4.max) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert_cmpint(data2.n, ==, data2.max);
    g_assert_cmpint(data3.n, ==, data3.max);
    g_assert_cmpint(data4.n, ==, data4.max);
    g_assert(data1.bh == NULL);
    g_assert(data2.bh == NULL);
    g_assert(data3.bh == NULL);
    g_assert(data4.bh == NULL);
}

static void test_bh_flush(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}
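
/*
 * Event notifier tests: add/remove a notifier, wait for it to fire, and
 * flush a notifier that keeps re-arming itself.
 */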
static void test_set_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 0 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    event_notifier_cleanup(&data.e);
}

static void test_wait_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 1 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 1);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_cleanup(&data.e);
}

static void test_flush_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 10);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 9);
    g_assert(aio_poll(ctx, false));

    wait_until_inactive(&data);
    g_assert_cmpint(data.n, ==, 10);
    g_assert_cmpint(data.active, ==, 0);
    g_assert(!aio_poll(ctx, false));

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    event_notifier_cleanup(&data.e);
}
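
/*
 * While external clients are suppressed with aio_disable_external(),
 * aio_poll() must not dispatch the external event notifier; each matching
 * aio_enable_external() lifts the suppression again.
 */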
static void test_aio_external_client(void)
{
    int i, j;

    for (i = 1; i < 3; i++) {
        EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
        event_notifier_init(&data.e, false);
        aio_set_event_notifier(ctx, &data.e, true, event_ready_cb, NULL, NULL);
        event_notifier_set(&data.e);
        for (j = 0; j < i; j++) {
            aio_disable_external(ctx);
        }
        for (j = 0; j < i; j++) {
            assert(!aio_poll(ctx, false));
            assert(event_notifier_test_and_clear(&data.e));
            event_notifier_set(&data.e);
            aio_enable_external(ctx);
        }
        assert(aio_poll(ctx, false));
        set_event_notifier(ctx, &data.e, NULL);
        event_notifier_cleanup(&data.e);
    }
}
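
/*
 * Like test_wait_event_notifier, but the notifier under test never becomes
 * "active"; an active dummy notifier is used to force aio_poll() to
 * dispatch it anyway.
 */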
static void test_wait_event_notifier_noflush(void)
{
    EventNotifierTestData data = { .n = 0 };
    EventNotifierTestData dummy = { .n = 0, .active = 1 };

    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    /* Until there is an active descriptor, aio_poll may or may not call
     * event_ready_cb.  Still, it must not block. */
    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, true));
    data.n = 0;

    /* An active event notifier forces aio_poll to look at EventNotifiers. */
    event_notifier_init(&dummy.e, false);
    set_event_notifier(ctx, &dummy.e, event_ready_cb);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_set(&dummy.e);
    wait_until_inactive(&dummy);
    g_assert_cmpint(data.n, ==, 2);
    g_assert_cmpint(dummy.n, ==, 1);
    g_assert_cmpint(dummy.active, ==, 0);

    set_event_notifier(ctx, &dummy.e, NULL);
    event_notifier_cleanup(&dummy.e);

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_cleanup(&data.e);
}
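
/*
 * Arm a 750 ms timer that re-arms itself once: the callback must not run
 * before the timeout has elapsed, and must run exactly twice overall.
 */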
static void test_timer_schedule(void)
{
    TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
                           .max = 2,
                           .clock_type = QEMU_CLOCK_REALTIME };
    EventNotifier e;

    /* aio_poll will not block to wait for timers to complete unless it has
     * an fd to wait on.  Fixing this breaks other tests.  So create a dummy one.
     */
    event_notifier_init(&e, false);
    set_event_notifier(ctx, &e, dummy_io_handler_read);
    aio_poll(ctx, false);

    aio_timer_init(ctx, &data.timer, data.clock_type,
                   SCALE_NS, timer_test_cb, &data);
    timer_mod(&data.timer,
              qemu_clock_get_ns(data.clock_type) +
              data.ns);

    g_assert_cmpint(data.n, ==, 0);

    /* timer_mod may well cause an event notifier to have gone off,
     * so clear that
     */
    do {} while (aio_poll(ctx, false));

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    g_usleep(1 * G_USEC_PER_SEC);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    /* timer_mod called by our callback */
    do {} while (aio_poll(ctx, false));

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 2);

    /* As max is now 2, an event notifier should not have gone off */

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    set_event_notifier(ctx, &e, NULL);
    event_notifier_cleanup(&e);

    timer_del(&data.timer);
}

/* Now the same tests, using the context as a GSource.  They are
 * very similar to the ones above, with g_main_context_iteration
 * replacing aio_poll.  However:
 * - sometimes both the AioContext and the glib main loop wake
 *   themselves up.  Hence, some "g_assert(!aio_poll(ctx, false));"
 *   are replaced by "while (g_main_context_iteration(NULL, false));".
 * - there is no exact replacement for a blocking wait.
 *   "while (g_main_context_iteration(NULL, true));" seems to work,
 *   but it is not documented _why_ it works.  For these tests a
 *   non-blocking loop like "while (g_main_context_iteration(NULL, false));"
 *   works well, and that's what I am using.
 */
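
/* aio_notify() wakes up the glib main loop; draining it leaves nothing pending. */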
static void test_source_flush(void)
{
    g_assert(!g_main_context_iteration(NULL, false));
    aio_notify(ctx);
    while (g_main_context_iteration(NULL, false));
    g_assert(!g_main_context_iteration(NULL, false));
}

static void test_source_bh_schedule(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}

static void test_source_bh_schedule10(void)
{
    BHTestData data = { .n = 0, .max = 10 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 2);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 10);

    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 10);
    qemu_bh_delete(data.bh);
}

static void test_source_bh_cancel(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_cancel(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    qemu_bh_delete(data.bh);
}

static void test_source_bh_delete(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_delete(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
}

static void test_source_bh_delete_from_cb(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);

    qemu_bh_schedule(data1.bh);
    g_assert_cmpint(data1.n, ==, 0);

    g_main_context_iteration(NULL, true);
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert(data1.bh == NULL);

    assert(g_main_context_iteration(NULL, false));
    assert(!g_main_context_iteration(NULL, false));
}

static void test_source_bh_delete_from_cb_many(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };
    BHTestData data2 = { .n = 0, .max = 3 };
    BHTestData data3 = { .n = 0, .max = 2 };
    BHTestData data4 = { .n = 0, .max = 4 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
    data2.bh = aio_bh_new(ctx, bh_delete_cb, &data2);
    data3.bh = aio_bh_new(ctx, bh_delete_cb, &data3);
    data4.bh = aio_bh_new(ctx, bh_delete_cb, &data4);

    qemu_bh_schedule(data1.bh);
    qemu_bh_schedule(data2.bh);
    qemu_bh_schedule(data3.bh);
    qemu_bh_schedule(data4.bh);
    g_assert_cmpint(data1.n, ==, 0);
    g_assert_cmpint(data2.n, ==, 0);
    g_assert_cmpint(data3.n, ==, 0);
    g_assert_cmpint(data4.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data1.n, ==, 1);
    g_assert_cmpint(data2.n, ==, 1);
    g_assert_cmpint(data3.n, ==, 1);
    g_assert_cmpint(data4.n, ==, 1);
    g_assert(data1.bh == NULL);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert_cmpint(data2.n, ==, data2.max);
    g_assert_cmpint(data3.n, ==, data3.max);
    g_assert_cmpint(data4.n, ==, data4.max);
    g_assert(data1.bh == NULL);
    g_assert(data2.bh == NULL);
    g_assert(data3.bh == NULL);
    g_assert(data4.bh == NULL);
}

static void test_source_bh_flush(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}

static void test_source_set_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 0 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    event_notifier_cleanup(&data.e);
}

static void test_source_wait_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 1 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 1);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_cleanup(&data.e);
}

static void test_source_flush_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 10);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 9);
    g_assert(g_main_context_iteration(NULL, false));

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 10);
    g_assert_cmpint(data.active, ==, 0);
    g_assert(!g_main_context_iteration(NULL, false));

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    event_notifier_cleanup(&data.e);
}

static void test_source_wait_event_notifier_noflush(void)
{
    EventNotifierTestData data = { .n = 0 };
    EventNotifierTestData dummy = { .n = 0, .active = 1 };

    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);

    /* Until there is an active descriptor, glib may or may not call
     * event_ready_cb.  Still, it must not block. */
    event_notifier_set(&data.e);
    g_main_context_iteration(NULL, true);
    data.n = 0;

    /* An active event notifier forces aio_poll to look at EventNotifiers. */
    event_notifier_init(&dummy.e, false);
    set_event_notifier(ctx, &dummy.e, event_ready_cb);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);
    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_set(&dummy.e);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);
    g_assert_cmpint(dummy.n, ==, 1);
    g_assert_cmpint(dummy.active, ==, 0);

    set_event_notifier(ctx, &dummy.e, NULL);
    event_notifier_cleanup(&dummy.e);

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_cleanup(&data.e);
}
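
/*
 * Same as test_timer_schedule, but driven through g_main_context_iteration();
 * additionally checks that the second expiry happens after the expected time.
 */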
static void test_source_timer_schedule(void)
{
    TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
                           .max = 2,
                           .clock_type = QEMU_CLOCK_REALTIME };
    EventNotifier e;
    int64_t expiry;

    /* aio_poll will not block to wait for timers to complete unless it has
     * an fd to wait on.  Fixing this breaks other tests.  So create a dummy one.
     */
    event_notifier_init(&e, false);
    set_event_notifier(ctx, &e, dummy_io_handler_read);
    do {} while (g_main_context_iteration(NULL, false));

    aio_timer_init(ctx, &data.timer, data.clock_type,
                   SCALE_NS, timer_test_cb, &data);
    expiry = qemu_clock_get_ns(data.clock_type) +
             data.ns;
    timer_mod(&data.timer, expiry);

    g_assert_cmpint(data.n, ==, 0);

    g_usleep(1 * G_USEC_PER_SEC);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 1);
    expiry += data.ns;

    while (data.n < 2) {
        g_main_context_iteration(NULL, true);
    }

    g_assert_cmpint(data.n, ==, 2);
    g_assert(qemu_clock_get_ns(data.clock_type) > expiry);

    set_event_notifier(ctx, &e, NULL);
    event_notifier_cleanup(&e);

    timer_del(&data.timer);
}

/*
 * Check that aio_co_enter() can chain many times
 *
 * Two coroutines should be able to invoke each other via aio_co_enter() many
 * times without hitting a limit like stack exhaustion.  In other words, the
 * calls should be chained instead of nested.
 */

typedef struct {
    Coroutine *other;
    unsigned i;
    unsigned max;
} ChainData;

static void coroutine_fn chain(void *opaque)
{
    ChainData *data = opaque;

    for (data->i = 0; data->i < data->max; data->i++) {
        /* Queue up the other coroutine... */
        aio_co_enter(ctx, data->other);

        /* ...and give control to it */
        qemu_coroutine_yield();
    }
}

static void test_queue_chaining(void)
{
    /* This number of iterations hit stack exhaustion in the past: */
    ChainData data_a = { .max = 25000 };
    ChainData data_b = { .max = 25000 };

    data_b.other = qemu_coroutine_create(chain, &data_a);
    data_a.other = qemu_coroutine_create(chain, &data_b);

    qemu_coroutine_enter(data_b.other);

    g_assert_cmpint(data_a.i, ==, data_a.max);
    g_assert_cmpint(data_b.i, ==, data_b.max - 1);

    /* Allow the second coroutine to terminate */
    qemu_coroutine_enter(data_a.other);

    g_assert_cmpint(data_b.i, ==, data_b.max);
}

static void co_check_current_thread(void *opaque)
{
    QemuThread *main_thread = opaque;
    assert(qemu_thread_is_self(main_thread));
}

static void *test_aio_co_enter(void *co)
{
    /*
     * qemu_get_current_aio_context() should not be the main thread
     * AioContext, because this is a worker thread that has not taken
     * the BQL.  So aio_co_enter will schedule the coroutine in the
     * main thread AioContext.
     */
    aio_co_enter(qemu_get_aio_context(), co);
    return NULL;
}
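
/*
 * A coroutine entered with aio_co_enter() from a worker thread that does not
 * hold the BQL must be scheduled onto the main thread's AioContext, where it
 * then runs in the main thread.
 */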
static void test_worker_thread_co_enter(void)
{
    QemuThread this_thread, worker_thread;
    Coroutine *co;

    qemu_thread_get_self(&this_thread);
    co = qemu_coroutine_create(co_check_current_thread, &this_thread);

    qemu_thread_create(&worker_thread, "test_acquire_thread",
                       test_aio_co_enter,
                       co, QEMU_THREAD_JOINABLE);

    /* Test aio_co_enter from a worker thread.  */
    qemu_thread_join(&worker_thread);
    g_assert(aio_poll(ctx, true));
    g_assert(!aio_poll(ctx, false));
}

/* End of tests.  */
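
/*
 * main() runs all tests against the main-loop AioContext; any events left
 * over from main-loop initialization are drained before the tests register.
 */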
int main(int argc, char **argv)
{
    qemu_init_main_loop(&error_fatal);
    ctx = qemu_get_aio_context();

    while (g_main_context_iteration(NULL, false));

    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/aio/acquire", test_acquire);
    g_test_add_func("/aio/bh/schedule", test_bh_schedule);
    g_test_add_func("/aio/bh/schedule10", test_bh_schedule10);
    g_test_add_func("/aio/bh/cancel", test_bh_cancel);
    g_test_add_func("/aio/bh/delete", test_bh_delete);
    g_test_add_func("/aio/bh/callback-delete/one", test_bh_delete_from_cb);
    g_test_add_func("/aio/bh/callback-delete/many", test_bh_delete_from_cb_many);
    g_test_add_func("/aio/bh/flush", test_bh_flush);
    g_test_add_func("/aio/event/add-remove", test_set_event_notifier);
    g_test_add_func("/aio/event/wait", test_wait_event_notifier);
    g_test_add_func("/aio/event/wait/no-flush-cb", test_wait_event_notifier_noflush);
    g_test_add_func("/aio/event/flush", test_flush_event_notifier);
    g_test_add_func("/aio/external-client", test_aio_external_client);
    g_test_add_func("/aio/timer/schedule", test_timer_schedule);

    g_test_add_func("/aio/coroutine/queue-chaining", test_queue_chaining);
    g_test_add_func("/aio/coroutine/worker-thread-co-enter", test_worker_thread_co_enter);

    g_test_add_func("/aio-gsource/flush", test_source_flush);
    g_test_add_func("/aio-gsource/bh/schedule", test_source_bh_schedule);
    g_test_add_func("/aio-gsource/bh/schedule10", test_source_bh_schedule10);
    g_test_add_func("/aio-gsource/bh/cancel", test_source_bh_cancel);
    g_test_add_func("/aio-gsource/bh/delete", test_source_bh_delete);
    g_test_add_func("/aio-gsource/bh/callback-delete/one", test_source_bh_delete_from_cb);
    g_test_add_func("/aio-gsource/bh/callback-delete/many", test_source_bh_delete_from_cb_many);
    g_test_add_func("/aio-gsource/bh/flush", test_source_bh_flush);
    g_test_add_func("/aio-gsource/event/add-remove", test_source_set_event_notifier);
    g_test_add_func("/aio-gsource/event/wait", test_source_wait_event_notifier);
    g_test_add_func("/aio-gsource/event/wait/no-flush-cb", test_source_wait_event_notifier_noflush);
    g_test_add_func("/aio-gsource/event/flush", test_source_flush_event_notifier);
    g_test_add_func("/aio-gsource/timer/schedule", test_source_timer_schedule);
    return g_test_run();
}