 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Stefan Hajnoczi    <stefanha@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
14 #include "qemu/osdep.h"
15 #include "qemu/coroutine_int.h"
18 * Check that qemu_in_coroutine() works
21 static void coroutine_fn
verify_in_coroutine(void *opaque
)
23 g_assert(qemu_in_coroutine());
26 static void test_in_coroutine(void)
30 g_assert(!qemu_in_coroutine());
32 coroutine
= qemu_coroutine_create(verify_in_coroutine
, NULL
);
33 qemu_coroutine_enter(coroutine
);
37 * Check that qemu_coroutine_self() works
40 static void coroutine_fn
verify_self(void *opaque
)
42 Coroutine
**p_co
= opaque
;
43 g_assert(qemu_coroutine_self() == *p_co
);
46 static void test_self(void)
50 coroutine
= qemu_coroutine_create(verify_self
, &coroutine
);
51 qemu_coroutine_enter(coroutine
);
55 * Check that qemu_coroutine_entered() works
58 static void coroutine_fn
verify_entered_step_2(void *opaque
)
60 Coroutine
*caller
= (Coroutine
*)opaque
;
62 g_assert(qemu_coroutine_entered(caller
));
63 g_assert(qemu_coroutine_entered(qemu_coroutine_self()));
64 qemu_coroutine_yield();
66 /* Once more to check it still works after yielding */
67 g_assert(qemu_coroutine_entered(caller
));
68 g_assert(qemu_coroutine_entered(qemu_coroutine_self()));
71 static void coroutine_fn
verify_entered_step_1(void *opaque
)
73 Coroutine
*self
= qemu_coroutine_self();
76 g_assert(qemu_coroutine_entered(self
));
78 coroutine
= qemu_coroutine_create(verify_entered_step_2
, self
);
79 g_assert(!qemu_coroutine_entered(coroutine
));
80 qemu_coroutine_enter(coroutine
);
81 g_assert(!qemu_coroutine_entered(coroutine
));
82 qemu_coroutine_enter(coroutine
);
85 static void test_entered(void)
89 coroutine
= qemu_coroutine_create(verify_entered_step_1
, NULL
);
90 g_assert(!qemu_coroutine_entered(coroutine
));
91 qemu_coroutine_enter(coroutine
);
/*
 * Check that coroutines may nest multiple levels
 */

typedef struct {
    unsigned int n_enter;   /* num coroutines entered */
    unsigned int n_return;  /* num coroutines returned */
    unsigned int max;       /* maximum level of nesting */
} NestData;
104 static void coroutine_fn
nest(void *opaque
)
106 NestData
*nd
= opaque
;
110 if (nd
->n_enter
< nd
->max
) {
113 child
= qemu_coroutine_create(nest
, nd
);
114 qemu_coroutine_enter(child
);
120 static void test_nesting(void)
129 root
= qemu_coroutine_create(nest
, &nd
);
130 qemu_coroutine_enter(root
);
132 /* Must enter and return from max nesting level */
133 g_assert_cmpint(nd
.n_enter
, ==, nd
.max
);
134 g_assert_cmpint(nd
.n_return
, ==, nd
.max
);
138 * Check that yield/enter transfer control correctly
141 static void coroutine_fn
yield_5_times(void *opaque
)
146 for (i
= 0; i
< 5; i
++) {
147 qemu_coroutine_yield();
152 static void test_yield(void)
154 Coroutine
*coroutine
;
156 int i
= -1; /* one extra time to return from coroutine */
158 coroutine
= qemu_coroutine_create(yield_5_times
, &done
);
160 qemu_coroutine_enter(coroutine
);
163 g_assert_cmpint(i
, ==, 5); /* coroutine must yield 5 times */
166 static void coroutine_fn
c2_fn(void *opaque
)
168 qemu_coroutine_yield();
171 static void coroutine_fn
c1_fn(void *opaque
)
173 Coroutine
*c2
= opaque
;
174 qemu_coroutine_enter(c2
);
177 static void test_no_dangling_access(void)
183 c2
= qemu_coroutine_create(c2_fn
, NULL
);
184 c1
= qemu_coroutine_create(c1_fn
, c2
);
186 qemu_coroutine_enter(c1
);
188 /* c1 shouldn't be used any more now; make sure we segfault if it is */
190 memset(c1
, 0xff, sizeof(Coroutine
));
191 qemu_coroutine_enter(c2
);
193 /* Must restore the coroutine now to avoid corrupted pool */
/* Number of mutex_fn/lockable_fn coroutines that have finished */
static int done_count;
200 static void coroutine_fn
mutex_fn(void *opaque
)
203 qemu_co_mutex_lock(m
);
206 qemu_coroutine_yield();
208 qemu_co_mutex_unlock(m
);
212 static void coroutine_fn
lockable_fn(void *opaque
)
214 QemuLockable
*x
= opaque
;
215 qemu_lockable_lock(x
);
218 qemu_coroutine_yield();
220 qemu_lockable_unlock(x
);
224 static void do_test_co_mutex(CoroutineEntry
*entry
, void *opaque
)
226 Coroutine
*c1
= qemu_coroutine_create(entry
, opaque
);
227 Coroutine
*c2
= qemu_coroutine_create(entry
, opaque
);
230 qemu_coroutine_enter(c1
);
232 qemu_coroutine_enter(c2
);
234 /* Unlock queues c2. It is then started automatically when c1 yields or
237 qemu_coroutine_enter(c1
);
238 g_assert_cmpint(done_count
, ==, 1);
241 qemu_coroutine_enter(c2
);
242 g_assert_cmpint(done_count
, ==, 2);
246 static void test_co_mutex(void)
250 qemu_co_mutex_init(&m
);
251 do_test_co_mutex(mutex_fn
, &m
);
254 static void test_co_mutex_lockable(void)
257 CoMutex
*null_pointer
= NULL
;
259 qemu_co_mutex_init(&m
);
260 do_test_co_mutex(lockable_fn
, QEMU_MAKE_LOCKABLE(&m
));
262 g_assert(QEMU_MAKE_LOCKABLE(null_pointer
) == NULL
);
265 static CoRwlock rwlock
;
267 /* Test that readers are properly sent back to the queue when upgrading,
268 * even if they are the sole readers. The test scenario is as follows:
272 * |--------------+------------+
278 * | <queued> | <dequeued> |
284 static void coroutine_fn
rwlock_yield_upgrade(void *opaque
)
286 qemu_co_rwlock_rdlock(&rwlock
);
287 qemu_coroutine_yield();
289 qemu_co_rwlock_upgrade(&rwlock
);
290 qemu_co_rwlock_unlock(&rwlock
);
292 *(bool *)opaque
= true;
295 static void coroutine_fn
rwlock_wrlock_yield(void *opaque
)
297 qemu_co_rwlock_wrlock(&rwlock
);
298 qemu_coroutine_yield();
300 qemu_co_rwlock_unlock(&rwlock
);
301 *(bool *)opaque
= true;
304 static void test_co_rwlock_upgrade(void)
306 bool c1_done
= false;
307 bool c2_done
= false;
310 qemu_co_rwlock_init(&rwlock
);
311 c1
= qemu_coroutine_create(rwlock_yield_upgrade
, &c1_done
);
312 c2
= qemu_coroutine_create(rwlock_wrlock_yield
, &c2_done
);
314 qemu_coroutine_enter(c1
);
315 qemu_coroutine_enter(c2
);
317 /* c1 now should go to sleep. */
318 qemu_coroutine_enter(c1
);
321 qemu_coroutine_enter(c2
);
326 static void coroutine_fn
rwlock_rdlock_yield(void *opaque
)
328 qemu_co_rwlock_rdlock(&rwlock
);
329 qemu_coroutine_yield();
331 qemu_co_rwlock_unlock(&rwlock
);
332 qemu_coroutine_yield();
334 *(bool *)opaque
= true;
337 static void coroutine_fn
rwlock_wrlock_downgrade(void *opaque
)
339 qemu_co_rwlock_wrlock(&rwlock
);
341 qemu_co_rwlock_downgrade(&rwlock
);
342 qemu_co_rwlock_unlock(&rwlock
);
343 *(bool *)opaque
= true;
346 static void coroutine_fn
rwlock_rdlock(void *opaque
)
348 qemu_co_rwlock_rdlock(&rwlock
);
350 qemu_co_rwlock_unlock(&rwlock
);
351 *(bool *)opaque
= true;
354 static void coroutine_fn
rwlock_wrlock(void *opaque
)
356 qemu_co_rwlock_wrlock(&rwlock
);
358 qemu_co_rwlock_unlock(&rwlock
);
359 *(bool *)opaque
= true;
363 * Check that downgrading a reader-writer lock does not cause a hang.
365 * Four coroutines are used to produce a situation where there are
366 * both reader and writer hopefuls waiting to acquire an rwlock that
367 * is held by a reader.
369 * The correct sequence of operations we aim to provoke can be
372 * | c1 | c2 | c3 | c4 |
373 * |--------+------------+------------+------------|
384 * | | <dequeued> | | |
385 * | | downgrade | | |
386 * | | | <dequeued> | |
390 * | | | | <dequeued> |
393 static void test_co_rwlock_downgrade(void)
395 bool c1_done
= false;
396 bool c2_done
= false;
397 bool c3_done
= false;
398 bool c4_done
= false;
399 Coroutine
*c1
, *c2
, *c3
, *c4
;
401 qemu_co_rwlock_init(&rwlock
);
403 c1
= qemu_coroutine_create(rwlock_rdlock_yield
, &c1_done
);
404 c2
= qemu_coroutine_create(rwlock_wrlock_downgrade
, &c2_done
);
405 c3
= qemu_coroutine_create(rwlock_rdlock
, &c3_done
);
406 c4
= qemu_coroutine_create(rwlock_wrlock
, &c4_done
);
408 qemu_coroutine_enter(c1
);
409 qemu_coroutine_enter(c2
);
410 qemu_coroutine_enter(c3
);
411 qemu_coroutine_enter(c4
);
413 qemu_coroutine_enter(c1
);
419 qemu_coroutine_enter(c1
);
425 * Check that creation, enter, and return work
428 static void coroutine_fn
set_and_exit(void *opaque
)
435 static void test_lifecycle(void)
437 Coroutine
*coroutine
;
440 /* Create, enter, and return from coroutine */
441 coroutine
= qemu_coroutine_create(set_and_exit
, &done
);
442 qemu_coroutine_enter(coroutine
);
443 g_assert(done
); /* expect done to be true (first time) */
445 /* Repeat to check that no state affects this test */
447 coroutine
= qemu_coroutine_create(set_and_exit
, &done
);
448 qemu_coroutine_enter(coroutine
);
449 g_assert(done
); /* expect done to be true (second time) */
#define RECORD_SIZE 10 /* Leave some room for expansion */
/* One entry of the interleaving trace checked by test_order() */
struct coroutine_position {
    int func;   /* 1 = main flow, 2 = coroutine */
    int state;  /* ordinal of the step within that flow */
};
static struct coroutine_position records[RECORD_SIZE];
static unsigned record_pos;
461 static void record_push(int func
, int state
)
463 struct coroutine_position
*cp
= &records
[record_pos
++];
464 g_assert_cmpint(record_pos
, <, RECORD_SIZE
);
469 static void coroutine_fn
co_order_test(void *opaque
)
472 g_assert(qemu_in_coroutine());
473 qemu_coroutine_yield();
475 g_assert(qemu_in_coroutine());
478 static void do_order_test(void)
482 co
= qemu_coroutine_create(co_order_test
, NULL
);
484 qemu_coroutine_enter(co
);
486 g_assert(!qemu_in_coroutine());
487 qemu_coroutine_enter(co
);
489 g_assert(!qemu_in_coroutine());
492 static void test_order(void)
495 const struct coroutine_position expected_pos
[] = {
496 {1, 1,}, {2, 1}, {1, 2}, {2, 2}, {1, 3}
499 g_assert_cmpint(record_pos
, ==, 5);
500 for (i
= 0; i
< record_pos
; i
++) {
501 g_assert_cmpint(records
[i
].func
, ==, expected_pos
[i
].func
);
502 g_assert_cmpint(records
[i
].state
, ==, expected_pos
[i
].state
);
506 * Lifecycle benchmark
509 static void coroutine_fn
empty_coroutine(void *opaque
)
514 static void perf_lifecycle(void)
516 Coroutine
*coroutine
;
522 g_test_timer_start();
523 for (i
= 0; i
< max
; i
++) {
524 coroutine
= qemu_coroutine_create(empty_coroutine
, NULL
);
525 qemu_coroutine_enter(coroutine
);
527 duration
= g_test_timer_elapsed();
529 g_test_message("Lifecycle %u iterations: %f s", max
, duration
);
532 static void perf_nesting(void)
534 unsigned int i
, maxcycles
, maxnesting
;
541 g_test_timer_start();
542 for (i
= 0; i
< maxcycles
; i
++) {
548 root
= qemu_coroutine_create(nest
, &nd
);
549 qemu_coroutine_enter(root
);
551 duration
= g_test_timer_elapsed();
553 g_test_message("Nesting %u iterations of %u depth each: %f s",
554 maxcycles
, maxnesting
, duration
);
561 static void coroutine_fn
yield_loop(void *opaque
)
563 unsigned int *counter
= opaque
;
565 while ((*counter
) > 0) {
567 qemu_coroutine_yield();
571 static void perf_yield(void)
573 unsigned int i
, maxcycles
;
576 maxcycles
= 100000000;
578 Coroutine
*coroutine
= qemu_coroutine_create(yield_loop
, &i
);
580 g_test_timer_start();
582 qemu_coroutine_enter(coroutine
);
584 duration
= g_test_timer_elapsed();
586 g_test_message("Yield %u iterations: %f s", maxcycles
, duration
);
/* Baseline workload: a function call that just decrements the counter.
 * noinline so the benchmark really measures a call, not an optimized loop.
 */
static __attribute__((noinline)) void dummy(unsigned *i)
{
    (*i)--;
}
static void perf_baseline(void)
{
    unsigned int i, maxcycles;
    double duration;

    maxcycles = 100000000;
    i = maxcycles;

    g_test_timer_start();
    /* dummy() decrements i, so this counts down from maxcycles to zero */
    while (i > 0) {
        dummy(&i);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Function call %u iterations: %f s", maxcycles, duration);
}
611 static __attribute__((noinline
)) void coroutine_fn
perf_cost_func(void *opaque
)
613 qemu_coroutine_yield();
616 static void perf_cost(void)
618 const unsigned long maxcycles
= 40000000;
624 g_test_timer_start();
625 while (i
++ < maxcycles
) {
626 co
= qemu_coroutine_create(perf_cost_func
, &i
);
627 qemu_coroutine_enter(co
);
628 qemu_coroutine_enter(co
);
630 duration
= g_test_timer_elapsed();
631 ops
= (long)(maxcycles
/ (duration
* 1000));
633 g_test_message("Run operation %lu iterations %f s, %luK operations/s, "
634 "%luns per coroutine",
637 (unsigned long)(1000000000.0 * duration
/ maxcycles
));
640 int main(int argc
, char **argv
)
642 g_test_init(&argc
, &argv
, NULL
);
644 /* This test assumes there is a freelist and marks freed coroutine memory
645 * with a sentinel value. If there is no freelist this would legitimately
648 if (IS_ENABLED(CONFIG_COROUTINE_POOL
)) {
649 g_test_add_func("/basic/no-dangling-access", test_no_dangling_access
);
652 g_test_add_func("/basic/lifecycle", test_lifecycle
);
653 g_test_add_func("/basic/yield", test_yield
);
654 g_test_add_func("/basic/nesting", test_nesting
);
655 g_test_add_func("/basic/self", test_self
);
656 g_test_add_func("/basic/entered", test_entered
);
657 g_test_add_func("/basic/in_coroutine", test_in_coroutine
);
658 g_test_add_func("/basic/order", test_order
);
659 g_test_add_func("/locking/co-mutex", test_co_mutex
);
660 g_test_add_func("/locking/co-mutex/lockable", test_co_mutex_lockable
);
661 g_test_add_func("/locking/co-rwlock/upgrade", test_co_rwlock_upgrade
);
662 g_test_add_func("/locking/co-rwlock/downgrade", test_co_rwlock_downgrade
);
664 g_test_add_func("/perf/lifecycle", perf_lifecycle
);
665 g_test_add_func("/perf/nesting", perf_nesting
);
666 g_test_add_func("/perf/yield", perf_yield
);
667 g_test_add_func("/perf/function-call", perf_baseline
);
668 g_test_add_func("/perf/cost", perf_cost
);