tests/test-coroutine.c
/*
 * Coroutine tests
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "qemu/coroutine_int.h"
/*
 * Check that qemu_in_coroutine() works
 */

static void coroutine_fn verify_in_coroutine(void *opaque)
{
    g_assert(qemu_in_coroutine());
}

static void test_in_coroutine(void)
{
    Coroutine *coroutine;

    g_assert(!qemu_in_coroutine());

    coroutine = qemu_coroutine_create(verify_in_coroutine, NULL);
    qemu_coroutine_enter(coroutine);
}
/*
 * Check that qemu_coroutine_self() works
 */

static void coroutine_fn verify_self(void *opaque)
{
    Coroutine **p_co = opaque;
    g_assert(qemu_coroutine_self() == *p_co);
}

static void test_self(void)
{
    Coroutine *coroutine;

    coroutine = qemu_coroutine_create(verify_self, &coroutine);
    qemu_coroutine_enter(coroutine);
}
/*
 * Check that qemu_coroutine_entered() works
 */

static void coroutine_fn verify_entered_step_2(void *opaque)
{
    Coroutine *caller = (Coroutine *)opaque;

    g_assert(qemu_coroutine_entered(caller));
    g_assert(qemu_coroutine_entered(qemu_coroutine_self()));
    qemu_coroutine_yield();

    /* Once more to check it still works after yielding */
    g_assert(qemu_coroutine_entered(caller));
    g_assert(qemu_coroutine_entered(qemu_coroutine_self()));
    qemu_coroutine_yield();
}

static void coroutine_fn verify_entered_step_1(void *opaque)
{
    Coroutine *self = qemu_coroutine_self();
    Coroutine *coroutine;

    g_assert(qemu_coroutine_entered(self));

    coroutine = qemu_coroutine_create(verify_entered_step_2, self);
    g_assert(!qemu_coroutine_entered(coroutine));
    qemu_coroutine_enter(coroutine);
    g_assert(!qemu_coroutine_entered(coroutine));
    qemu_coroutine_enter(coroutine);
}

static void test_entered(void)
{
    Coroutine *coroutine;

    coroutine = qemu_coroutine_create(verify_entered_step_1, NULL);
    g_assert(!qemu_coroutine_entered(coroutine));
    qemu_coroutine_enter(coroutine);
}
/*
 * Check that coroutines may nest multiple levels
 */

typedef struct {
    unsigned int n_enter;   /* num coroutines entered */
    unsigned int n_return;  /* num coroutines returned */
    unsigned int max;       /* maximum level of nesting */
} NestData;

static void coroutine_fn nest(void *opaque)
{
    NestData *nd = opaque;

    nd->n_enter++;

    if (nd->n_enter < nd->max) {
        Coroutine *child;

        child = qemu_coroutine_create(nest, nd);
        qemu_coroutine_enter(child);
    }

    nd->n_return++;
}

static void test_nesting(void)
{
    Coroutine *root;
    NestData nd = {
        .n_enter  = 0,
        .n_return = 0,
        .max      = 128,
    };

    root = qemu_coroutine_create(nest, &nd);
    qemu_coroutine_enter(root);

    /* Must enter and return from max nesting level */
    g_assert_cmpint(nd.n_enter, ==, nd.max);
    g_assert_cmpint(nd.n_return, ==, nd.max);
}
/*
 * Check that yield/enter transfer control correctly
 */

static void coroutine_fn yield_5_times(void *opaque)
{
    bool *done = opaque;
    int i;

    for (i = 0; i < 5; i++) {
        qemu_coroutine_yield();
    }
    *done = true;
}

static void test_yield(void)
{
    Coroutine *coroutine;
    bool done = false;
    int i = -1; /* one extra time to return from coroutine */

    coroutine = qemu_coroutine_create(yield_5_times, &done);
    while (!done) {
        qemu_coroutine_enter(coroutine);
        i++;
    }
    g_assert_cmpint(i, ==, 5); /* coroutine must yield 5 times */
}
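
/*
 * Check that a terminated coroutine's memory is no longer touched when the
 * coroutine it had entered finishes (relies on the coroutine freelist; see
 * the note in main() where "/basic/co_queue" is registered)
 */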
static void coroutine_fn c2_fn(void *opaque)
{
    qemu_coroutine_yield();
}

static void coroutine_fn c1_fn(void *opaque)
{
    Coroutine *c2 = opaque;
    qemu_coroutine_enter(c2);
}

static void test_co_queue(void)
{
    Coroutine *c1;
    Coroutine *c2;
    Coroutine tmp;

    c2 = qemu_coroutine_create(c2_fn, NULL);
    c1 = qemu_coroutine_create(c1_fn, c2);

    qemu_coroutine_enter(c1);

    /* c1 shouldn't be used any more now; make sure we segfault if it is */
    tmp = *c1;
    memset(c1, 0xff, sizeof(Coroutine));
    qemu_coroutine_enter(c2);

    /* Must restore the coroutine now to avoid corrupted pool */
    *c1 = tmp;
}
/*
 * Check that creation, enter, and return work
 */

static void coroutine_fn set_and_exit(void *opaque)
{
    bool *done = opaque;

    *done = true;
}

static void test_lifecycle(void)
{
    Coroutine *coroutine;
    bool done = false;

    /* Create, enter, and return from coroutine */
    coroutine = qemu_coroutine_create(set_and_exit, &done);
    qemu_coroutine_enter(coroutine);
    g_assert(done); /* expect done to be true (first time) */

    /* Repeat to check that no state affects this test */
    done = false;
    coroutine = qemu_coroutine_create(set_and_exit, &done);
    qemu_coroutine_enter(coroutine);
    g_assert(done); /* expect done to be true (second time) */
}
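
/*
 * Check that the caller and the coroutine run in the expected order
 */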
#define RECORD_SIZE 10 /* Leave some room for expansion */
struct coroutine_position {
    int func;
    int state;
};
static struct coroutine_position records[RECORD_SIZE];
static unsigned record_pos;

static void record_push(int func, int state)
{
    struct coroutine_position *cp = &records[record_pos++];
    g_assert_cmpint(record_pos, <, RECORD_SIZE);
    cp->func = func;
    cp->state = state;
}

static void coroutine_fn co_order_test(void *opaque)
{
    record_push(2, 1);
    g_assert(qemu_in_coroutine());
    qemu_coroutine_yield();
    record_push(2, 2);
    g_assert(qemu_in_coroutine());
}

static void do_order_test(void)
{
    Coroutine *co;

    co = qemu_coroutine_create(co_order_test, NULL);
    record_push(1, 1);
    qemu_coroutine_enter(co);
    record_push(1, 2);
    g_assert(!qemu_in_coroutine());
    qemu_coroutine_enter(co);
    record_push(1, 3);
    g_assert(!qemu_in_coroutine());
}

static void test_order(void)
{
    int i;
    const struct coroutine_position expected_pos[] = {
        {1, 1}, {2, 1}, {1, 2}, {2, 2}, {1, 3}
    };

    do_order_test();
    g_assert_cmpint(record_pos, ==, 5);
    for (i = 0; i < record_pos; i++) {
        g_assert_cmpint(records[i].func,  ==, expected_pos[i].func);
        g_assert_cmpint(records[i].state, ==, expected_pos[i].state);
    }
}
/*
 * Lifecycle benchmark
 */

static void coroutine_fn empty_coroutine(void *opaque)
{
    /* Do nothing */
}

static void perf_lifecycle(void)
{
    Coroutine *coroutine;
    unsigned int i, max;
    double duration;

    max = 1000000;

    g_test_timer_start();
    for (i = 0; i < max; i++) {
        coroutine = qemu_coroutine_create(empty_coroutine, NULL);
        qemu_coroutine_enter(coroutine);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Lifecycle %u iterations: %f s\n", max, duration);
}
static void perf_nesting(void)
{
    unsigned int i, maxcycles, maxnesting;
    double duration;
    Coroutine *root;

    maxcycles = 10000;
    maxnesting = 1000;

    g_test_timer_start();
    for (i = 0; i < maxcycles; i++) {
        NestData nd = {
            .n_enter  = 0,
            .n_return = 0,
            .max      = maxnesting,
        };

        root = qemu_coroutine_create(nest, &nd);
        qemu_coroutine_enter(root);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Nesting %u iterations of %u depth each: %f s\n",
                   maxcycles, maxnesting, duration);
}
/*
 * Yield benchmark
 */

static void coroutine_fn yield_loop(void *opaque)
{
    unsigned int *counter = opaque;

    while ((*counter) > 0) {
        (*counter)--;
        qemu_coroutine_yield();
    }
}

static void perf_yield(void)
{
    unsigned int i, maxcycles;
    double duration;

    maxcycles = 100000000;
    i = maxcycles;
    Coroutine *coroutine = qemu_coroutine_create(yield_loop, &i);

    g_test_timer_start();
    while (i > 0) {
        qemu_coroutine_enter(coroutine);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Yield %u iterations: %f s\n",
                   maxcycles, duration);
}
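
/*
 * Function call baseline: the same number of iterations as the yield
 * benchmark, but with a plain (non-inlined) function call for comparison
 */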
static __attribute__((noinline)) void dummy(unsigned *i)
{
    (*i)--;
}

static void perf_baseline(void)
{
    unsigned int i, maxcycles;
    double duration;

    maxcycles = 100000000;
    i = maxcycles;

    g_test_timer_start();
    while (i > 0) {
        dummy(&i);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Function call %u iterations: %f s\n",
                   maxcycles, duration);
}
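
/*
 * Per-coroutine cost: each iteration creates a coroutine and enters it
 * twice (once to reach the yield, once to let it terminate)
 */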
static __attribute__((noinline)) void perf_cost_func(void *opaque)
{
    qemu_coroutine_yield();
}

static void perf_cost(void)
{
    const unsigned long maxcycles = 40000000;
    unsigned long i = 0;
    double duration;
    unsigned long ops;
    Coroutine *co;

    g_test_timer_start();
    while (i++ < maxcycles) {
        co = qemu_coroutine_create(perf_cost_func, &i);
        qemu_coroutine_enter(co);
        qemu_coroutine_enter(co);
    }
    duration = g_test_timer_elapsed();
    ops = (long)(maxcycles / (duration * 1000));

    g_test_message("Run operation %lu iterations %f s, %luK operations/s, "
                   "%luns per coroutine",
                   maxcycles,
                   duration, ops,
                   (unsigned long)(1000000000.0 * duration / maxcycles));
}
int main(int argc, char **argv)
{
    g_test_init(&argc, &argv, NULL);

    /* This test assumes there is a freelist and marks freed coroutine memory
     * with a sentinel value.  If there is no freelist this would legitimately
     * crash, so skip it.
     */
    if (CONFIG_COROUTINE_POOL) {
        g_test_add_func("/basic/co_queue", test_co_queue);
    }

    g_test_add_func("/basic/lifecycle", test_lifecycle);
    g_test_add_func("/basic/yield", test_yield);
    g_test_add_func("/basic/nesting", test_nesting);
    g_test_add_func("/basic/self", test_self);
    g_test_add_func("/basic/entered", test_entered);
    g_test_add_func("/basic/in_coroutine", test_in_coroutine);
    g_test_add_func("/basic/order", test_order);
    if (g_test_perf()) {
        g_test_add_func("/perf/lifecycle", perf_lifecycle);
        g_test_add_func("/perf/nesting", perf_nesting);
        g_test_add_func("/perf/yield", perf_yield);
        g_test_add_func("/perf/function-call", perf_baseline);
        g_test_add_func("/perf/cost", perf_cost);
    }
    return g_test_run();
}