/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "trace.h"
/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

struct QEMUBH {
    AioContext *ctx;
    QEMUBHFunc *cb;
    void *opaque;
    QEMUBH *next;
    bool scheduled;
    bool idle;
    bool deleted;
};
void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    qemu_lockcnt_lock(&ctx->list_lock);
    bh->next = ctx->first_bh;
    bh->scheduled = 1;
    bh->deleted = 1;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}
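
/*
 * Illustrative use of the one-shot helper above (a sketch; my_cb and
 * my_state are hypothetical caller-supplied names):
 *
 *     aio_bh_schedule_oneshot(ctx, my_cb, my_state);
 *
 * The BH is born with scheduled = deleted = 1, so it runs exactly once
 * from aio_bh_poll() and is then reclaimed by the deleted-BH sweep.
 */
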
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    qemu_lockcnt_lock(&ctx->list_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_lockcnt_unlock(&ctx->list_lock);
    return bh;
}
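
/*
 * A BH from aio_bh_new() above persists until deleted (a sketch; only the
 * API calls are real, the other names are hypothetical):
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, my_state);
 *     qemu_bh_schedule(bh);     // my_cb runs once per schedule
 *     ...
 *     qemu_bh_delete(bh);       // actual free happens in aio_bh_poll()
 */
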
void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}
/* Multiple occurrences of aio_bh_poll cannot be called concurrently.
 * The count in ctx->list_lock is incremented before the call, and is
 * not affected by the call.
 */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;
    bool deleted = false;

    ret = 0;
    for (bh = atomic_rcu_read(&ctx->first_bh); bh; bh = next) {
        next = atomic_rcu_read(&bh->next);
        /* The atomic_xchg is paired with the one in qemu_bh_schedule.  The
         * implicit memory barrier ensures that the callback sees all writes
         * done by the scheduling thread.  It also ensures that the scheduling
         * thread sees the zero before bh->cb has run, and thus will call
         * aio_notify again if necessary.
         */
        if (atomic_xchg(&bh->scheduled, 0)) {
            /* Idle BHs don't count as progress */
            if (!bh->idle) {
                ret = 1;
            }
            bh->idle = 0;
            aio_bh_call(bh);
        }
        if (bh->deleted) {
            deleted = true;
        }
    }

    /* remove deleted bhs */
    if (!deleted) {
        return ret;
    }
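
    /*
     * qemu_lockcnt_dec_if_lock() succeeds only when this is the last
     * reader of the BH list; if other readers exist it leaves the count
     * alone and the sweep is deferred to a later aio_bh_poll().  A deleted
     * BH therefore can never be freed under a concurrent traversal.
     */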
    if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted && !bh->scheduled) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
    }
    return ret;
}
void qemu_bh_schedule_idle(QEMUBH *bh)
{
    bh->idle = 1;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in the aio_bh_poll.
     */
    atomic_mb_set(&bh->scheduled, 1);
}
void qemu_bh_schedule(QEMUBH *bh)
{
    AioContext *ctx;

    ctx = bh->ctx;
    bh->idle = 0;
    /* The memory barrier implicit in atomic_xchg makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in the aio_bh_poll.
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
     */
    if (atomic_xchg(&bh->scheduled, 1) == 0) {
        aio_notify(ctx);
    }
}
/* This function is asynchronous: it only clears the scheduled flag, so it
 * can race with a callback that aio_bh_poll() has already started running.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    atomic_mb_set(&bh->scheduled, 0);
}
/* This function is asynchronous: the bottom half is not freed here, but
 * later, by the deleted-BH sweep in aio_bh_poll().
 */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}
int64_t
aio_compute_timeout(AioContext *ctx)
{
    int64_t deadline;
    int timeout = -1;
    QEMUBH *bh;

    for (bh = atomic_rcu_read(&ctx->first_bh); bh;
         bh = atomic_rcu_read(&bh->next)) {
        if (bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}
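
/*
 * Worked example for aio_compute_timeout() above (a sketch): with one
 * scheduled idle BH and no pending timer (deadline == -1), the result is
 * qemu_soonest_timeout(10000000, -1) == 10000000 ns, i.e. the 10 ms idle
 * cap; any scheduled non-idle BH short-circuits the computation to 0.
 */
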
static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    atomic_or(&ctx->notify_me, 1);

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}
static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;

    atomic_and(&ctx->notify_me, ~1);
    aio_notify_accept(ctx);

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (bh->scheduled) {
            return true;
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}
static gboolean
aio_ctx_dispatch(GSource     *source,
                 GSourceFunc  callback,
                 gpointer     user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}
static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    qemu_lockcnt_lock(&ctx->list_lock);
    assert(!qemu_lockcnt_count(&ctx->list_lock));
    while (ctx->first_bh) {
        QEMUBH *next = ctx->first_bh->next;

        /* qemu_bh_delete() must have been called on BHs in this AioContext */
        assert(ctx->first_bh->deleted);

        g_free(ctx->first_bh);
        ctx->first_bh = next;
    }
    qemu_lockcnt_unlock(&ctx->list_lock);

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
    aio_context_destroy(ctx);
}
static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};
GSource *aio_get_g_source(AioContext *ctx)
{
    g_source_ref(&ctx->source);
    return &ctx->source;
}
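
/*
 * The GSource returned above is how a GLib main loop drives this
 * AioContext through aio_source_funcs (a sketch, assuming the default
 * main context):
 *
 *     GSource *src = aio_get_g_source(ctx);
 *     g_source_attach(src, g_main_context_default());
 *     g_source_unref(src);   // the attached context keeps its own ref
 */
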
ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}
#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
        }
    }
    return ctx->linux_aio;
}

LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    assert(ctx->linux_aio);
    return ctx->linux_aio;
}
#endif
void aio_notify(AioContext *ctx)
{
    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
     */
    smp_mb();
    if (ctx->notify_me) {
        event_notifier_set(&ctx->notifier);
        atomic_mb_set(&ctx->notified, true);
    }
}
void aio_notify_accept(AioContext *ctx)
{
    if (atomic_xchg(&ctx->notified, false)
#ifdef WIN32
        || true
#endif
        ) {
        event_notifier_test_and_clear(&ctx->notifier);
    }
}
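
/*
 * Pairing sketch for aio_notify()/aio_notify_accept() above: aio_notify()
 * arms ctx->notifier only while a poller has advertised interest through
 * ctx->notify_me; aio_notify_accept() consumes the pending notification
 * once the poller wakes, so the next aio_notify() is observed again.
 */
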
static void aio_timerlist_notify(void *opaque, QEMUClockType type)
{
    aio_notify(opaque);
}

static void event_notifier_dummy_cb(EventNotifier *e)
{
}
/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool event_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    return atomic_read(&ctx->notified);
}
static void co_schedule_bh_cb(void *opaque)
{
    AioContext *ctx = opaque;
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);
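
    /* scheduled_coroutines is pushed at the head (LIFO) by
     * aio_co_schedule(), so reverse it first to enter coroutines in the
     * order they were scheduled.
     */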
    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
    }

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        trace_aio_co_schedule_bh_cb(ctx, co);
        aio_context_acquire(ctx);

        /* Protected by write barrier in qemu_aio_coroutine_enter */
        atomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}
AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           event_notifier_dummy_cb,
                           event_notifier_poll);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif
    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}
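
/*
 * Creation/teardown sketch for aio_context_new() above (&error_abort is
 * the convenience policy from qapi/error.h; a real caller may prefer to
 * propagate the error):
 *
 *     AioContext *ctx = aio_context_new(&error_abort);
 *     ...
 *     aio_context_unref(ctx);   // last unref runs aio_ctx_finalize()
 */
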
void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
    trace_aio_co_schedule(ctx, co);
    const char *scheduled = atomic_cmpxchg(&co->scheduled, NULL,
                                           __func__);

    if (scheduled) {
        fprintf(stderr,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }

    /* The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
     * we're done.
     */
    aio_context_ref(ctx);

    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);

    aio_context_unref(ctx);
}
void aio_co_wake(struct Coroutine *co)
{
    AioContext *ctx;

    /* Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = atomic_read(&co->ctx);

    aio_co_enter(ctx, co);
}
void aio_co_enter(AioContext *ctx, struct Coroutine *co)
{
    if (ctx != qemu_get_current_aio_context()) {
        aio_co_schedule(ctx, co);
        return;
    }

    if (qemu_in_coroutine()) {
        Coroutine *self = qemu_coroutine_self();
        assert(self != co);
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
    } else {
        aio_context_acquire(ctx);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}
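
/*
 * Cross-thread wakeup sketch for the aio_co_* entry points above (co
 * stands for a hypothetical Coroutine* that yielded earlier): either
 * defer explicitly to a known context,
 *
 *     aio_co_schedule(ctx, co);   // runs via ctx->co_schedule_bh
 *
 * or let aio_co_wake(co) route through the context recorded in co->ctx.
 */
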
void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}
void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}
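
/*
 * ctx->lock is a recursive mutex (qemu_rec_mutex), so acquire/release
 * pairs may nest.  Canonical pattern (a sketch):
 *
 *     aio_context_acquire(ctx);
 *     ... operate on objects tied to ctx ...
 *     aio_context_release(ctx);
 */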