/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

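/* The QEMUBH definition did not survive in this listing; the following is
 * a reconstruction (a sketch, not authoritative) based on the fields that
 * the code below actually touches: ctx, cb, opaque, next, scheduled, idle
 * and deleted.
 */
struct QEMUBH {
    AioContext *ctx;
    QEMUBHFunc *cb;
    void *opaque;
    QEMUBH *next;
    bool scheduled;
    bool idle;
    bool deleted;
};
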
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;

    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);
    return bh;
}

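/* Typical lifecycle (illustrative sketch; "my_cb" and "my_opaque" are
 * hypothetical names, not part of this file):
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, my_opaque);
 *     qemu_bh_schedule(bh);      // my_cb runs on the next aio_bh_poll()
 *     ...
 *     qemu_bh_delete(bh);        // freed later, inside aio_bh_poll()
 */
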
void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/* Multiple occurrences of aio_bh_poll cannot be called concurrently */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;

    ctx->walking_bh++;

    ret = 0;
    for (bh = ctx->first_bh; bh; bh = next) {
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
        next = bh->next;
        /* The atomic_xchg is paired with the one in qemu_bh_schedule.  The
         * implicit memory barrier ensures that the callback sees all writes
         * done by the scheduling thread.  It also ensures that the scheduling
         * thread sees the zero before bh->cb has run, and thus will call
         * aio_notify again if necessary.
         */
        if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
            /* Idle BHs and the notify BH don't count as progress */
            if (!bh->idle && bh != ctx->notify_dummy_bh) {
                ret = 1;
            }
            bh->idle = 0;
            aio_bh_call(bh);
        }
    }

    ctx->walking_bh--;

    /* remove deleted bhs */
    if (!ctx->walking_bh) {
        qemu_mutex_lock(&ctx->bh_lock);
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_mutex_unlock(&ctx->bh_lock);
    }

    return ret;
}

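/* Note on the deferred deletion above: callbacks invoked via aio_bh_call()
 * may themselves call qemu_bh_delete(), and aio_bh_poll() can re-enter
 * through them (walking_bh counts the nesting depth).  Deleted BHs are
 * therefore only unlinked and freed once walking_bh drops back to zero,
 * so no walker is ever left holding a dangling pointer.
 */
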
void qemu_bh_schedule_idle(QEMUBH *bh)
{
    bh->idle = 1;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in the aio_bh_poll.
     */
    atomic_mb_set(&bh->scheduled, 1);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    AioContext *ctx;

    ctx = bh->ctx;
    bh->idle = 0;
    /* The memory barrier implicit in atomic_xchg makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in the aio_bh_poll.
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
     */
    if (atomic_xchg(&bh->scheduled, 1) == 0) {
        aio_notify(ctx);
    }
}

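/* A common pattern (sketch; "worker_done_bh" is a hypothetical name): a
 * worker thread finishes a job and kicks the event loop that owns the BH.
 * qemu_bh_schedule() may be called from any thread; the aio_notify() above
 * wakes the loop if it is blocked in poll().
 *
 *     ... produce result ...
 *     qemu_bh_schedule(worker_done_bh);
 */
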
/* This function is asynchronous: it only clears the scheduled flag and
 * does not wait for a callback that is already running.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}

/* This function is asynchronous: the bottom half is marked deleted here,
 * but the actual removal and free happen later, at the end of aio_bh_poll().
 */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    int64_t deadline;
    int timeout = -1;
    QEMUBH *bh;

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}

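/* Worked example: with one idle BH pending, timeout is 10000000 ns (10 ms).
 * If the nearest timer deadline is 3000000 ns (3 ms), qemu_soonest_timeout()
 * returns the 3 ms; with no timers armed, the 10 ms idle cap wins.
 */
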
static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    atomic_or(&ctx->notify_me, 1);

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

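/* GSource semantics: returning TRUE from prepare tells GLib this source is
 * already ready to dispatch, so the main loop need not block in poll();
 * otherwise *timeout bounds how long the loop may sleep.
 */
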
static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;

    atomic_and(&ctx->notify_me, ~1);
    aio_notify_accept(ctx);

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            return true;
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

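/* check runs after poll() returns: any live scheduled BH, any pending fd
 * handler (aio_pending) or an already-expired timer (deadline == 0) makes
 * GLib go on to call aio_ctx_dispatch below.
 */
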
static gboolean
aio_ctx_dispatch(GSource     *source,
                 GSourceFunc  callback,
                 gpointer     user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

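/* No user callback is ever attached to this GSource (hence the assertion);
 * all work is funneled through aio_dispatch() instead.
 */
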
static void
aio_ctx_finalize(GSource     *source)
{
    AioContext *ctx = (AioContext *) source;

    qemu_bh_delete(ctx->notify_dummy_bh);
    thread_pool_free(ctx->thread_pool);

    qemu_mutex_lock(&ctx->bh_lock);
    while (ctx->first_bh) {
        QEMUBH *next = ctx->first_bh->next;

        /* qemu_bh_delete() must have been called on BHs in this AioContext */
        assert(ctx->first_bh->deleted);

        g_free(ctx->first_bh);
        ctx->first_bh = next;
    }
    qemu_mutex_unlock(&ctx->bh_lock);

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
    event_notifier_cleanup(&ctx->notifier);
    rfifolock_destroy(&ctx->lock);
    qemu_mutex_destroy(&ctx->bh_lock);
    timerlistgroup_deinit(&ctx->tlg);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    g_source_ref(&ctx->source);
    return &ctx->source;
}

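/* Illustrative use (not from this file): hooking an AioContext into a GLib
 * main loop.  g_source_attach() takes its own reference, so the one taken
 * by aio_get_g_source() is dropped afterwards:
 *
 *     GSource *src = aio_get_g_source(ctx);
 *     g_source_attach(src, g_main_context_default());
 *     g_source_unref(src);
 */
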
ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

void aio_notify(AioContext *ctx)
{
    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
     */
    smp_mb();
    if (ctx->notify_me) {
        event_notifier_set(&ctx->notifier);
        atomic_mb_set(&ctx->notified, true);
    }
}

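/* aio_notify_accept() is the consumer side of the handshake above: it runs
 * once the event loop has woken up and atomically consumes the notification,
 * clearing the event notifier only if one was actually posted.
 */
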
void aio_notify_accept(AioContext *ctx)
{
    if (atomic_xchg(&ctx->notified, false)) {
        event_notifier_test_and_clear(&ctx->notifier);
    }
}

static void aio_timerlist_notify(void *opaque)
{
    aio_notify(opaque);
}

static void aio_rfifolock_cb(void *opaque)
{
    AioContext *ctx = opaque;

    /* Kick owner thread in case they are blocked in aio_poll() */
    qemu_bh_schedule(ctx->notify_dummy_bh);
}

static void notify_dummy_bh(void *opaque)
{
    /* Do nothing, we were invoked just to force the event loop to iterate */
}

static void event_notifier_dummy_cb(EventNotifier *e)
{
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;
    Error *local_err = NULL;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    aio_context_setup(ctx, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           (EventNotifierHandler *)
                           event_notifier_dummy_cb);
    ctx->thread_pool = NULL;
    qemu_mutex_init(&ctx->bh_lock);
    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->notify_dummy_bh = aio_bh_new(ctx, notify_dummy_bh, NULL);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}

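/* Illustrative creation/teardown (error_abort comes from qapi/error.h; a
 * real caller would usually propagate the error instead of aborting):
 *
 *     AioContext *ctx = aio_context_new(&error_abort);
 *     ...
 *     aio_context_unref(ctx);
 */
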
void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    rfifolock_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    rfifolock_unlock(&ctx->lock);
}
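
/* Sketch of the intended locking discipline: a thread that is not the
 * owner of the context brackets its accesses with acquire/release, e.g.
 *
 *     aio_context_acquire(ctx);
 *     ... operate on objects tied to ctx ...
 *     aio_context_release(ctx);
 *
 * The rfifolock's contention callback (aio_rfifolock_cb above) kicks the
 * owner out of aio_poll() so the lock can be handed over fairly.
 */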