/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */
struct QEMUBH {
    AioContext *ctx;
    QEMUBHFunc *cb;
    void *opaque;
    QEMUBH *next;
    bool scheduled;
    bool idle;
    bool deleted;
};

QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;

    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);
    return bh;
}
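/* Typical bottom half lifecycle, as a sketch (the MyDev type and the
 * callback name below are hypothetical, not part of this file):
 *
 *     static void my_dev_bh_cb(void *opaque)
 *     {
 *         MyDev *dev = opaque;
 *         ... complete deferred work for dev ...
 *     }
 *
 *     dev->bh = aio_bh_new(ctx, my_dev_bh_cb, dev);
 *     qemu_bh_schedule(dev->bh);   // run the callback in ctx ASAP
 *     ...
 *     qemu_bh_delete(dev->bh);     // freed later by aio_bh_poll()
 */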
void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}
/* Multiple occurrences of aio_bh_poll cannot be called concurrently */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;

    ctx->walking_bh++;

    ret = 0;
    for (bh = ctx->first_bh; bh; bh = next) {
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
        next = bh->next;
        /* The atomic_xchg is paired with the one in qemu_bh_schedule.  The
         * implicit memory barrier ensures that the callback sees all writes
         * done by the scheduling thread.  It also ensures that the scheduling
         * thread sees the zero before bh->cb has run, and thus will call
         * aio_notify again if necessary.
         */
        if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
            /* Idle BHs and the notify BH don't count as progress */
            if (!bh->idle && bh != ctx->notify_dummy_bh) {
                ret = 1;
            }
            bh->idle = 0;
            aio_bh_call(bh);
        }
    }

    ctx->walking_bh--;

    /* remove deleted bhs */
    if (!ctx->walking_bh) {
        qemu_mutex_lock(&ctx->bh_lock);
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_mutex_unlock(&ctx->bh_lock);
    }

    return ret;
}
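/* The scheduled flag is the handshake between producer and consumer.
 * A simplified sketch of the pairing:
 *
 *     scheduler thread                  event loop thread
 *     ----------------                  -----------------
 *     write bh->idle, callback data
 *     atomic_xchg(&bh->scheduled, 1)    atomic_xchg(&bh->scheduled, 0)
 *     if the old value was 0:           if the old value was 1:
 *         aio_notify(ctx)                   aio_bh_call(bh)
 *
 * Both atomic_xchg calls imply full barriers, so the callback sees the
 * producer's writes, and the producer sees scheduled == 0 before the
 * callback runs and can re-notify if it schedules again.
 */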
void qemu_bh_schedule_idle(QEMUBH *bh)
{
    bh->idle = 1;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in the aio_bh_poll.
     */
    atomic_mb_set(&bh->scheduled, 1);
}
void qemu_bh_schedule(QEMUBH *bh)
{
    AioContext *ctx;

    ctx = bh->ctx;
    bh->idle = 0;
    /* The memory barrier implicit in atomic_xchg makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in the aio_bh_poll.
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
     */
    if (atomic_xchg(&bh->scheduled, 1) == 0) {
        aio_notify(ctx);
    }
}
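/* qemu_bh_schedule() is designed to be callable from threads other than
 * the one running the AioContext; it is the usual way a worker thread
 * kicks work back into the event loop.  Sketch (names hypothetical):
 *
 *     // in a worker thread, after producing data for the device:
 *     qemu_bh_schedule(dev->completion_bh);  // wakes ctx via aio_notify()
 */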
/* This function is asynchronous: it only unschedules the bottom half and
 * does not wait for a callback that has already started to finish.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}
/* This function is asynchronous: the bottom half is only marked as deleted
 * here and is actually freed later, by aio_bh_poll().
 */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}
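/* Because deletion is deferred, a bottom half may safely delete itself
 * from its own callback; the memory is reclaimed on the next
 * aio_bh_poll() pass.  Sketch of a hypothetical one-shot BH:
 *
 *     static void once_cb(void *opaque)
 *     {
 *         MyJob *job = opaque;
 *         finish(job);
 *         qemu_bh_delete(job->bh);   // marked deleted now, freed later
 *     }
 */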
int64_t
aio_compute_timeout(AioContext *ctx)
{
    int64_t deadline;
    int timeout = -1;
    QEMUBH *bh;

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}
static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    atomic_or(&ctx->notify_me, 1);

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}
static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;

    atomic_and(&ctx->notify_me, ~1);
    aio_notify_accept(ctx);

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            return true;
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}
static gboolean
aio_ctx_dispatch(GSource     *source,
                 GSourceFunc  callback,
                 gpointer     user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}
static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;

    qemu_bh_delete(ctx->notify_dummy_bh);
    thread_pool_free(ctx->thread_pool);

    qemu_mutex_lock(&ctx->bh_lock);
    while (ctx->first_bh) {
        QEMUBH *next = ctx->first_bh->next;

        /* qemu_bh_delete() must have been called on BHs in this AioContext */
        assert(ctx->first_bh->deleted);

        g_free(ctx->first_bh);
        ctx->first_bh = next;
    }
    qemu_mutex_unlock(&ctx->bh_lock);

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
    event_notifier_cleanup(&ctx->notifier);
    rfifolock_destroy(&ctx->lock);
    qemu_mutex_destroy(&ctx->bh_lock);
    timerlistgroup_deinit(&ctx->tlg);
}
static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};
GSource *aio_get_g_source(AioContext *ctx)
{
    g_source_ref(&ctx->source);
    return &ctx->source;
}
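/* An AioContext can thus be driven by a plain GLib main loop, which is
 * what the prepare/check/dispatch callbacks above exist for.  Sketch:
 *
 *     GSource *src = aio_get_g_source(ctx);  // takes a reference
 *     g_source_attach(src, NULL);            // default GMainContext
 *     g_source_unref(src);
 *     ...
 *     g_main_loop_run(loop);                 // iterates the source
 */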
ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}
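/* Sketch of offloading a blocking function to the lazily created pool,
 * assuming the thread_pool_submit_aio() API from block/thread-pool.h
 * (worker_fn and the other names here are hypothetical):
 *
 *     ThreadPool *pool = aio_get_thread_pool(ctx);
 *     thread_pool_submit_aio(pool, worker_fn, worker_arg,
 *                            completion_cb, opaque);
 */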
void aio_notify(AioContext *ctx)
{
    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
     */
    smp_mb();
    if (ctx->notify_me) {
        event_notifier_set(&ctx->notifier);
        atomic_mb_set(&ctx->notified, true);
    }
}
void aio_notify_accept(AioContext *ctx)
{
    if (atomic_xchg(&ctx->notified, false)) {
        event_notifier_test_and_clear(&ctx->notifier);
    }
}
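/* aio_notify() and aio_notify_accept() form an edge-triggered pair: a
 * kick is only delivered (and later consumed) while someone is actually
 * about to block.  In the event loop above this looks like:
 *
 *     atomic_or(&ctx->notify_me, 1);     // aio_ctx_prepare: ask for kicks
 *     ... block in poll ...
 *     atomic_and(&ctx->notify_me, ~1);   // aio_ctx_check: done blocking
 *     aio_notify_accept(ctx);            // consume a pending notification
 */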
static void aio_timerlist_notify(void *opaque)
{
    aio_notify(opaque);
}
static void aio_rfifolock_cb(void *opaque)
{
    AioContext *ctx = opaque;

    /* Kick owner thread in case they are blocked in aio_poll() */
    qemu_bh_schedule(ctx->notify_dummy_bh);
}
static void notify_dummy_bh(void *opaque)
{
    /* Do nothing, we were invoked just to force the event loop to iterate */
}
static void event_notifier_dummy_cb(EventNotifier *e)
{
}
AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;
    Error *local_err = NULL;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    aio_context_setup(ctx, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           (EventNotifierHandler *)
                           event_notifier_dummy_cb);
    ctx->thread_pool = NULL;
    qemu_mutex_init(&ctx->bh_lock);
    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->notify_dummy_bh = aio_bh_new(ctx, notify_dummy_bh, NULL);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}
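/* Sketch of a caller creating a context and handling failure (the error
 * reporting shown is one plausible choice, not prescribed by this file):
 *
 *     Error *err = NULL;
 *     AioContext *ctx = aio_context_new(&err);
 *     if (!ctx) {
 *         error_report_err(err);   // creation failed, err describes why
 *         return;
 *     }
 */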
void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}
void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}
void aio_context_acquire(AioContext *ctx)
{
    rfifolock_lock(&ctx->lock);
}
void aio_context_release(AioContext *ctx)
{
    rfifolock_unlock(&ctx->lock);
}
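/* Callers serialize access to an AioContext with this recursive FIFO lock;
 * aio_rfifolock_cb() above kicks the current owner if it is blocked in
 * aio_poll() so that waiters make progress.  Sketch of the usage pattern:
 *
 *     aio_context_acquire(ctx);
 *     ... manipulate BHs, timers, or file descriptors of ctx ...
 *     aio_context_release(ctx);
 */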