/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

struct QEMUBH {
    AioContext *ctx;
    QEMUBHFunc *cb;
    void *opaque;
    QEMUBH *next;
    bool scheduled;
    bool idle;
    bool deleted;
};

QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;

    bh = g_malloc0(sizeof(QEMUBH));
    bh->ctx = ctx;
    bh->cb = cb;
    bh->opaque = opaque;
    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);
    return bh;
}

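/*
 * Usage sketch (illustrative, not part of this file): a caller typically
 * creates a bottom half once and schedules it whenever work is pending;
 * "my_cb", "MyState" and "s" are hypothetical names.
 *
 *     static void my_cb(void *opaque)
 *     {
 *         MyState *s = opaque;              // hypothetical caller state
 *         ...
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, s);
 *     qemu_bh_schedule(bh);  // my_cb runs once, ASAP, in ctx's event loop
 */
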
/* Multiple invocations of aio_bh_poll must not run concurrently */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;

    ctx->walking_bh++;

    ret = 0;
    for (bh = ctx->first_bh; bh; bh = next) {
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
        next = bh->next;
        if (!bh->deleted && bh->scheduled) {
            bh->scheduled = 0;
            /* Paired with write barrier in bh schedule to ensure reading for
             * idle & callbacks coming after bh's scheduling.
             */
            smp_rmb();
            if (!bh->idle) {
                ret = 1;
            }
            bh->idle = 0;
            bh->cb(bh->opaque);
        }
    }

    ctx->walking_bh--;

    /* remove deleted bhs */
    if (!ctx->walking_bh) {
        qemu_mutex_lock(&ctx->bh_lock);
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_mutex_unlock(&ctx->bh_lock);
    }

    return ret;
}

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    if (bh->scheduled) {
        return;
    }
    bh->idle = 1;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in the aio_bh_poll.
     */
    smp_wmb();
    bh->scheduled = 1;
}

void qemu_bh_schedule(QEMUBH *bh)
{
    AioContext *ctx;

    if (bh->scheduled) {
        return;
    }
    ctx = bh->ctx;
    bh->idle = 0;
    /* Make sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in the aio_bh_poll.
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
     */
    smp_mb();
    bh->scheduled = 1;
    aio_notify(ctx);
}

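/*
 * Ordering sketch (illustrative): the smp_mb() above pairs with the
 * smp_rmb() in aio_bh_poll():
 *
 *     scheduling thread                polling thread
 *     -----------------                --------------
 *     bh->idle = 0;                    if (bh->scheduled) {  // reads 1
 *     smp_mb();                            smp_rmb();
 *     bh->scheduled = 1;                   ... read bh->idle ...
 *
 * so a poller that observes scheduled == 1 cannot see a stale idle value.
 */
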
/* This function is asynchronous: a callback that is already running is not
 * interrupted or waited for.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}

/* This function is asynchronous: the bottom half does the actual deletion
 * at the final stage, in aio_bh_poll().
 */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    int64_t deadline;
    int timeout = -1;
    QEMUBH *bh;

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}

static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            return true;
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource     *source,
                 GSourceFunc  callback,
                 gpointer     user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_poll(ctx, false);
    return true;
}

static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;

    thread_pool_free(ctx->thread_pool);
    aio_set_event_notifier(ctx, &ctx->notifier, NULL);
    event_notifier_cleanup(&ctx->notifier);
    rfifolock_destroy(&ctx->lock);
    qemu_mutex_destroy(&ctx->bh_lock);
    g_array_free(ctx->pollfds, TRUE);
    timerlistgroup_deinit(&ctx->tlg);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

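/*
 * Note (illustrative): glib drives these callbacks on each main-loop
 * iteration: prepare() supplies the poll timeout, check() runs after the
 * poll to test for readiness, and dispatch() is invoked when prepare()
 * returned TRUE or check() reported pending work; finalize() runs when
 * the last reference to the GSource is dropped.
 */
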
GSource *aio_get_g_source(AioContext *ctx)
{
    g_source_ref(&ctx->source);
    return &ctx->source;
}

ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

void aio_set_dispatching(AioContext *ctx, bool dispatching)
{
    ctx->dispatching = dispatching;
    if (!dispatching) {
        /* Write ctx->dispatching before reading e.g. bh->scheduled.
         * Optimization: this is only needed when we're entering the "unsafe"
         * phase where other threads must call event_notifier_set.
         */
        smp_mb();
    }
}

void aio_notify(AioContext *ctx)
{
    /* Write e.g. bh->scheduled before reading ctx->dispatching. */
    smp_mb();
    if (!ctx->dispatching) {
        event_notifier_set(&ctx->notifier);
    }
}

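/*
 * Handshake sketch (illustrative): aio_notify() and aio_set_dispatching()
 * form a Dekker-style pattern to avoid lost wakeups:
 *
 *     notifying thread                 event-loop thread
 *     ----------------                 -----------------
 *     bh->scheduled = 1;               ctx->dispatching = false;
 *     smp_mb();                        smp_mb();
 *     read ctx->dispatching;           read bh->scheduled;
 *
 * At least one side observes the other's write: either the notifier sees
 * dispatching == false and sets the event notifier, or the loop sees
 * scheduled == 1 and does not block.
 */
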
static void aio_timerlist_notify(void *opaque)
{
    aio_notify(opaque);
}

static void aio_rfifolock_cb(void *opaque)
{
    /* Kick owner thread in case they are blocked in aio_poll() */
    aio_notify(opaque);
}

AioContext *aio_context_new(void)
{
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
    ctx->thread_pool = NULL;
    qemu_mutex_init(&ctx->bh_lock);
    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
    event_notifier_init(&ctx->notifier, false);
    aio_set_event_notifier(ctx, &ctx->notifier,
                           (EventNotifierHandler *)
                           event_notifier_test_and_clear);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    return ctx;
}

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    rfifolock_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    rfifolock_unlock(&ctx->lock);
}

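/*
 * Usage sketch (illustrative): threads other than the owner wrap their
 * accesses to the context in acquire/release; the recursive FIFO lock
 * keeps callers fair, and aio_rfifolock_cb() kicks the owner out of
 * aio_poll() if it is blocked there:
 *
 *     aio_context_acquire(ctx);
 *     qemu_bh_schedule(bh);      // or any other AioContext operation
 *     aio_context_release(ctx);
 */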