/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "qemu-aio.h"
#include "main-loop.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */
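
/* Reconstructed sketch of the QEMUBH structure: the definition is inferred
 * from the fields the functions below actually touch (cb, opaque, next,
 * scheduled, idle, deleted); exact field order and types are assumptions. */
struct QEMUBH {
    QEMUBHFunc *cb;     /* callback run by aio_bh_poll */
    void *opaque;       /* argument handed to cb */
    QEMUBH *next;       /* singly linked list rooted at ctx->first_bh */
    bool scheduled;     /* set by qemu_bh_schedule*, cleared before cb runs */
    bool idle;          /* idle BHs only bound the poll timeout to 10 ms */
    bool deleted;       /* reclaimed lazily by aio_bh_poll */
};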

QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_malloc0(sizeof(QEMUBH));
    bh->cb = cb;
    bh->opaque = opaque;
    bh->next = ctx->first_bh;
    ctx->first_bh = bh;
    return bh;
}

int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;

    ctx->walking_bh++;  /* defer removal of deleted BHs while the list is walked */

    ret = 0;
    for (bh = ctx->first_bh; bh; bh = next) {
        next = bh->next;
        if (!bh->deleted && bh->scheduled) {
            bh->scheduled = 0;
            if (!bh->idle)
                ret = 1;
            bh->idle = 0;
            bh->cb(bh->opaque);
        }
    }

    ctx->walking_bh--;

    /* remove deleted bhs */
    if (!ctx->walking_bh) {
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
    }

    return ret;
}

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    if (bh->scheduled)
        return;
    bh->scheduled = 1;
    bh->idle = 1;
}

void qemu_bh_schedule(QEMUBH *bh)
{
    if (bh->scheduled)
        return;
    bh->scheduled = 1;
    bh->idle = 0;
    /* stop the currently executing CPU to execute the BH ASAP */
    qemu_notify_event();
}

void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}

void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}

void aio_bh_update_timeout(AioContext *ctx, uint32_t *timeout)
{
    QEMUBH *bh;

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                *timeout = MIN(10, *timeout);
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                *timeout = 0;
                break;
            }
        }
    }
}
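
/* Sketch of how a main loop could consume aio_bh_update_timeout(); the
 * surrounding wait function is an assumption, not code from this file,
 * and is kept under #if 0 so it does not affect the build. */
#if 0
static int example_wait(AioContext *ctx, uint32_t timeout_ms)
{
    /* becomes 0 if a non-idle BH is pending, at most 10 ms if only idle BHs are */
    aio_bh_update_timeout(ctx, &timeout_ms);
    /* ... block in select()/poll() for at most timeout_ms ... */
    return aio_bh_poll(ctx);
}
#endif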

AioContext *aio_context_new(void)
{
    return g_new0(AioContext, 1);
}

void aio_flush(AioContext *ctx)
{
    while (aio_poll(ctx, true));
}
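
/* Illustrative usage sketch, not part of the original file. The names
 * example_cb / example_usage and the standalone driving of aio_bh_poll()
 * are assumptions made for the example; kept under #if 0 so it does not
 * affect the build. */
#if 0
static void example_cb(void *opaque)
{
    int *counter = opaque;
    (*counter)++;               /* runs from aio_bh_poll(), not from qemu_bh_schedule() */
}

static void example_usage(void)
{
    AioContext *ctx = aio_context_new();
    int counter = 0;
    QEMUBH *bh = aio_bh_new(ctx, example_cb, &counter);

    qemu_bh_schedule(bh);       /* mark the BH pending and kick the main loop */
    aio_bh_poll(ctx);           /* invokes example_cb(&counter) once and clears 'scheduled' */
    qemu_bh_delete(bh);         /* memory is reclaimed by a later aio_bh_poll() */
}
#endif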