/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "qemu/timer.h"
#include "qemu/sockets.h"      // struct in_addr needed for libslirp.h
#include "sysemu/qtest.h"
#include "slirp/libslirp.h"
#include "qemu/main-loop.h"
#include "block/aio.h"

#ifndef _WIN32

#include "qemu/compatfd.h"
/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (intptr_t)opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN) {
            break;
        }

        if (len != sizeof(info)) {
            printf("read from sigfd returned %zd: %m\n", len);
            return;
        }

        sigaction(info.ssi_signo, NULL, &action);
        if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction) {
            action.sa_sigaction(info.ssi_signo,
                                (siginfo_t *)&info, NULL);
        } else if (action.sa_handler) {
            action.sa_handler(info.ssi_signo);
        }
    }
}
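
/* Block the signals we want to handle in every thread and create a
 * signalfd for them; sigfd_handler() above then dispatches each signal
 * from the main loop instead of from an asynchronous signal context.
 */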
static int qemu_signal_init(void)
{
    int sigfd;
    sigset_t set;

    /*
     * SIG_IPI must be blocked in the main thread and must not be caught
     * by sigwait() in the signal thread. Otherwise, the cpu thread will
     * not catch it reliably.
     */
    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGBUS);
    /* SIGINT cannot be handled via signalfd, so that ^C can be used
     * to interrupt QEMU when it is being run under gdb.  SIGHUP and
     * SIGTERM are also handled asynchronously, even though it is not
     * strictly necessary, because they use the same handler as SIGINT.
     */
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    sigdelset(&set, SIG_IPI);
    sigfd = qemu_signalfd(&set);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl_setfl(sigfd, O_NONBLOCK);

    qemu_set_fd_handler(sigfd, sigfd_handler, NULL, (void *)(intptr_t)sigfd);

    return 0;
}
#else /* _WIN32 */

static int qemu_signal_init(void)
{
    return 0;
}
#endif
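
/* The main loop's AioContext, and the bottom half used by
 * qemu_notify_event() to kick the main loop out of its poll.
 */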
static AioContext *qemu_aio_context;
static QEMUBH *qemu_notify_bh;

static void notify_event_cb(void *opaque)
{
    /* No need to do anything; this bottom half is only used to
     * kick the kernel out of ppoll/poll/WaitForMultipleObjects.
     */
}
AioContext *qemu_get_aio_context(void)
{
    return qemu_aio_context;
}

void qemu_notify_event(void)
{
    if (!qemu_aio_context) {
        return;
    }
    qemu_bh_schedule(qemu_notify_bh);
}
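
/* GPollFDs gathered from all sources for a single main loop iteration. */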
static GArray *gpollfds;

int qemu_init_main_loop(Error **errp)
{
    int ret;
    GSource *src;
    Error *local_error = NULL;

    init_clocks();

    ret = qemu_signal_init();
    if (ret) {
        return ret;
    }

    qemu_aio_context = aio_context_new(&local_error);
    if (!qemu_aio_context) {
        error_propagate(errp, local_error);
        return -EMFILE;
    }
    /* Only create the bottom half once the context is known to be valid;
     * qemu_bh_new() dereferences qemu_aio_context.
     */
    qemu_notify_bh = qemu_bh_new(notify_event_cb, NULL);
    gpollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
    src = aio_get_g_source(qemu_aio_context);
    g_source_attach(src, NULL);
    g_source_unref(src);
    src = iohandler_get_g_source();
    g_source_attach(src, NULL);
    g_source_unref(src);
    return 0;
}
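
/* Typical startup sequence (a minimal sketch; the real caller lives in
 * vl.c and does more error handling; should_exit is a placeholder):
 *
 *     Error *err = NULL;
 *     if (qemu_init_main_loop(&err) < 0) {
 *         error_report_err(err);
 *         exit(1);
 *     }
 *     while (!should_exit) {
 *         main_loop_wait(false);
 *     }
 */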
static int max_priority;

#ifndef _WIN32
static int glib_pollfds_idx;
static int glib_n_poll_fds;

static void glib_pollfds_fill(int64_t *cur_timeout)
{
    GMainContext *context = g_main_context_default();
    int timeout = 0;
    int64_t timeout_ns;
    int n;

    g_main_context_prepare(context, &max_priority);

    glib_pollfds_idx = gpollfds->len;
    n = glib_n_poll_fds;
    do {
        GPollFD *pfds;
        glib_n_poll_fds = n;
        g_array_set_size(gpollfds, glib_pollfds_idx + glib_n_poll_fds);
        pfds = &g_array_index(gpollfds, GPollFD, glib_pollfds_idx);
        n = g_main_context_query(context, max_priority, &timeout, pfds,
                                 glib_n_poll_fds);
    } while (n != glib_n_poll_fds);

    if (timeout < 0) {
        timeout_ns = -1;
    } else {
        timeout_ns = (int64_t)timeout * (int64_t)SCALE_MS;
    }

    *cur_timeout = qemu_soonest_timeout(timeout_ns, *cur_timeout);
}
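
/* Check the GPollFDs that glib_pollfds_fill() appended to gpollfds and
 * dispatch any GLib sources that became ready.
 */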
static void glib_pollfds_poll(void)
{
    GMainContext *context = g_main_context_default();
    GPollFD *pfds = &g_array_index(gpollfds, GPollFD, glib_pollfds_idx);

    if (g_main_context_check(context, max_priority, pfds, glib_n_poll_fds)) {
        g_main_context_dispatch(context);
    }
}

#define MAX_MAIN_LOOP_SPIN (1000)
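
/* Poll all file descriptors for up to @timeout nanoseconds.  Called with
 * the iothread mutex (BQL) held; the lock is dropped around qemu_poll_ns()
 * whenever the poll can block, so VCPU threads can make progress.
 */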
static int os_host_main_loop_wait(int64_t timeout)
{
    int ret;
    static int spin_counter;

    glib_pollfds_fill(&timeout);

    /* If the I/O thread is very busy or we are incorrectly busy waiting in
     * the I/O thread, this can lead to starvation of the BQL such that the
     * VCPU threads never run.  To make sure we can detect the latter case,
     * print a message to the screen.  If we run into this condition, create
     * a fake timeout in order to give the VCPU threads a chance to run.
     */
    if (!timeout && (spin_counter > MAX_MAIN_LOOP_SPIN)) {
        static bool notified;

        if (!notified && !qtest_enabled()) {
            fprintf(stderr,
                    "main-loop: WARNING: I/O thread spun for %d iterations\n",
                    MAX_MAIN_LOOP_SPIN);
            notified = true;
        }

        timeout = SCALE_MS;
    }

    if (timeout) {
        spin_counter = 0;
        qemu_mutex_unlock_iothread();
    } else {
        spin_counter++;
    }

    ret = qemu_poll_ns((GPollFD *)gpollfds->data, gpollfds->len, timeout);

    if (timeout) {
        qemu_mutex_lock_iothread();
    }

    glib_pollfds_poll();
    return ret;
}
#else

/***********************************************************/
/* Polling handling */

typedef struct PollingEntry {
    PollingFunc *func;
    void *opaque;
    struct PollingEntry *next;
} PollingEntry;

static PollingEntry *first_polling_entry;

int qemu_add_polling_cb(PollingFunc *func, void *opaque)
{
    PollingEntry **ppe, *pe;
    pe = g_malloc0(sizeof(PollingEntry));
    pe->func = func;
    pe->opaque = opaque;
    /* append at the tail of the singly linked list */
    for (ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next) {
    }
    *ppe = pe;
    return 0;
}

void qemu_del_polling_cb(PollingFunc *func, void *opaque)
{
    PollingEntry **ppe, *pe;
    for (ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next) {
        pe = *ppe;
        if (pe->func == func && pe->opaque == opaque) {
            *ppe = pe->next;
            g_free(pe);
            break;
        }
    }
}
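
/* The polling callbacks above are invoked once per loop iteration on
 * win32; if any of them reports activity, the blocking wait in
 * os_host_main_loop_wait() below is skipped for that iteration.
 */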
/***********************************************************/
/* Wait objects support */
typedef struct WaitObjects {
    int num;
    int revents[MAXIMUM_WAIT_OBJECTS + 1];
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    WaitObjectFunc *func[MAXIMUM_WAIT_OBJECTS + 1];
    void *opaque[MAXIMUM_WAIT_OBJECTS + 1];
} WaitObjects;

static WaitObjects wait_objects = {0};

int qemu_add_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
{
    WaitObjects *w = &wait_objects;

    if (w->num >= MAXIMUM_WAIT_OBJECTS) {
        return -1;
    }
    w->events[w->num] = handle;
    w->func[w->num] = func;
    w->opaque[w->num] = opaque;
    w->revents[w->num] = 0;
    w->num++;
    return 0;
}
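
/* Remove the first registered entry whose HANDLE matches @handle and shift
 * the remaining entries down; @func and @opaque are not compared.
 */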
void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
{
    int i, found;
    WaitObjects *w = &wait_objects;

    found = 0;
    for (i = 0; i < w->num; i++) {
        if (w->events[i] == handle) {
            found = 1;
        }
        if (found) {
            w->events[i] = w->events[i + 1];
            w->func[i] = w->func[i + 1];
            w->opaque[i] = w->opaque[i + 1];
            w->revents[i] = w->revents[i + 1];
        }
    }
    if (found) {
        w->num--;
    }
}
void qemu_fd_register(int fd)
{
    WSAEventSelect(fd, event_notifier_get_handle(&qemu_aio_context->notifier),
                   FD_READ | FD_ACCEPT | FD_CLOSE |
                   FD_CONNECT | FD_WRITE | FD_OOB);
}
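
/* Translate the GPollFD array into select()-style fd_sets.  Returns the
 * highest fd seen, or -1 if no fd was added to any set.
 */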
static int pollfds_fill(GArray *pollfds, fd_set *rfds, fd_set *wfds,
                        fd_set *xfds)
{
    int nfds = -1;
    int i;

    for (i = 0; i < pollfds->len; i++) {
        GPollFD *pfd = &g_array_index(pollfds, GPollFD, i);
        int fd = pfd->fd;
        int events = pfd->events;
        if (events & G_IO_IN) {
            FD_SET(fd, rfds);
            nfds = MAX(nfds, fd);
        }
        if (events & G_IO_OUT) {
            FD_SET(fd, wfds);
            nfds = MAX(nfds, fd);
        }
        if (events & G_IO_PRI) {
            FD_SET(fd, xfds);
            nfds = MAX(nfds, fd);
        }
    }
    return nfds;
}
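
/* Write the select() results back into each GPollFD's revents field,
 * masked by the events the caller originally asked for.
 */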
static void pollfds_poll(GArray *pollfds, int nfds, fd_set *rfds,
                         fd_set *wfds, fd_set *xfds)
{
    int i;

    for (i = 0; i < pollfds->len; i++) {
        GPollFD *pfd = &g_array_index(pollfds, GPollFD, i);
        int fd = pfd->fd;
        int revents = 0;

        if (FD_ISSET(fd, rfds)) {
            revents |= G_IO_IN;
        }
        if (FD_ISSET(fd, wfds)) {
            revents |= G_IO_OUT;
        }
        if (FD_ISSET(fd, xfds)) {
            revents |= G_IO_PRI;
        }
        pfd->revents = revents & pfd->events;
    }
}
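
/* Win32 variant of the main loop wait: run the polling callbacks, do a
 * non-blocking select() on socket fds, then block in qemu_poll_ns() on
 * the GLib fds and the registered wait object HANDLEs.  Called with the
 * iothread mutex (BQL) held; the lock is dropped around the blocking poll.
 */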
static int os_host_main_loop_wait(int64_t timeout)
{
    GMainContext *context = g_main_context_default();
    GPollFD poll_fds[1024 * 2]; /* this is probably overkill */
    int select_ret = 0;
    int g_poll_ret, ret, i, n_poll_fds;
    PollingEntry *pe;
    WaitObjects *w = &wait_objects;
    gint poll_timeout;
    int64_t poll_timeout_ns;
    static struct timeval tv0;
    fd_set rfds, wfds, xfds;
    int nfds;

    /* XXX: need to suppress polling by better using win32 events */
    ret = 0;
    for (pe = first_polling_entry; pe != NULL; pe = pe->next) {
        ret |= pe->func(pe->opaque);
    }
    if (ret != 0) {
        return ret;
    }

    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    FD_ZERO(&xfds);
    nfds = pollfds_fill(gpollfds, &rfds, &wfds, &xfds);
    if (nfds >= 0) {
        select_ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv0);
        if (select_ret != 0) {
            timeout = 0;
        }
        if (select_ret > 0) {
            pollfds_poll(gpollfds, nfds, &rfds, &wfds, &xfds);
        }
    }

    g_main_context_prepare(context, &max_priority);
    n_poll_fds = g_main_context_query(context, max_priority, &poll_timeout,
                                      poll_fds, ARRAY_SIZE(poll_fds));
    g_assert(n_poll_fds <= ARRAY_SIZE(poll_fds));

    for (i = 0; i < w->num; i++) {
        poll_fds[n_poll_fds + i].fd = (DWORD_PTR)w->events[i];
        poll_fds[n_poll_fds + i].events = G_IO_IN;
    }

    if (poll_timeout < 0) {
        poll_timeout_ns = -1;
    } else {
        poll_timeout_ns = (int64_t)poll_timeout * (int64_t)SCALE_MS;
    }

    poll_timeout_ns = qemu_soonest_timeout(poll_timeout_ns, timeout);

    qemu_mutex_unlock_iothread();
    g_poll_ret = qemu_poll_ns(poll_fds, n_poll_fds + w->num, poll_timeout_ns);

    qemu_mutex_lock_iothread();
    if (g_poll_ret > 0) {
        for (i = 0; i < w->num; i++) {
            w->revents[i] = poll_fds[n_poll_fds + i].revents;
        }
        for (i = 0; i < w->num; i++) {
            if (w->revents[i] && w->func[i]) {
                w->func[i](w->opaque[i]);
            }
        }
    }

    if (g_main_context_check(context, max_priority, poll_fds, n_poll_fds)) {
        g_main_context_dispatch(context);
    }

    return select_ret || g_poll_ret;
}
#endif
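
/* Run one iteration of the main loop: gather poll fds and the earliest
 * timer deadline, wait for events (or return immediately if @nonblocking),
 * then run any expired timers.  Returns the value of
 * os_host_main_loop_wait().
 */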
int main_loop_wait(int nonblocking)
{
    int ret;
    uint32_t timeout = UINT32_MAX;
    int64_t timeout_ns;

    if (nonblocking) {
        timeout = 0;
    }

    /* poll any events */
    g_array_set_size(gpollfds, 0); /* reset for new iteration */
    /* XXX: separate device handlers from system ones */
#ifdef CONFIG_SLIRP
    slirp_pollfds_fill(gpollfds, &timeout);
#endif

    if (timeout == UINT32_MAX) {
        timeout_ns = -1;
    } else {
        timeout_ns = (uint64_t)timeout * (int64_t)(SCALE_MS);
    }

    timeout_ns = qemu_soonest_timeout(timeout_ns,
                                      timerlistgroup_deadline_ns(
                                          &main_loop_tlg));

    ret = os_host_main_loop_wait(timeout_ns);
#ifdef CONFIG_SLIRP
    slirp_pollfds_poll(gpollfds, (ret < 0));
#endif

    /* The CPU thread can wait indefinitely for an event after
       missing the warp */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
    qemu_clock_run_all_timers();

    return ret;
}
/* Functions to operate on the main QEMU AioContext.  */

QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
{
    return aio_bh_new(qemu_aio_context, cb, opaque);
}
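
/* Typical use (a minimal sketch): create the bottom half once, then
 * schedule it from any thread; @cb runs in the main loop thread.
 *
 *     QEMUBH *bh = qemu_bh_new(my_cb, my_opaque);
 *     qemu_bh_schedule(bh);
 *
 * my_cb and my_opaque are placeholders for the caller's function and state.
 */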