qemu-timer: Remove unneeded include statement (w32)
[qemu.git] / qemu-timer.c
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "sysemu.h"
#include "net.h"
#include "monitor.h"
#include "console.h"

#include "hw/hw.h"

#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <errno.h>
#include <sys/time.h>
#include <signal.h>
#ifdef __FreeBSD__
#include <sys/param.h>
#endif

#ifdef __linux__
#include <sys/ioctl.h>
#include <linux/rtc.h>
/* For the benefit of older linux systems which don't supply it,
   we use a local copy of hpet.h. */
/* #include <linux/hpet.h> */
#include "hpet.h"
#endif

#ifdef _WIN32
#include <windows.h>
#include <mmsystem.h>
#endif

#include "qemu-timer.h"
/* Conversion factor from emulated instructions to virtual clock ticks.  */
int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed.  */
int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
/***********************************************************/
/* guest cycle counter */

typedef struct TimersState {
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

TimersState timers_state;
/* return the host CPU cycle counter and handle stop/restart */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non increasing ticks may happen if the host uses
               software suspend */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}
/* return the host CPU monotonic timer and handle stop/restart */
static int64_t cpu_get_clock(void)
{
    int64_t ti;
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        return ti + timers_state.cpu_clock_offset;
    }
}
#ifndef CONFIG_IOTHREAD
static int64_t qemu_icount_delta(void)
{
    if (!use_icount) {
        return 5000 * (int64_t) 1000000;
    } else if (use_icount == 1) {
        /* When not using an adaptive execution frequency
           we tend to get badly out of sync with real time,
           so just delay for a reasonable amount of time.  */
        return 0;
    } else {
        return cpu_get_icount() - cpu_get_clock();
    }
}
#endif
/* enable cpu_get_ticks() */
void cpu_enable_ticks(void)
{
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
   cpu_get_ticks() after that.  */
void cpu_disable_ticks(void)
{
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock();
        timers_state.cpu_ticks_enabled = 0;
    }
}
/***********************************************************/
/* timers */

#define QEMU_CLOCK_REALTIME 0
#define QEMU_CLOCK_VIRTUAL  1
#define QEMU_CLOCK_HOST     2

struct QEMUClock {
    int type;
    int enabled;

    QEMUTimer *warp_timer;
};

struct QEMUTimer {
    QEMUClock *clock;
    int64_t expire_time;        /* in nanoseconds */
    int scale;
    QEMUTimerCB *cb;
    void *opaque;
    struct QEMUTimer *next;
};
struct qemu_alarm_timer {
    char const *name;
    int (*start)(struct qemu_alarm_timer *t);
    void (*stop)(struct qemu_alarm_timer *t);
    void (*rearm)(struct qemu_alarm_timer *t);
    void *priv;

    char expired;
    char pending;
};

static struct qemu_alarm_timer *alarm_timer;
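
/* Return true if timer_head is non-NULL and its expire time (already in
   nanoseconds) is at or before current_time. */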
static bool qemu_timer_expired_ns(QEMUTimer *timer_head, int64_t current_time)
{
    return timer_head && (timer_head->expire_time <= current_time);
}

int qemu_alarm_pending(void)
{
    return alarm_timer->pending;
}

static inline int alarm_has_dynticks(struct qemu_alarm_timer *t)
{
    return !!t->rearm;
}

static void qemu_rearm_alarm_timer(struct qemu_alarm_timer *t)
{
    if (!alarm_has_dynticks(t))
        return;
    t->rearm(t);
}

/* TODO: MIN_TIMER_REARM_NS should be optimized */
#define MIN_TIMER_REARM_NS 250000
#ifdef _WIN32

static int win32_start_timer(struct qemu_alarm_timer *t);
static void win32_stop_timer(struct qemu_alarm_timer *t);
static void win32_rearm_timer(struct qemu_alarm_timer *t);

#else

static int unix_start_timer(struct qemu_alarm_timer *t);
static void unix_stop_timer(struct qemu_alarm_timer *t);

#ifdef __linux__

static int dynticks_start_timer(struct qemu_alarm_timer *t);
static void dynticks_stop_timer(struct qemu_alarm_timer *t);
static void dynticks_rearm_timer(struct qemu_alarm_timer *t);

static int hpet_start_timer(struct qemu_alarm_timer *t);
static void hpet_stop_timer(struct qemu_alarm_timer *t);

static int rtc_start_timer(struct qemu_alarm_timer *t);
static void rtc_stop_timer(struct qemu_alarm_timer *t);

#endif /* __linux__ */

#endif /* _WIN32 */
/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;
    /* If the VM is not running, then do nothing.  */
    if (!vm_running)
        return;

    cur_time = cpu_get_clock();
    cur_icount = qemu_get_clock_ns(vm_clock);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}
static void icount_adjust_rt(void *opaque)
{
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
    icount_adjust();
}
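
/* Convert a vm_clock interval into an instruction count, rounding up
   (the inverse of the icount_time_shift scaling). */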
int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
static struct qemu_alarm_timer alarm_timers[] = {
#ifndef _WIN32
#ifdef __linux__
    {"dynticks", dynticks_start_timer,
     dynticks_stop_timer, dynticks_rearm_timer, NULL},
    /* HPET - if available - is preferred */
    {"hpet", hpet_start_timer, hpet_stop_timer, NULL, NULL},
    /* ...otherwise try RTC */
    {"rtc", rtc_start_timer, rtc_stop_timer, NULL, NULL},
#endif
    {"unix", unix_start_timer, unix_stop_timer, NULL, NULL},
#else
    {"dynticks", win32_start_timer,
     win32_stop_timer, win32_rearm_timer, NULL},
    {"win32", win32_start_timer,
     win32_stop_timer, NULL, NULL},
#endif
    {NULL, }
};
static void show_available_alarms(void)
{
    int i;

    printf("Available alarm timers, in order of precedence:\n");
    for (i = 0; alarm_timers[i].name; i++)
        printf("%s\n", alarm_timers[i].name);
}
void configure_alarms(char const *opt)
{
    int i;
    int cur = 0;
    int count = ARRAY_SIZE(alarm_timers) - 1;
    char *arg;
    char *name;
    struct qemu_alarm_timer tmp;

    if (!strcmp(opt, "?")) {
        show_available_alarms();
        exit(0);
    }

    arg = qemu_strdup(opt);

    /* Reorder the array */
    name = strtok(arg, ",");
    while (name) {
        for (i = 0; i < count && alarm_timers[i].name; i++) {
            if (!strcmp(alarm_timers[i].name, name))
                break;
        }

        if (i == count) {
            fprintf(stderr, "Unknown clock %s\n", name);
            goto next;
        }

        if (i < cur)
            /* Ignore */
            goto next;

        /* Swap */
        tmp = alarm_timers[i];
        alarm_timers[i] = alarm_timers[cur];
        alarm_timers[cur] = tmp;

        cur++;
next:
        name = strtok(NULL, ",");
    }

    qemu_free(arg);

    if (cur) {
        /* Disable remaining timers */
        for (i = cur; i < count; i++)
            alarm_timers[i].name = NULL;
    } else {
        show_available_alarms();
        exit(1);
    }
}
#define QEMU_NUM_CLOCKS 3

QEMUClock *rt_clock;
QEMUClock *vm_clock;
QEMUClock *host_clock;

static QEMUTimer *active_timers[QEMU_NUM_CLOCKS];
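
/* Allocate a clock of the given type; clocks start out enabled. */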
static QEMUClock *qemu_new_clock(int type)
{
    QEMUClock *clock;
    clock = qemu_mallocz(sizeof(QEMUClock));
    clock->type = type;
    clock->enabled = 1;
    return clock;
}

void qemu_clock_enable(QEMUClock *clock, int enabled)
{
    clock->enabled = enabled;
}

static int64_t vm_clock_warp_start;
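
/* Warp timer callback: fold the real time that elapsed since
   vm_clock_warp_start into qemu_icount_bias so that vm_clock catches up
   on time spent with the CPUs idle, then wake the main loop if a virtual
   timer has become due. */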
static void icount_warp_rt(void *opaque)
{
    if (vm_clock_warp_start == -1) {
        return;
    }

    if (vm_running) {
        int64_t clock = qemu_get_clock_ns(rt_clock);
        int64_t warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 1) {
            qemu_icount_bias += warp_delta;
        } else {
            /*
             * In adaptive mode, do not let the vm_clock run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock();
            int64_t cur_icount = qemu_get_clock_ns(vm_clock);
            int64_t delta = cur_time - cur_icount;
            qemu_icount_bias += MIN(warp_delta, delta);
        }
        if (qemu_timer_expired(active_timers[QEMU_CLOCK_VIRTUAL],
                               qemu_get_clock_ns(vm_clock))) {
            qemu_notify_event();
        }
    }
    vm_clock_warp_start = -1;
}
void qemu_clock_warp(QEMUClock *clock)
{
    int64_t deadline;

    if (!clock->warp_timer) {
        return;
    }

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    assert(clock == vm_clock);

    /*
     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
     * ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest vm_clock timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !active_timers[clock->type]) {
        qemu_del_timer(clock->warp_timer);
        return;
    }

    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
    deadline = qemu_next_icount_deadline();
    if (deadline > 0) {
        /*
         * Ensure the vm_clock proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * vm_clock.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending vm_clock timer; rather
         * time could just advance to the next vm_clock event.  Instead, we
         * do stop VCPUs and only advance vm_clock after some "real" time,
         * (related to the time left until the next event) has passed.  This
         * rt_clock timer will do this.  This avoids that the warps are too
         * visible externally---for example, you will not be sending network
         * packets continuously instead of every 100ms.
         */
        qemu_mod_timer(clock->warp_timer, vm_clock_warp_start + deadline);
    } else {
        qemu_notify_event();
    }
}
QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
                          QEMUTimerCB *cb, void *opaque)
{
    QEMUTimer *ts;

    ts = qemu_mallocz(sizeof(QEMUTimer));
    ts->clock = clock;
    ts->cb = cb;
    ts->opaque = opaque;
    ts->scale = scale;
    return ts;
}

void qemu_free_timer(QEMUTimer *ts)
{
    qemu_free(ts);
}
/* stop a timer, but do not dealloc it */
void qemu_del_timer(QEMUTimer *ts)
{
    QEMUTimer **pt, *t;

    /* NOTE: this code must be signal safe because
       qemu_timer_expired() can be called from a signal. */
    pt = &active_timers[ts->clock->type];
    for(;;) {
        t = *pt;
        if (!t)
            break;
        if (t == ts) {
            *pt = t->next;
            break;
        }
        pt = &t->next;
    }
}
/* modify the current timer so that it will be fired when current_time
   >= expire_time. The corresponding callback will be called. */
static void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time)
{
    QEMUTimer **pt, *t;

    qemu_del_timer(ts);

    /* add the timer in the sorted list */
    /* NOTE: this code must be signal safe because
       qemu_timer_expired() can be called from a signal. */
    pt = &active_timers[ts->clock->type];
    for(;;) {
        t = *pt;
        if (!qemu_timer_expired_ns(t, expire_time)) {
            break;
        }
        pt = &t->next;
    }
    ts->expire_time = expire_time;
    ts->next = *pt;
    *pt = ts;

    /* Rearm if necessary  */
    if (pt == &active_timers[ts->clock->type]) {
        if (!alarm_timer->pending) {
            qemu_rearm_alarm_timer(alarm_timer);
        }
        /* Interrupt execution to force deadline recalculation.  */
        qemu_clock_warp(ts->clock);
        if (use_icount) {
            qemu_notify_event();
        }
    }
}
/* modify the current timer so that it will be fired when current_time
   >= expire_time. The corresponding callback will be called. */
void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time)
{
    qemu_mod_timer_ns(ts, expire_time * ts->scale);
}
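
/* Return nonzero if the timer is on its clock's active list, i.e. it has
   been armed with qemu_mod_timer() and has not yet fired or been deleted. */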
int qemu_timer_pending(QEMUTimer *ts)
{
    QEMUTimer *t;
    for(t = active_timers[ts->clock->type]; t != NULL; t = t->next) {
        if (t == ts)
            return 1;
    }
    return 0;
}

int qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time)
{
    return qemu_timer_expired_ns(timer_head, current_time * timer_head->scale);
}
static void qemu_run_timers(QEMUClock *clock)
{
    QEMUTimer **ptimer_head, *ts;
    int64_t current_time;

    if (!clock->enabled)
        return;

    current_time = qemu_get_clock_ns(clock);
    ptimer_head = &active_timers[clock->type];
    for(;;) {
        ts = *ptimer_head;
        if (!qemu_timer_expired_ns(ts, current_time)) {
            break;
        }
        /* remove timer from the list before calling the callback */
        *ptimer_head = ts->next;
        ts->next = NULL;

        /* run the callback (the timer list can be modified) */
        ts->cb(ts->opaque);
    }
}
int64_t qemu_get_clock_ns(QEMUClock *clock)
{
    switch(clock->type) {
    case QEMU_CLOCK_REALTIME:
        return get_clock();
    default:
    case QEMU_CLOCK_VIRTUAL:
        if (use_icount) {
            return cpu_get_icount();
        } else {
            return cpu_get_clock();
        }
    case QEMU_CLOCK_HOST:
        return get_clock_realtime();
    }
}
void init_clocks(void)
{
    rt_clock = qemu_new_clock(QEMU_CLOCK_REALTIME);
    vm_clock = qemu_new_clock(QEMU_CLOCK_VIRTUAL);
    host_clock = qemu_new_clock(QEMU_CLOCK_HOST);

    rtc_clock = host_clock;
}
/* save a timer */
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    if (qemu_timer_pending(ts)) {
        expire_time = ts->expire_time;
    } else {
        expire_time = -1;
    }
    qemu_put_be64(f, expire_time);
}

void qemu_get_timer(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    expire_time = qemu_get_be64(f);
    if (expire_time != -1) {
        qemu_mod_timer_ns(ts, expire_time);
    } else {
        qemu_del_timer(ts);
    }
}
static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields      = (VMStateField []) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};
void configure_icount(const char *option)
{
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option)
        return;

#ifdef CONFIG_IOTHREAD
    vm_clock->warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
#endif

    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = qemu_new_timer_ms(rt_clock, icount_adjust_rt, NULL);
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_vm_timer = qemu_new_timer_ns(vm_clock, icount_adjust_vm, NULL);
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
}
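
/* Called from the main loop once the alarm timer has fired: rearm a
   dynticks-style alarm if needed and run the expired timers of every clock. */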
void qemu_run_all_timers(void)
{
    alarm_timer->pending = 0;

    /* rearm timer, if not periodic */
    if (alarm_timer->expired) {
        alarm_timer->expired = 0;
        qemu_rearm_alarm_timer(alarm_timer);
    }

    /* vm time timers */
    if (vm_running) {
        qemu_run_timers(vm_clock);
    }

    qemu_run_timers(rt_clock);
    qemu_run_timers(host_clock);
}
static int64_t qemu_next_alarm_deadline(void);
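
/* Host alarm callback: runs in signal context (POSIX) or in a timer-queue
   thread (Win32).  It only marks the alarm as pending and notifies the main
   loop; the QEMU timer callbacks themselves run in qemu_run_all_timers(). */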
#ifdef _WIN32
static void CALLBACK host_alarm_handler(PVOID lpParam, BOOLEAN unused)
#else
static void host_alarm_handler(int host_signum)
#endif
{
    struct qemu_alarm_timer *t = alarm_timer;
    if (!t)
        return;

#if 0
#define DISP_FREQ 1000
    {
        static int64_t delta_min = INT64_MAX;
        static int64_t delta_max, delta_cum, last_clock, delta, ti;
        static int count;
        ti = qemu_get_clock_ns(vm_clock);
        if (last_clock != 0) {
            delta = ti - last_clock;
            if (delta < delta_min)
                delta_min = delta;
            if (delta > delta_max)
                delta_max = delta;
            delta_cum += delta;
            if (++count == DISP_FREQ) {
                printf("timer: min=%" PRId64 " us max=%" PRId64 " us avg=%" PRId64 " us avg_freq=%0.3f Hz\n",
                       muldiv64(delta_min, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_max, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_cum, 1000000 / DISP_FREQ, get_ticks_per_sec()),
                       (double)get_ticks_per_sec() / ((double)delta_cum / DISP_FREQ));
                count = 0;
                delta_min = INT64_MAX;
                delta_max = 0;
                delta_cum = 0;
            }
        }
        last_clock = ti;
    }
#endif
    if (alarm_has_dynticks(t) ||
        qemu_next_alarm_deadline () <= 0) {
        t->expired = alarm_has_dynticks(t);
        t->pending = 1;
        qemu_notify_event();
    }
}
int64_t qemu_next_icount_deadline(void)
{
    /* To avoid problems with overflow limit this to 2^32.  */
    int64_t delta = INT32_MAX;

    assert(use_icount);
    if (active_timers[QEMU_CLOCK_VIRTUAL]) {
        delta = active_timers[QEMU_CLOCK_VIRTUAL]->expire_time -
                     qemu_get_clock_ns(vm_clock);
    }

    if (delta < 0)
        delta = 0;

    return delta;
}
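
/* Nanoseconds until the earliest active timer on any clock.  The virtual
   clock is ignored when icount is in use, since it only advances as the
   guest executes instructions. */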
static int64_t qemu_next_alarm_deadline(void)
{
    int64_t delta;
    int64_t rtdelta;

    if (!use_icount && active_timers[QEMU_CLOCK_VIRTUAL]) {
        delta = active_timers[QEMU_CLOCK_VIRTUAL]->expire_time -
                     qemu_get_clock_ns(vm_clock);
    } else {
        delta = INT32_MAX;
    }
    if (active_timers[QEMU_CLOCK_HOST]) {
        int64_t hdelta = active_timers[QEMU_CLOCK_HOST]->expire_time -
                 qemu_get_clock_ns(host_clock);
        if (hdelta < delta)
            delta = hdelta;
    }
    if (active_timers[QEMU_CLOCK_REALTIME]) {
        rtdelta = (active_timers[QEMU_CLOCK_REALTIME]->expire_time -
                 qemu_get_clock_ns(rt_clock));
        if (rtdelta < delta)
            delta = rtdelta;
    }

    return delta;
}
#if defined(__linux__)

#define RTC_FREQ 1024
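
/* Deliver SIGIO to this process when the timer device fd becomes readable
   and handle it with host_alarm_handler (used by the RTC and HPET timers). */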
static void enable_sigio_timer(int fd)
{
    struct sigaction act;

    /* timer signal */
    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGIO, &act, NULL);
    fcntl_setfl(fd, O_ASYNC);
    fcntl(fd, F_SETOWN, getpid());
}
static int hpet_start_timer(struct qemu_alarm_timer *t)
{
    struct hpet_info info;
    int r, fd;

    fd = qemu_open("/dev/hpet", O_RDONLY);
    if (fd < 0)
        return -1;

    /* Set frequency */
    r = ioctl(fd, HPET_IRQFREQ, RTC_FREQ);
    if (r < 0) {
        fprintf(stderr, "Could not configure '/dev/hpet' to have a 1024Hz timer. This is not a fatal\n"
                "error, but for better emulation accuracy type:\n"
                "'echo 1024 > /proc/sys/dev/hpet/max-user-freq' as root.\n");
        goto fail;
    }

    /* Check capabilities */
    r = ioctl(fd, HPET_INFO, &info);
    if (r < 0)
        goto fail;

    /* Enable periodic mode */
    r = ioctl(fd, HPET_EPI, 0);
    if (info.hi_flags && (r < 0))
        goto fail;

    /* Enable interrupt */
    r = ioctl(fd, HPET_IE_ON, 0);
    if (r < 0)
        goto fail;

    enable_sigio_timer(fd);
    t->priv = (void *)(long)fd;

    return 0;
fail:
    close(fd);
    return -1;
}

static void hpet_stop_timer(struct qemu_alarm_timer *t)
{
    int fd = (long)t->priv;

    close(fd);
}
static int rtc_start_timer(struct qemu_alarm_timer *t)
{
    int rtc_fd;
    unsigned long current_rtc_freq = 0;

    TFR(rtc_fd = qemu_open("/dev/rtc", O_RDONLY));
    if (rtc_fd < 0)
        return -1;
    ioctl(rtc_fd, RTC_IRQP_READ, &current_rtc_freq);
    if (current_rtc_freq != RTC_FREQ &&
        ioctl(rtc_fd, RTC_IRQP_SET, RTC_FREQ) < 0) {
        fprintf(stderr, "Could not configure '/dev/rtc' to have a 1024 Hz timer. This is not a fatal\n"
                "error, but for better emulation accuracy either use a 2.6 host Linux kernel or\n"
                "type 'echo 1024 > /proc/sys/dev/rtc/max-user-freq' as root.\n");
        goto fail;
    }
    if (ioctl(rtc_fd, RTC_PIE_ON, 0) < 0) {
    fail:
        close(rtc_fd);
        return -1;
    }

    enable_sigio_timer(rtc_fd);

    t->priv = (void *)(long)rtc_fd;

    return 0;
}

static void rtc_stop_timer(struct qemu_alarm_timer *t)
{
    int rtc_fd = (long)t->priv;

    close(rtc_fd);
}
static int dynticks_start_timer(struct qemu_alarm_timer *t)
{
    struct sigevent ev;
    timer_t host_timer;
    struct sigaction act;

    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGALRM, &act, NULL);

    /*
     * Initialize ev struct to 0 to avoid valgrind complaining
     * about uninitialized data in timer_create call
     */
    memset(&ev, 0, sizeof(ev));
    ev.sigev_value.sival_int = 0;
    ev.sigev_notify = SIGEV_SIGNAL;
    ev.sigev_signo = SIGALRM;

    if (timer_create(CLOCK_REALTIME, &ev, &host_timer)) {
        perror("timer_create");

        /* disable dynticks */
        fprintf(stderr, "Dynamic Ticks disabled\n");

        return -1;
    }

    t->priv = (void *)(long)host_timer;

    return 0;
}

static void dynticks_stop_timer(struct qemu_alarm_timer *t)
{
    timer_t host_timer = (timer_t)(long)t->priv;

    timer_delete(host_timer);
}
static void dynticks_rearm_timer(struct qemu_alarm_timer *t)
{
    timer_t host_timer = (timer_t)(long)t->priv;
    struct itimerspec timeout;
    int64_t nearest_delta_ns = INT64_MAX;
    int64_t current_ns;

    assert(alarm_has_dynticks(t));
    if (!active_timers[QEMU_CLOCK_REALTIME] &&
        !active_timers[QEMU_CLOCK_VIRTUAL] &&
        !active_timers[QEMU_CLOCK_HOST])
        return;

    nearest_delta_ns = qemu_next_alarm_deadline();
    if (nearest_delta_ns < MIN_TIMER_REARM_NS)
        nearest_delta_ns = MIN_TIMER_REARM_NS;

    /* check whether a timer is already running */
    if (timer_gettime(host_timer, &timeout)) {
        perror("gettime");
        fprintf(stderr, "Internal timer error: aborting\n");
        exit(1);
    }
    current_ns = timeout.it_value.tv_sec * 1000000000LL + timeout.it_value.tv_nsec;
    if (current_ns && current_ns <= nearest_delta_ns)
        return;

    timeout.it_interval.tv_sec = 0;
    timeout.it_interval.tv_nsec = 0; /* 0 for one-shot timer */
    timeout.it_value.tv_sec = nearest_delta_ns / 1000000000;
    timeout.it_value.tv_nsec = nearest_delta_ns % 1000000000;
    if (timer_settime(host_timer, 0 /* RELATIVE */, &timeout, NULL)) {
        perror("settime");
        fprintf(stderr, "Internal timer error: aborting\n");
        exit(1);
    }
}

#endif /* defined(__linux__) */
#if !defined(_WIN32)

static int unix_start_timer(struct qemu_alarm_timer *t)
{
    struct sigaction act;
    struct itimerval itv;
    int err;

    /* timer signal */
    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGALRM, &act, NULL);

    itv.it_interval.tv_sec = 0;
    /* for i386 kernel 2.6 to get 1 ms */
    itv.it_interval.tv_usec = 999;
    itv.it_value.tv_sec = 0;
    itv.it_value.tv_usec = 10 * 1000;

    err = setitimer(ITIMER_REAL, &itv, NULL);
    if (err)
        return -1;

    return 0;
}

static void unix_stop_timer(struct qemu_alarm_timer *t)
{
    struct itimerval itv;

    memset(&itv, 0, sizeof(itv));
    setitimer(ITIMER_REAL, &itv, NULL);
}

#endif /* !defined(_WIN32) */
#ifdef _WIN32

static int win32_start_timer(struct qemu_alarm_timer *t)
{
    HANDLE hTimer;
    BOOLEAN success;

    /* If you call ChangeTimerQueueTimer on a one-shot timer (its period
       is zero) that has already expired, the timer is not updated.  Since
       creating a new timer is relatively expensive, set a bogus one-hour
       interval in the dynticks case.  */
    success = CreateTimerQueueTimer(&hTimer,
                          NULL,
                          host_alarm_handler,
                          t,
                          1,
                          alarm_has_dynticks(t) ? 3600000 : 1,
                          WT_EXECUTEINTIMERTHREAD);

    if (!success) {
        fprintf(stderr, "Failed to initialize win32 alarm timer: %ld\n",
                GetLastError());
        return -1;
    }

    t->priv = (PVOID) hTimer;
    return 0;
}
static void win32_stop_timer(struct qemu_alarm_timer *t)
{
    HANDLE hTimer = t->priv;

    if (hTimer) {
        DeleteTimerQueueTimer(NULL, hTimer, NULL);
    }
}

static void win32_rearm_timer(struct qemu_alarm_timer *t)
{
    HANDLE hTimer = t->priv;
    int nearest_delta_ms;
    BOOLEAN success;

    assert(alarm_has_dynticks(t));
    if (!active_timers[QEMU_CLOCK_REALTIME] &&
        !active_timers[QEMU_CLOCK_VIRTUAL] &&
        !active_timers[QEMU_CLOCK_HOST])
        return;

    nearest_delta_ms = (qemu_next_alarm_deadline() + 999999) / 1000000;
    if (nearest_delta_ms < 1) {
        nearest_delta_ms = 1;
    }
    success = ChangeTimerQueueTimer(NULL,
                                    hTimer,
                                    nearest_delta_ms,
                                    3600000);

    if (!success) {
        fprintf(stderr, "Failed to rearm win32 alarm timer: %ld\n",
                GetLastError());
        exit(-1);
    }
}

#endif /* _WIN32 */
static void alarm_timer_on_change_state_rearm(void *opaque, int running, int reason)
{
    if (running)
        qemu_rearm_alarm_timer((struct qemu_alarm_timer *) opaque);
}
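
/* Start the first alarm timer in alarm_timers[] that initializes
   successfully, mark an event pending so timers are serviced at time 0,
   and rearm the alarm whenever the VM resumes. */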
int init_timer_alarm(void)
{
    struct qemu_alarm_timer *t = NULL;
    int i, err = -1;

    for (i = 0; alarm_timers[i].name; i++) {
        t = &alarm_timers[i];

        err = t->start(t);
        if (!err)
            break;
    }

    if (err) {
        err = -ENOENT;
        goto fail;
    }

    /* first event is at time 0 */
    t->pending = 1;
    alarm_timer = t;
    qemu_add_vm_change_state_handler(alarm_timer_on_change_state_rearm, t);

    return 0;

fail:
    return err;
}
void quit_timers(void)
{
    struct qemu_alarm_timer *t = alarm_timer;
    alarm_timer = NULL;
    t->stop(t);
}
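
/* Poll timeout for the main loop, in milliseconds.  Without the I/O thread
   this also advances qemu_icount so that virtual time makes progress while
   the CPU is idle. */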
int qemu_calculate_timeout(void)
{
#ifndef CONFIG_IOTHREAD
    int timeout;

    if (!vm_running)
        timeout = 5000;
    else {
        /* XXX: use timeout computed from timers */
        int64_t add;
        int64_t delta;
        /* Advance virtual time to the next event.  */
        delta = qemu_icount_delta();
        if (delta > 0) {
            /* If virtual time is ahead of real time then just
               wait for IO.  */
            timeout = (delta + 999999) / 1000000;
        } else {
            /* Wait for either IO to occur or the next
               timer event.  */
            add = qemu_next_icount_deadline();
            /* We advance the timer before checking for IO.
               Limit the amount we advance so that early IO
               activity won't get the guest too far ahead.  */
            if (add > 10000000)
                add = 10000000;
            delta += add;
            qemu_icount += qemu_icount_round (add);
            timeout = delta / 1000000;
            if (timeout < 0)
                timeout = 0;
        }
    }

    return timeout;
#else /* CONFIG_IOTHREAD */
    return 1000;
#endif
}