aio / timers: Make qemu_run_timers and qemu_run_all_timers return progress
[qemu/ar7.git] / include / qemu / timer.h
blob 962eca8fa8d0237f565fedceaf16f0f525524d33
#ifndef QEMU_TIMER_H
#define QEMU_TIMER_H

#include "qemu-common.h"
#include "qemu/main-loop.h"
#include "qemu/notify.h"

/* timers */

#define SCALE_MS 1000000
#define SCALE_US 1000
#define SCALE_NS 1

#define QEMU_CLOCK_REALTIME 0
#define QEMU_CLOCK_VIRTUAL  1
#define QEMU_CLOCK_HOST     2

typedef struct QEMUClock QEMUClock;
typedef void QEMUTimerCB(void *opaque);

/* The real time clock should be used only for stuff which does not
   change the virtual machine state, as it is run even if the virtual
   machine is stopped. The real time clock has a frequency of 1000
   Hz. */
extern QEMUClock *rt_clock;

/* The virtual clock is only run during the emulation. It is stopped
   when the virtual machine is stopped. Virtual timers use a high
   precision clock, usually cpu cycles (use ticks_per_sec). */
extern QEMUClock *vm_clock;

/* The host clock should be used for device models that emulate accurate
   real time sources. It will continue to run when the virtual machine
   is suspended, and it will reflect system time changes the host may
   undergo (e.g. due to NTP). The host clock has the same precision as
   the virtual clock. */
extern QEMUClock *host_clock;

int64_t qemu_get_clock_ns(QEMUClock *clock);
int64_t qemu_clock_has_timers(QEMUClock *clock);
int64_t qemu_clock_expired(QEMUClock *clock);
int64_t qemu_clock_deadline(QEMUClock *clock);

/**
 * qemu_clock_deadline_ns:
 * @clock: the clock to operate on
 *
 * Calculate the timeout of the earliest expiring timer
 * in nanoseconds, or -1 if no timer is set to expire.
 *
 * Returns: time until expiry in nanoseconds or -1
 */
int64_t qemu_clock_deadline_ns(QEMUClock *clock);

/**
 * qemu_timeout_ns_to_ms:
 * @ns: nanosecond timeout value
 *
 * Convert a nanosecond timeout value (or -1) to
 * a millisecond value (or -1), always rounding up.
 *
 * Returns: millisecond timeout value
 */
int qemu_timeout_ns_to_ms(int64_t ns);

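/* Example (illustrative only, not part of this header): derive a
 * millisecond poll timeout from the earliest vm_clock expiry. The
 * round-up matters: a 1 ns deadline must not become a 0 ms busy poll,
 * and -1 (no timer armed) passes through as an infinite wait.
 *
 *     int64_t deadline_ns = qemu_clock_deadline_ns(vm_clock);
 *     int timeout_ms = qemu_timeout_ns_to_ms(deadline_ns);
 *     // timeout_ms is now safe to hand to a g_poll()-style interface
 */
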
/**
 * qemu_poll_ns:
 * @fds: Array of file descriptors
 * @nfds: number of file descriptors
 * @timeout: timeout in nanoseconds
 *
 * Perform a poll like g_poll but with a timeout in nanoseconds.
 * See g_poll documentation for further details.
 *
 * Returns: number of fds ready
 */
int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout);

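/* Example (illustrative only): block on a set of file descriptors
 * without sleeping past the next vm_clock timer. "fds" and "nfds" are
 * hypothetical caller-provided values.
 *
 *     int64_t timeout_ns = qemu_clock_deadline_ns(vm_clock);
 *     int ready = qemu_poll_ns(fds, nfds, timeout_ns);
 *     if (ready > 0) {
 *         // service the ready file descriptors
 *     }
 */
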
void qemu_clock_enable(QEMUClock *clock, bool enabled);
void qemu_clock_warp(QEMUClock *clock);

void qemu_register_clock_reset_notifier(QEMUClock *clock, Notifier *notifier);
void qemu_unregister_clock_reset_notifier(QEMUClock *clock,
                                          Notifier *notifier);

QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
                          QEMUTimerCB *cb, void *opaque);
void qemu_free_timer(QEMUTimer *ts);
void qemu_del_timer(QEMUTimer *ts);
void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time);
void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time);
bool timer_pending(QEMUTimer *ts);
bool timer_expired(QEMUTimer *timer_head, int64_t current_time);
uint64_t timer_expire_time_ns(QEMUTimer *ts);

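/* Example (illustrative only): the usual timer lifecycle. The callback
 * and device state names are hypothetical. Expire times are absolute
 * clock readings in the timer's scale, so a periodic timer re-arms by
 * adding its period to the current clock value.
 *
 *     static void my_dev_tick(void *opaque)
 *     {
 *         MyDevState *s = opaque;              // hypothetical state
 *         // ... raise an interrupt, update device state ...
 *         qemu_mod_timer(s->timer,             // re-arm 10 ms later
 *                        qemu_get_clock_ms(vm_clock) + 10);
 *     }
 *
 *     s->timer = qemu_new_timer_ms(vm_clock, my_dev_tick, s);
 *     qemu_mod_timer(s->timer, qemu_get_clock_ms(vm_clock) + 10);
 *     ...
 *     qemu_del_timer(s->timer);    // cancel; safe even if not pending
 *     qemu_free_timer(s->timer);   // release only after deleting
 */
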
/**
 * qemu_run_timers:
 * @clock: clock on which to operate
 *
 * Run all the timers associated with a clock.
 *
 * Returns: true if any timer ran.
 */
bool qemu_run_timers(QEMUClock *clock);

/**
 * qemu_run_all_timers:
 *
 * Run all the timers associated with every clock.
 *
 * Returns: true if any timer ran.
 */
bool qemu_run_all_timers(void);

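/* Example (illustrative only): the bool result is the "progress" value
 * from this commit. A main-loop caller can use it to decide whether
 * callbacks ran, and hence whether deadlines must be recomputed before
 * blocking again. "main_loop_wait_once" is a hypothetical caller.
 *
 *     static void main_loop_wait_once(void)
 *     {
 *         bool progress = qemu_run_all_timers();
 *         if (progress) {
 *             // a callback ran and may have armed new timers or
 *             // changed fd state: recompute the poll timeout
 *         }
 *     }
 */
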
void configure_alarms(char const *opt);
void init_clocks(void);
int init_timer_alarm(void);

int64_t cpu_get_ticks(void);
void cpu_enable_ticks(void);
void cpu_disable_ticks(void);

/**
 * qemu_soonest_timeout:
 * @timeout1: first timeout in nanoseconds (or -1 for infinite)
 * @timeout2: second timeout in nanoseconds (or -1 for infinite)
 *
 * Calculates the soonest of two timeout values. -1 means infinite, which
 * is later than any other value.
 *
 * Returns: soonest timeout value in nanoseconds (or -1 for infinite)
 */
static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2)
{
    /* we can abuse the fact that -1 (which means infinite) is a maximal
     * value when cast to unsigned. As this is disgusting, it's kept in
     * one inline function.
     */
    return ((uint64_t) timeout1 < (uint64_t) timeout2) ? timeout1 : timeout2;
}

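/* Example (illustrative only): fold the deadlines of two clocks into a
 * single wait. The unsigned-cast trick above makes -1 lose to any real
 * deadline: qemu_soonest_timeout(-1, 1000) == 1000, while
 * qemu_soonest_timeout(-1, -1) == -1 (wait forever).
 *
 *     int64_t t = qemu_soonest_timeout(qemu_clock_deadline_ns(vm_clock),
 *                                      qemu_clock_deadline_ns(host_clock));
 */
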
static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_NS, cb, opaque);
}

static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_MS, cb, opaque);
}

static inline int64_t qemu_get_clock_ms(QEMUClock *clock)
{
    return qemu_get_clock_ns(clock) / SCALE_MS;
}

static inline int64_t get_ticks_per_sec(void)
{
    return 1000000000LL;
}

/* real time host monotonic timer */
static inline int64_t get_clock_realtime(void)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
}

/* Warning: don't insert tracepoints into these functions, they are
   also used by simpletrace backend and tracepoints would cause
   an infinite recursion! */
#ifdef _WIN32
extern int64_t clock_freq;

static inline int64_t get_clock(void)
{
    LARGE_INTEGER ti;
    QueryPerformanceCounter(&ti);
    return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
}

#else

extern int use_rt_clock;

static inline int64_t get_clock(void)
{
#ifdef CLOCK_MONOTONIC
    if (use_rt_clock) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    } else
#endif
    {
        /* XXX: using gettimeofday leads to problems if the date
           changes, so it should be avoided. */
        return get_clock_realtime();
    }
}
#endif

void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);

/* icount */
int64_t cpu_get_icount(void);
int64_t cpu_get_clock(void);

/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads timebase in one 64bit go and includes Cell workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
    */
    __asm__ __volatile__ ("mftb    %0\n\t"
                          "cmpwi   %0,0\n\t"
                          "beq-    $-8"
                          : "=r" (retval));
#else
    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ ("mfspr   %1,269\n\t"  /* mftbu */
                          "mfspr   %L0,268\n\t" /* mftb */
                          "mfspr   %0,269\n\t"  /* mftbu */
                          "cmpw    %0,%1\n\t"
                          "bne     $-16"
                          : "=r" (retval), "=r" (junk));
#endif
    return retval;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    /* We need an %o or %g register for this. For recent enough gcc
       there is an "h" constraint for that. Don't bother with that. */
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    asm volatile("rd %%tick,%%g1; srlx %%g1,32,%0; mov %%g1,%1"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low) : : "g1");
    return rval.i64;
#endif
}

#elif defined(__mips__) && \
    ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
/*
 * binutils wants to use rdhwr only on mips32r2, but as the Linux
 * kernel emulates it, it's fine to use it.
 */
#define MIPS_RDHWR(rd, value) {                         \
        __asm__ __volatile__ (".set   push\n\t"         \
                              ".set mips32r2\n\t"       \
                              "rdhwr  %0, "rd"\n\t"     \
                              ".set   pop"              \
                              : "=r" (value));          \
    }

static inline int64_t cpu_get_real_ticks(void)
{
    /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count) {
        MIPS_RDHWR("$3", cyc_per_count);
    }

    MIPS_RDHWR("$2", count);
    return (int64_t)(count * cyc_per_count);
}

#elif defined(__alpha__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint64_t cc;
    uint32_t cur, ofs;

    asm volatile("rpcc %0" : "=r"(cc));
    cur = cc;
    ofs = cc >> 32;
    return cur - ofs;
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value. This will be
   totally wrong, but hopefully better than nothing. */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t dev_time;
#endif

#endif