/*
 * qemu-timer.h — QEMU clock and timer API (from qemu.git).
 */
1 #ifndef QEMU_TIMER_H
2 #define QEMU_TIMER_H
4 #include "qemu-common.h"
5 #include "main-loop.h"
6 #include "notify.h"
8 #ifdef __FreeBSD__
9 #include <sys/param.h>
10 #endif
12 /* timers */
14 #define SCALE_MS 1000000
15 #define SCALE_US 1000
16 #define SCALE_NS 1
18 typedef struct QEMUClock QEMUClock;
19 typedef void QEMUTimerCB(void *opaque);
21 /* The real time clock should be used only for stuff which does not
22 change the virtual machine state, as it is run even if the virtual
23 machine is stopped. The real time clock has a frequency of 1000
24 Hz. */
25 extern QEMUClock *rt_clock;
27 /* The virtual clock is only run during the emulation. It is stopped
28 when the virtual machine is stopped. Virtual timers use a high
29 precision clock, usually cpu cycles (use ticks_per_sec). */
30 extern QEMUClock *vm_clock;
32 /* The host clock should be use for device models that emulate accurate
33 real time sources. It will continue to run when the virtual machine
34 is suspended, and it will reflect system time changes the host may
35 undergo (e.g. due to NTP). The host clock has the same precision as
36 the virtual clock. */
37 extern QEMUClock *host_clock;
39 int64_t qemu_get_clock_ns(QEMUClock *clock);
40 int64_t qemu_clock_has_timers(QEMUClock *clock);
41 int64_t qemu_clock_expired(QEMUClock *clock);
42 int64_t qemu_clock_deadline(QEMUClock *clock);
43 void qemu_clock_enable(QEMUClock *clock, bool enabled);
44 void qemu_clock_warp(QEMUClock *clock);
46 void qemu_register_clock_reset_notifier(QEMUClock *clock, Notifier *notifier);
47 void qemu_unregister_clock_reset_notifier(QEMUClock *clock,
48 Notifier *notifier);
50 QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
51 QEMUTimerCB *cb, void *opaque);
52 void qemu_free_timer(QEMUTimer *ts);
53 void qemu_del_timer(QEMUTimer *ts);
54 void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time);
55 void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time);
56 bool qemu_timer_pending(QEMUTimer *ts);
57 bool qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time);
58 uint64_t qemu_timer_expire_time_ns(QEMUTimer *ts);
60 void qemu_run_timers(QEMUClock *clock);
61 void qemu_run_all_timers(void);
62 void configure_alarms(char const *opt);
63 void init_clocks(void);
64 int init_timer_alarm(void);
66 int64_t cpu_get_ticks(void);
67 void cpu_enable_ticks(void);
68 void cpu_disable_ticks(void);
70 static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb,
71 void *opaque)
73 return qemu_new_timer(clock, SCALE_NS, cb, opaque);
76 static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb,
77 void *opaque)
79 return qemu_new_timer(clock, SCALE_MS, cb, opaque);
82 static inline int64_t qemu_get_clock_ms(QEMUClock *clock)
84 return qemu_get_clock_ns(clock) / SCALE_MS;
87 static inline int64_t get_ticks_per_sec(void)
89 return 1000000000LL;
92 /* real time host monotonic timer */
93 static inline int64_t get_clock_realtime(void)
95 struct timeval tv;
97 gettimeofday(&tv, NULL);
98 return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
101 /* Warning: don't insert tracepoints into these functions, they are
102 also used by simpletrace backend and tracepoints would cause
103 an infinite recursion! */
104 #ifdef _WIN32
105 extern int64_t clock_freq;
107 static inline int64_t get_clock(void)
109 LARGE_INTEGER ti;
110 QueryPerformanceCounter(&ti);
111 return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
114 #else
116 extern int use_rt_clock;
118 static inline int64_t get_clock(void)
120 #if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 500000) \
121 || defined(__DragonFly__) || defined(__FreeBSD_kernel__)
122 if (use_rt_clock) {
123 struct timespec ts;
124 clock_gettime(CLOCK_MONOTONIC, &ts);
125 return ts.tv_sec * 1000000000LL + ts.tv_nsec;
126 } else
127 #endif
129 /* XXX: using gettimeofday leads to problems if the date
130 changes, so it should be avoided. */
131 return get_clock_realtime();
134 #endif
136 void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
137 void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);
139 /* icount */
140 int64_t cpu_get_icount(void);
141 int64_t cpu_get_clock(void);
143 /*******************************************/
144 /* host CPU ticks (if available) */
146 #if defined(_ARCH_PPC)
148 static inline int64_t cpu_get_real_ticks(void)
150 int64_t retval;
151 #ifdef _ARCH_PPC64
152 /* This reads timebase in one 64bit go and includes Cell workaround from:
153 http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
155 __asm__ __volatile__ ("mftb %0\n\t"
156 "cmpwi %0,0\n\t"
157 "beq- $-8"
158 : "=r" (retval));
159 #else
160 /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
161 unsigned long junk;
162 __asm__ __volatile__ ("mfspr %1,269\n\t" /* mftbu */
163 "mfspr %L0,268\n\t" /* mftb */
164 "mfspr %0,269\n\t" /* mftbu */
165 "cmpw %0,%1\n\t"
166 "bne $-16"
167 : "=r" (retval), "=r" (junk));
168 #endif
169 return retval;
172 #elif defined(__i386__)
174 static inline int64_t cpu_get_real_ticks(void)
176 int64_t val;
177 asm volatile ("rdtsc" : "=A" (val));
178 return val;
181 #elif defined(__x86_64__)
183 static inline int64_t cpu_get_real_ticks(void)
185 uint32_t low,high;
186 int64_t val;
187 asm volatile("rdtsc" : "=a" (low), "=d" (high));
188 val = high;
189 val <<= 32;
190 val |= low;
191 return val;
194 #elif defined(__hppa__)
196 static inline int64_t cpu_get_real_ticks(void)
198 int val;
199 asm volatile ("mfctl %%cr16, %0" : "=r"(val));
200 return val;
203 #elif defined(__ia64)
205 static inline int64_t cpu_get_real_ticks(void)
207 int64_t val;
208 asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
209 return val;
212 #elif defined(__s390__)
214 static inline int64_t cpu_get_real_ticks(void)
216 int64_t val;
217 asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
218 return val;
221 #elif defined(__sparc__)
223 static inline int64_t cpu_get_real_ticks (void)
225 #if defined(_LP64)
226 uint64_t rval;
227 asm volatile("rd %%tick,%0" : "=r"(rval));
228 return rval;
229 #else
230 /* We need an %o or %g register for this. For recent enough gcc
231 there is an "h" constraint for that. Don't bother with that. */
232 union {
233 uint64_t i64;
234 struct {
235 uint32_t high;
236 uint32_t low;
237 } i32;
238 } rval;
239 asm volatile("rd %%tick,%%g1; srlx %%g1,32,%0; mov %%g1,%1"
240 : "=r"(rval.i32.high), "=r"(rval.i32.low) : : "g1");
241 return rval.i64;
242 #endif
245 #elif defined(__mips__) && \
246 ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
248 * binutils wants to use rdhwr only on mips32r2
249 * but as linux kernel emulate it, it's fine
250 * to use it.
253 #define MIPS_RDHWR(rd, value) { \
254 __asm__ __volatile__ (".set push\n\t" \
255 ".set mips32r2\n\t" \
256 "rdhwr %0, "rd"\n\t" \
257 ".set pop" \
258 : "=r" (value)); \
261 static inline int64_t cpu_get_real_ticks(void)
263 /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
264 uint32_t count;
265 static uint32_t cyc_per_count = 0;
267 if (!cyc_per_count) {
268 MIPS_RDHWR("$3", cyc_per_count);
271 MIPS_RDHWR("$2", count);
272 return (int64_t)(count * cyc_per_count);
275 #elif defined(__alpha__)
277 static inline int64_t cpu_get_real_ticks(void)
279 uint64_t cc;
280 uint32_t cur, ofs;
282 asm volatile("rpcc %0" : "=r"(cc));
283 cur = cc;
284 ofs = cc >> 32;
285 return cur - ofs;
288 #else
289 /* The host CPU doesn't have an easily accessible cycle counter.
290 Just return a monotonically increasing value. This will be
291 totally wrong, but hopefully better than nothing. */
292 static inline int64_t cpu_get_real_ticks (void)
294 static int64_t ticks = 0;
295 return ticks++;
297 #endif
299 #ifdef CONFIG_PROFILER
300 static inline int64_t profile_getclock(void)
302 return cpu_get_real_ticks();
305 extern int64_t qemu_time, qemu_time_start;
306 extern int64_t tlb_flush_time;
307 extern int64_t dev_time;
308 #endif
310 #endif