tcg: signal-free qemu_cpu_kick
[qemu.git] / cpus.c
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "qapi-event.h"
#include "hw/nmi.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */
static CPUState *next_cpu;
int64_t max_delay;
int64_t max_advance;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}
/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

static bool icount_sleep = true;
static int64_t vm_clock_warp_start = -1;
/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10

static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;

typedef struct TimersState {
    /* Protected by BQL. */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read out of BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;

    /* Compensate for varying guest execution speed. */
    int64_t qemu_icount_bias;
    /* Only written by TCG thread */
    int64_t qemu_icount;
} TimersState;

static TimersState timers_state;
int64_t cpu_get_icount_raw(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = timers_state.qemu_icount;
    if (cpu) {
        if (!cpu->can_do_io) {
            fprintf(stderr, "Bad icount read\n");
            exit(1);
        }
        icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
    }
    return icount;
}

/* Return the virtual CPU time, based on the instruction counter. */
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount = cpu_get_icount_raw();
    return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
}

int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}

int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << icount_time_shift;
}
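/*
 * Worked example for the conversion above (illustrative, derived from the
 * code itself): with icount_time_shift == 3, one guest instruction accounts
 * for 1 << 3 = 8 ns of virtual time, i.e. the guest appears to execute at
 * 125 MIPS.  At MAX_ICOUNT_SHIFT (10) an instruction costs 1024 ns, which
 * is the "1MIPS minimum allowable speed" noted where that macro is defined:
 *
 *     // with icount_time_shift == 3:
 *     cpu_icount_to_ns(1000);   // 1000 << 3 == 8000 ns for 1000 insns
 */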
/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_real_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Note: non-increasing ticks may happen if the host uses
           software suspend. */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}
static int64_t cpu_get_clock_locked(void)
{
    int64_t ticks;

    ticks = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += get_clock();
    }

    return ticks;
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}
/* enable cpu_get_ticks()
 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the thing really protected by the seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* disable cpu_get_ticks(): the clock is stopped.  You must not call
 * cpu_get_ticks() after that.
 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the thing really protected by the seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex. */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing. */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down. */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up. */
        icount_time_shift++;
    }
    last_delta = delta;
    timers_state.qemu_icount_bias = cur_icount
                              - (timers_state.qemu_icount << icount_time_shift);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
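/*
 * Example of the rounding above (illustrative): a 100 ns deadline with
 * icount_time_shift == 3 must become "enough instructions to cover at
 * least 100 ns", so the division by 2^3 rounds up:
 *
 *     qemu_icount_round(100);   // (100 + 7) >> 3 == 13; 13 * 8 ns >= 100 ns
 */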
static void icount_warp_rt(void *opaque)
{
    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    if (atomic_read(&vm_clock_warp_start) == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = cpu_get_clock_locked();
        int64_t warp_delta;

        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = clock - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        timers_state.qemu_icount_bias += warp_delta;
    }
    vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    AioContext *aio_context;
    assert(qtest_enabled());
    aio_context = qemu_get_aio_context();
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);

        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        timers_state.qemu_icount_bias += warp;
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
void qemu_clock_warp(QEMUClockType type)
{
    int64_t clock;
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    if (icount_sleep) {
        /*
         * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
         * This ensures that the deadline for the timer is computed correctly
         * below.
         * This also makes sure that the insn counter is synchronized before
         * the CPU starts running, in case the CPU is woken by an event other
         * than the earliest QEMU_CLOCK_VIRTUAL timer.
         */
        icount_warp_rt(NULL);
        timer_del(icount_warp_timer);
    }
    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount. */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        static bool notified;
        if (!icount_sleep && !notified) {
            error_report("WARNING: icount sleep disabled and no active timers");
            notified = true;
        }
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance
         * QEMU_CLOCK_VIRTUAL.
         */
        if (!icount_sleep) {
            /*
             * We never let VCPUs sleep in no-sleep icount mode.
             * If there is a pending QEMU_CLOCK_VIRTUAL timer, we just advance
             * to the next QEMU_CLOCK_VIRTUAL event and notify it.
             * This is useful when we want a deterministic execution time,
             * isolated from host latencies.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock);
            timers_state.qemu_icount_bias += deadline;
            seqlock_write_unlock(&timers_state.vm_clock_seqlock);
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
        } else {
            /*
             * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
             * "real" time (related to the time left until the next event) has
             * passed.  The QEMU_CLOCK_VIRTUAL_RT clock will do this.
             * This keeps the warps invisible externally; for example, you
             * will not be sending network packets continuously instead of
             * every 100ms.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock);
            if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
                vm_clock_warp_start = clock;
            }
            seqlock_write_unlock(&timers_state.vm_clock_seqlock);
            timer_mod_anticipate(icount_warp_timer, clock + deadline);
        }
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
static bool icount_state_needed(void *opaque)
{
    return use_icount;
}

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icount_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_timers,
        NULL
    }
};

void cpu_ticks_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock, NULL);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
}
void configure_icount(QemuOpts *opts, Error **errp)
{
    const char *option;
    char *rem_str = NULL;

    option = qemu_opt_get(opts, "shift");
    if (!option) {
        if (qemu_opt_get(opts, "align") != NULL) {
            error_setg(errp, "Please specify shift option when using align");
        }
        return;
    }

    icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
    if (icount_sleep) {
        icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                         icount_warp_rt, NULL);
    }

    icount_align_option = qemu_opt_get_bool(opts, "align", false);

    if (icount_align_option && !icount_sleep) {
        error_setg(errp, "align=on and sleep=no are incompatible");
    }
    if (strcmp(option, "auto") != 0) {
        errno = 0;
        icount_time_shift = strtol(option, &rem_str, 0);
        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
            error_setg(errp, "icount: Invalid shift value");
        }
        use_icount = 1;
        return;
    } else if (icount_align_option) {
        error_setg(errp, "shift=auto and align=on are incompatible");
    } else if (!icount_sleep) {
        error_setg(errp, "shift=auto and sleep=no are incompatible");
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                                   icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
}
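/*
 * Example command lines accepted by this parser (an illustrative sketch;
 * see the QEMU manual for the authoritative option syntax):
 *
 *     -icount shift=7                fixed rate, 2^7 = 128 ns per insn
 *     -icount shift=auto             adaptive rate (use_icount == 2)
 *     -icount shift=5,align=on       fixed rate plus delay accounting
 *     -icount shift=7,sleep=off      deterministic, vCPUs never idle-sleep
 *
 * As enforced above, align=on needs an explicit shift and sleep=on, and
 * shift=auto is incompatible with both align=on and sleep=off.
 */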
/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

void cpu_clean_all_dirty(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_clean_state(cpu);
    }
}
static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        qapi_event_send_stop(&error_abort);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}
#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */
#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}
#endif /* _WIN32 */
static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static unsigned iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}

void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}
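/*
 * Usage sketch for the two helpers above (illustrative; the callback is
 * hypothetical).  run_on_cpu() blocks on qemu_work_cond until the target
 * vCPU has executed the function; async_run_on_cpu() queues a heap-allocated
 * item that flush_queued_work() later frees because wi->free is true:
 *
 *     static void do_nothing_on_vcpu(void *data)
 *     {
 *         // runs in the context of the vCPU thread for CPU(data)
 *     }
 *
 *     run_on_cpu(cpu, do_nothing_on_vcpu, cpu);       // synchronous
 *     async_run_on_cpu(cpu, do_nothing_on_vcpu, cpu); // fire and forget
 */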
static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle. */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}
static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}
static void tcg_exec_all(void);

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
        cpu->can_do_io = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    /* process any pending work */
    atomic_mb_set(&exit_request, 1);

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    if (cpu->thread_kicked) {
        return;
    }
    cpu->thread_kicked = true;
    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    abort();
#endif
}

static void qemu_cpu_kick_no_halt(void)
{
    CPUState *cpu;
    /* Ensure whatever caused the exit has reached the CPU threads before
     * writing exit_request.
     */
    atomic_mb_set(&exit_request, 1);
    cpu = atomic_mb_read(&tcg_current_cpu);
    if (cpu) {
        cpu_exit(cpu);
    }
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (tcg_enabled()) {
        qemu_cpu_kick_no_halt();
    } else {
        qemu_cpu_kick_thread(cpu);
    }
}
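/*
 * The TCG path above is the point of the "signal-free qemu_cpu_kick"
 * change: instead of delivering SIG_IPI to the single TCG thread,
 * qemu_cpu_kick_no_halt() publishes exit_request with full barriers and
 * then calls cpu_exit() on whichever CPU tcg_current_cpu reports as
 * executing, which requests an exit from the current translation block.
 * A rough sketch of the handshake (the TCG-thread side paraphrases
 * tcg_exec_all() further down, it is not copied verbatim):
 *
 *     // kicker thread                     // TCG thread
 *     atomic_mb_set(&exit_request, 1);     for (...; !exit_request; ...)
 *     cpu_exit(tcg_current_cpu);               tcg_cpu_exec(cpu);
 *                                          atomic_mb_set(&exit_request, 0);
 */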
void qemu_cpu_kick_self(void)
{
    assert(current_cpu);
    qemu_cpu_kick_thread(current_cpu);
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

static __thread bool iothread_locked = false;

bool qemu_mutex_iothread_locked(void)
{
    return iothread_locked;
}

void qemu_mutex_lock_iothread(void)
{
    atomic_inc(&iothread_requesting_mutex);
    /* In the simple case there is no need to bump the VCPU thread out of
     * TCG code execution.
     */
    if (!tcg_enabled() || qemu_in_vcpu_thread() ||
        !first_cpu || !first_cpu->created) {
        qemu_mutex_lock(&qemu_global_mutex);
        atomic_dec(&iothread_requesting_mutex);
    } else {
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_no_halt();
            qemu_mutex_lock(&qemu_global_mutex);
        }
        atomic_dec(&iothread_requesting_mutex);
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
    iothread_locked = true;
}

void qemu_mutex_unlock_iothread(void)
{
    iothread_locked = false;
    qemu_mutex_unlock(&qemu_global_mutex);
}
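/*
 * Usage sketch (illustrative, not a pattern mandated anywhere): code that
 * may run without the BQL, such as a callback in an auxiliary thread,
 * brackets its access to global state with the pair above:
 *
 *     if (!qemu_mutex_iothread_locked()) {
 *         qemu_mutex_lock_iothread();
 *         // ... touch device or timer state ...
 *         qemu_mutex_unlock_iothread();
 *     }
 *
 * On the contended path, qemu_mutex_lock_iothread() kicks the TCG thread
 * out of guest code first, so a vCPU cannot hold up the acquisition
 * indefinitely.
 */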
static int all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return 0;
        }
    }

    return 1;
}

void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}
/* For temporary buffers for forming a name */
#define VCPU_THREAD_NAME_SIZE 16

static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    tcg_cpu_address_space_init(cpu, cpu->as);

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                 cpu->cpu_index);
        qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                           cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}
static int tcg_cpu_exec(CPUState *cpu)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int64_t deadline;
        int decr;
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                    + cpu->icount_extra);
        cpu->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        count = qemu_icount_round(deadline);
        timers_state.qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        cpu->icount_decr.u16.low = decr;
        cpu->icount_extra = count;
    }
    ret = cpu_exec(cpu);
#ifdef CONFIG_PROFILER
    tcg_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                    + cpu->icount_extra);
        cpu->icount_decr.u32 = 0;
        cpu->icount_extra = 0;
    }
    return ret;
}
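/*
 * Worked example of the instruction budget computed above (numbers are
 * illustrative): with icount_time_shift == 3 and a 1 ms deadline,
 * qemu_icount_round(1000000) yields 125000 instructions.  That exceeds
 * the 16-bit downcounter, so the budget is split:
 *
 *     decr  = 0xffff;              // 65535 goes into icount_decr.u16.low
 *     count = 125000 - 0xffff;     // the remaining 59465 into icount_extra
 *
 * The executor refills the downcounter from icount_extra as it drains, so
 * the vCPU never runs more than the deadline's worth of instructions.
 */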
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }

    /* Pairs with smp_wmb in qemu_cpu_kick. */
    atomic_mb_set(&exit_request, 0);
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}
CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#elif defined(TARGET_TRICORE)
        TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
        CPUTriCoreState *env = &tricore_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#elif defined(TARGET_TRICORE)
        info->value->has_PC = true;
        info->value->PC = env->PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];
    int64_t orig_addr = addr, orig_size = size;

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                   "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
                             " specified", orig_addr, orig_size);
            goto exit;
        }
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}
void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_physical_memory_read(addr, buf, l);
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}
void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);

        if (!cpu->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(cpu->apic_state);
        }
    }
#else
    nmi_monitor_handle(monitor_get_cpu_index(), errp);
#endif
}
void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
{
    if (!use_icount) {
        return;
    }

    cpu_fprintf(f, "Host - Guest clock %"PRIi64" ms\n",
                (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
    if (icount_align_option) {
        cpu_fprintf(f, "Max guest delay %"PRIi64" ms\n", -max_delay/SCALE_MS);
        cpu_fprintf(f, "Max guest advance %"PRIi64" ms\n", max_advance/SCALE_MS);
    } else {
        cpu_fprintf(f, "Max guest delay NA\n");
        cpu_fprintf(f, "Max guest advance NA\n");
    }
}