vhost+postcopy: Add vhost waker
[qemu/ar7.git] / cpus.c
blobc652da84cf4825225fb51b8bbca924a3ef35c0ef
1 /*
2 * QEMU System Emulator
4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 #include "qemu/osdep.h"
26 #include "qemu/config-file.h"
27 #include "cpu.h"
28 #include "monitor/monitor.h"
29 #include "qapi/error.h"
30 #include "qapi/qapi-commands-misc.h"
31 #include "qapi/qapi-events-run-state.h"
32 #include "qapi/qmp/qerror.h"
33 #include "qemu/error-report.h"
34 #include "sysemu/sysemu.h"
35 #include "sysemu/block-backend.h"
36 #include "exec/gdbstub.h"
37 #include "sysemu/dma.h"
38 #include "sysemu/hw_accel.h"
39 #include "sysemu/kvm.h"
40 #include "sysemu/hax.h"
41 #include "sysemu/hvf.h"
42 #include "sysemu/whpx.h"
43 #include "exec/exec-all.h"
45 #include "qemu/thread.h"
46 #include "sysemu/cpus.h"
47 #include "sysemu/qtest.h"
48 #include "qemu/main-loop.h"
49 #include "qemu/option.h"
50 #include "qemu/bitmap.h"
51 #include "qemu/seqlock.h"
52 #include "tcg.h"
53 #include "hw/nmi.h"
54 #include "sysemu/replay.h"
55 #include "hw/boards.h"
57 #ifdef CONFIG_LINUX
59 #include <sys/prctl.h>
61 #ifndef PR_MCE_KILL
62 #define PR_MCE_KILL 33
63 #endif
65 #ifndef PR_MCE_KILL_SET
66 #define PR_MCE_KILL_SET 1
67 #endif
69 #ifndef PR_MCE_KILL_EARLY
70 #define PR_MCE_KILL_EARLY 1
71 #endif
73 #endif /* CONFIG_LINUX */
/* Max observed delay/advance between virtual and real time (icount align). */
int64_t max_delay;
int64_t max_advance;

/* vcpu throttling controls */
static QEMUTimer *throttle_timer;          /* fires once per throttle timeslice */
static unsigned int throttle_percentage;   /* read/written with atomic_* */

#define CPU_THROTTLE_PCT_MIN 1
#define CPU_THROTTLE_PCT_MAX 99
#define CPU_THROTTLE_TIMESLICE_NS 10000000
86 bool cpu_is_stopped(CPUState *cpu)
88 return cpu->stopped || !runstate_is_running();
91 static bool cpu_thread_is_idle(CPUState *cpu)
93 if (cpu->stop || cpu->queued_work_first) {
94 return false;
96 if (cpu_is_stopped(cpu)) {
97 return true;
99 if (!cpu->halted || cpu_has_work(cpu) ||
100 kvm_halt_in_kernel()) {
101 return false;
103 return true;
106 static bool all_cpu_threads_idle(void)
108 CPUState *cpu;
110 CPU_FOREACH(cpu) {
111 if (!cpu_thread_is_idle(cpu)) {
112 return false;
115 return true;
/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

/* Whether vCPUs are allowed to sleep in icount mode (see -icount sleep=). */
static bool icount_sleep = true;
/* Conversion factor from emulated instructions to virtual clock ticks.  */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10
/* Global clock/tick bookkeeping.  Fields fall into two protection
 * domains: BQL-only fields, and fields also readable outside the BQL
 * which are guarded by vm_clock_seqlock. */
typedef struct TimersState {
    /* Protected by BQL.  */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read out of BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;   /* kept only for migration-stream compatibility */

    /* Compensate for varying guest execution speed.  */
    int64_t qemu_icount_bias;
    /* Only written by TCG thread */
    int64_t qemu_icount;
    /* for adjusting icount */
    int64_t vm_clock_warp_start;
    QEMUTimer *icount_rt_timer;
    QEMUTimer *icount_vm_timer;
    QEMUTimer *icount_warp_timer;
} TimersState;

static TimersState timers_state;
/* Whether multi-threaded TCG is in use (set by qemu_tcg_configure). */
bool mttcg_enabled;
157 * We default to false if we know other options have been enabled
158 * which are currently incompatible with MTTCG. Otherwise when each
159 * guest (target) has been updated to support:
160 * - atomic instructions
161 * - memory ordering primitives (barriers)
162 * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
164 * Once a guest architecture has been converted to the new primitives
165 * there are two remaining limitations to check.
167 * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
168 * - The host must have a stronger memory order than the guest
170 * It may be possible in future to support strong guests on weak hosts
171 * but that will require tagging all load/stores in a guest with their
172 * implicit memory order requirements which would likely slow things
173 * down a lot.
/* MTTCG is only safe when every memory-ordering guarantee the guest
 * expects is also provided by the host TCG backend. */
static bool check_tcg_memory_orders_compatible(void)
{
#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
    /* Every ordering bit required by the guest must be set for the host. */
    return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
#else
    /* Ordering requirements unknown: be conservative. */
    return false;
#endif
}
185 static bool default_mttcg_enabled(void)
187 if (use_icount || TCG_OVERSIZED_GUEST) {
188 return false;
189 } else {
190 #ifdef TARGET_SUPPORTS_MTTCG
191 return check_tcg_memory_orders_compatible();
192 #else
193 return false;
194 #endif
/* Parse the -accel tcg,thread=single|multi option and set mttcg_enabled.
 * Errors (oversized guest, icount, bad value) are reported via @errp;
 * warnings about unconverted guests go to the console. */
void qemu_tcg_configure(QemuOpts *opts, Error **errp)
{
    const char *t = qemu_opt_get(opts, "thread");
    if (t) {
        if (strcmp(t, "multi") == 0) {
            if (TCG_OVERSIZED_GUEST) {
                error_setg(errp, "No MTTCG when guest word size > hosts");
            } else if (use_icount) {
                error_setg(errp, "No MTTCG when icount is enabled");
            } else {
#ifndef TARGET_SUPPORTS_MTTCG
                /* Warning only: the user explicitly asked for multi. */
                error_report("Guest not yet converted to MTTCG - "
                             "you may get unexpected results");
#endif
                if (!check_tcg_memory_orders_compatible()) {
                    error_report("Guest expects a stronger memory ordering "
                                 "than the host provides");
                    error_printf("This may cause strange/hard to debug errors\n");
                }
                mttcg_enabled = true;
            }
        } else if (strcmp(t, "single") == 0) {
            mttcg_enabled = false;
        } else {
            error_setg(errp, "Invalid 'thread' setting %s", t);
        }
    } else {
        /* No explicit request: pick a safe default. */
        mttcg_enabled = default_mttcg_enabled();
    }
}
/* The current number of executed instructions is based on what we
 * originally budgeted minus the current state of the decrementing
 * icount counters in extra/u16.low.
 */
static int64_t cpu_get_icount_executed(CPUState *cpu)
{
    return cpu->icount_budget - (cpu->icount_decr.u16.low + cpu->icount_extra);
}
/*
 * Update the global shared timer_state.qemu_icount to take into
 * account executed instructions. This is done by the TCG vCPU
 * thread so the main-loop can see time has moved forward.
 */
void cpu_update_icount(CPUState *cpu)
{
    int64_t executed = cpu_get_icount_executed(cpu);
    /* Consume the executed instructions from this vCPU's budget. */
    cpu->icount_budget -= executed;

#ifdef CONFIG_ATOMIC64
    atomic_set__nocheck(&timers_state.qemu_icount,
                        atomic_read__nocheck(&timers_state.qemu_icount) +
                        executed);
#else /* FIXME: we need 64bit atomics to do this safely */
    timers_state.qemu_icount += executed;
#endif
}
/* Return the raw instruction count.  Fatal if called from a context
 * where I/O is not allowed (icount would be inconsistent). */
int64_t cpu_get_icount_raw(void)
{
    CPUState *cpu = current_cpu;

    if (cpu && cpu->running) {
        if (!cpu->can_do_io) {
            error_report("Bad icount read");
            exit(1);
        }
        /* Take into account what has run */
        cpu_update_icount(cpu);
    }
#ifdef CONFIG_ATOMIC64
    return atomic_read__nocheck(&timers_state.qemu_icount);
#else /* FIXME: we need 64bit atomics to do this safely */
    return timers_state.qemu_icount;
#endif
}
/* Return the virtual CPU time, based on the instruction counter.
 * Caller must hold the seqlock (or tolerate a torn read). */
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount = cpu_get_icount_raw();
    return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
}
283 int64_t cpu_get_icount(void)
285 int64_t icount;
286 unsigned start;
288 do {
289 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
290 icount = cpu_get_icount_locked();
291 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
293 return icount;
/* Convert an instruction count to virtual-clock nanoseconds using the
 * current (adaptive) icount_time_shift. */
int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << icount_time_shift;
}
/* return the time elapsed in VM between vm_start and vm_stop.  Unless
 * icount is active, cpu_get_ticks() uses units of the host CPU cycle
 * counter.
 *
 * Caller must hold the BQL
 */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_host_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Note: non increasing ticks may happen if the host uses
           software suspend.  Fold the regression into the offset so
           the returned value never goes backwards. */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}
331 static int64_t cpu_get_clock_locked(void)
333 int64_t time;
335 time = timers_state.cpu_clock_offset;
336 if (timers_state.cpu_ticks_enabled) {
337 time += get_clock();
340 return time;
343 /* Return the monotonic time elapsed in VM, i.e.,
344 * the time between vm_start and vm_stop
346 int64_t cpu_get_clock(void)
348 int64_t ti;
349 unsigned start;
351 do {
352 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
353 ti = cpu_get_clock_locked();
354 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
356 return ti;
/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the really thing protected by seqlock is cpu_clock_offset. */
    seqlock_write_begin(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        /* Subtract "now" so that elapsed time resumes from the offset. */
        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_end(&timers_state.vm_clock_seqlock);
}
/* disable cpu_get_ticks() : the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the really thing protected by seqlock is cpu_clock_offset. */
    seqlock_write_begin(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        /* Freeze the offsets at the current elapsed values. */
        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_end(&timers_state.vm_clock_seqlock);
}
390 /* Correlation between real and virtual time is always going to be
391 fairly approximate, so ignore small variation.
392 When the guest is idle real and virtual time will be aligned in
393 the IO wait loop. */
394 #define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)
/* Adapt icount_time_shift so virtual time tracks real time: slow the
 * guest down when it runs ahead, speed it up when it lags. */
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex.  */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_begin(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    /* Rebase the bias so the shift change does not jump virtual time. */
    timers_state.qemu_icount_bias = cur_icount
                              - (timers_state.qemu_icount << icount_time_shift);
    seqlock_write_end(&timers_state.vm_clock_seqlock);
}
/* Realtime trigger: re-arm for one second of real time, then adjust. */
static void icount_adjust_rt(void *opaque)
{
    timer_mod(timers_state.icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_adjust();
}
/* Virtual-time trigger: re-arm for 100ms of virtual time, then adjust. */
static void icount_adjust_vm(void *opaque)
{
    timer_mod(timers_state.icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND / 10);
    icount_adjust();
}
/* Convert nanoseconds to instructions, rounding up to a whole insn. */
static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
/* Apply any pending clock warp: advance qemu_icount_bias by the real
 * time elapsed since the warp started, then clear the warp start. */
static void icount_warp_rt(void)
{
    unsigned seq;
    int64_t warp_start;

    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    do {
        seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        warp_start = timers_state.vm_clock_warp_start;
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));

    if (warp_start == -1) {
        return;
    }

    seqlock_write_begin(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
                                     cpu_get_clock_locked());
        int64_t warp_delta;

        warp_delta = clock - timers_state.vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = clock - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        timers_state.qemu_icount_bias += warp_delta;
    }
    timers_state.vm_clock_warp_start = -1;
    seqlock_write_end(&timers_state.vm_clock_seqlock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
/* Warp-timer callback: fold the elapsed warp into virtual time. */
static void icount_timer_cb(void *opaque)
{
    /* No need for a checkpoint because the timer already synchronizes
     * with CHECKPOINT_CLOCK_VIRTUAL_RT.
     */
    icount_warp_rt();
}
/* Advance QEMU_CLOCK_VIRTUAL to @dest, firing timers along the way.
 * Only valid under qtest, where the test drives the clock. */
void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    AioContext *aio_context;
    assert(qtest_enabled());
    aio_context = qemu_get_aio_context();
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        /* Step to the next timer deadline or to dest, whichever is sooner. */
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);

        seqlock_write_begin(&timers_state.vm_clock_seqlock);
        timers_state.qemu_icount_bias += warp;
        seqlock_write_end(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
/* When all vCPUs are idle in icount mode, start a "warp": either jump
 * virtual time straight to the next deadline (sleep=off), or arm a
 * real-time timer that will apply the warp when it fires (sleep=on). */
void qemu_start_warp_timer(void)
{
    int64_t clock;
    int64_t deadline;

    if (!use_icount) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    /* warp clock deterministically in record/replay mode */
    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
        return;
    }

    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        static bool notified;
        if (!icount_sleep && !notified) {
            warn_report("icount sleep disabled and no active timers");
            notified = true;
        }
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         */
        if (!icount_sleep) {
            /*
             * We never let VCPUs sleep in no sleep icount mode.
             * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
             * to the next QEMU_CLOCK_VIRTUAL event and notify it.
             * It is useful when we want a deterministic execution time,
             * isolated from host latencies.
             */
            seqlock_write_begin(&timers_state.vm_clock_seqlock);
            timers_state.qemu_icount_bias += deadline;
            seqlock_write_end(&timers_state.vm_clock_seqlock);
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
        } else {
            /*
             * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
             * "real" time, (related to the time left until the next event) has
             * passed.  The QEMU_CLOCK_VIRTUAL_RT clock will do this.
             * This avoids that the warps are visible externally; for example,
             * you will not be sending network packets continuously instead of
             * every 100ms.
             */
            seqlock_write_begin(&timers_state.vm_clock_seqlock);
            if (timers_state.vm_clock_warp_start == -1
                || timers_state.vm_clock_warp_start > clock) {
                timers_state.vm_clock_warp_start = clock;
            }
            seqlock_write_end(&timers_state.vm_clock_seqlock);
            timer_mod_anticipate(timers_state.icount_warp_timer,
                                 clock + deadline);
        }
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
/* Cancel the pending warp timer and account the warp that has elapsed
 * so far.  Called when a vCPU is about to run again. */
static void qemu_account_warp_timer(void)
{
    if (!use_icount || !icount_sleep) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    /* warp clock deterministically in record/replay mode */
    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
        return;
    }

    timer_del(timers_state.icount_warp_timer);
    icount_warp_rt();
}
/* Migrate the icount subsection only when icount is in use. */
static bool icount_state_needed(void *opaque)
{
    return use_icount;
}
/* Warp-timer subsection is needed only if the timer was created. */
static bool warp_timer_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_warp_timer != NULL;
}
/* Adjust-timers subsection is needed only if the rt timer was created. */
static bool adjust_timers_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_rt_timer != NULL;
}
/*
 * Subsection for warp timer migration is optional, because may not be created
 */
static const VMStateDescription icount_vmstate_warp_timer = {
    .name = "timer/icount/warp_timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = warp_timer_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(vm_clock_warp_start, TimersState),
        VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};
/* Optional subsection for the icount speed-adjustment timers. */
static const VMStateDescription icount_vmstate_adjust_timers = {
    .name = "timer/icount/timers",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = adjust_timers_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
        VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icount_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_warp_timer,
        &icount_vmstate_adjust_timers,
        NULL
    }
};
/* Top-level migration description for TimersState.  "dummy" exists only
 * to keep the stream layout compatible with old versions. */
static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_timers,
        NULL
    }
};
/* Per-vCPU throttle work item: sleep this vCPU (with the BQL dropped)
 * for a fraction of the timeslice proportional to the throttle pct. */
static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
{
    double pct;
    double throttle_ratio;
    long sleeptime_ns;

    /* Throttling may have been disabled since this was scheduled. */
    if (!cpu_throttle_get_percentage()) {
        return;
    }

    pct = (double)cpu_throttle_get_percentage()/100;
    /* sleep/run ratio: pct of total time spent sleeping. */
    throttle_ratio = pct / (1 - pct);
    sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);

    qemu_mutex_unlock_iothread();
    g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
    qemu_mutex_lock_iothread();
    /* Allow the tick handler to schedule us again. */
    atomic_set(&cpu->throttle_thread_scheduled, 0);
}
/* Throttle timer callback: queue a sleep on every vCPU that is not
 * already scheduled, then re-arm for the next (stretched) timeslice. */
static void cpu_throttle_timer_tick(void *opaque)
{
    CPUState *cpu;
    double pct;

    /* Stop the timer if needed */
    if (!cpu_throttle_get_percentage()) {
        return;
    }
    CPU_FOREACH(cpu) {
        if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
            async_run_on_cpu(cpu, cpu_throttle_thread,
                             RUN_ON_CPU_NULL);
        }
    }

    pct = (double)cpu_throttle_get_percentage()/100;
    /* Stretch the period so run-time per period stays one timeslice. */
    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                                   CPU_THROTTLE_TIMESLICE_NS / (1-pct));
}
754 void cpu_throttle_set(int new_throttle_pct)
756 /* Ensure throttle percentage is within valid range */
757 new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
758 new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);
760 atomic_set(&throttle_percentage, new_throttle_pct);
762 timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
763 CPU_THROTTLE_TIMESLICE_NS);
/* Disable throttling; the timer callback stops re-arming on its own. */
void cpu_throttle_stop(void)
{
    atomic_set(&throttle_percentage, 0);
}
/* True while a non-zero throttle percentage is set. */
bool cpu_throttle_active(void)
{
    return cpu_throttle_get_percentage() != 0;
}
/* Current throttle percentage (0 = throttling off); atomic read. */
int cpu_throttle_get_percentage(void)
{
    return atomic_read(&throttle_percentage);
}
/* One-time init of clock state: seqlock, migration registration and the
 * vCPU throttle timer. */
void cpu_ticks_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                  cpu_throttle_timer_tick, NULL);
}
789 void configure_icount(QemuOpts *opts, Error **errp)
791 const char *option;
792 char *rem_str = NULL;
794 option = qemu_opt_get(opts, "shift");
795 if (!option) {
796 if (qemu_opt_get(opts, "align") != NULL) {
797 error_setg(errp, "Please specify shift option when using align");
799 return;
802 icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
803 if (icount_sleep) {
804 timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
805 icount_timer_cb, NULL);
808 icount_align_option = qemu_opt_get_bool(opts, "align", false);
810 if (icount_align_option && !icount_sleep) {
811 error_setg(errp, "align=on and sleep=off are incompatible");
813 if (strcmp(option, "auto") != 0) {
814 errno = 0;
815 icount_time_shift = strtol(option, &rem_str, 0);
816 if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
817 error_setg(errp, "icount: Invalid shift value");
819 use_icount = 1;
820 return;
821 } else if (icount_align_option) {
822 error_setg(errp, "shift=auto and align=on are incompatible");
823 } else if (!icount_sleep) {
824 error_setg(errp, "shift=auto and sleep=off are incompatible");
827 use_icount = 2;
829 /* 125MIPS seems a reasonable initial guess at the guest speed.
830 It will be corrected fairly quickly anyway. */
831 icount_time_shift = 3;
833 /* Have both realtime and virtual time triggers for speed adjustment.
834 The realtime trigger catches emulated time passing too slowly,
835 the virtual time trigger catches emulated time passing too fast.
836 Realtime triggers occur even when idle, so use them less frequently
837 than VM triggers. */
838 timers_state.vm_clock_warp_start = -1;
839 timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
840 icount_adjust_rt, NULL);
841 timer_mod(timers_state.icount_rt_timer,
842 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
843 timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
844 icount_adjust_vm, NULL);
845 timer_mod(timers_state.icount_vm_timer,
846 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
847 NANOSECONDS_PER_SECOND / 10);
850 /***********************************************************/
851 /* TCG vCPU kick timer
853 * The kick timer is responsible for moving single threaded vCPU
854 * emulation on to the next vCPU. If more than one vCPU is running a
855 * timer event with force a cpu->exit so the next vCPU can get
856 * scheduled.
858 * The timer is removed if all vCPUs are idle and restarted again once
859 * idleness is complete.
/* Round-robin kick timer and the vCPU it should preempt. */
static QEMUTimer *tcg_kick_vcpu_timer;
static CPUState *tcg_current_rr_cpu;

/* Kick every 10ms of virtual time. */
#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
/* Absolute virtual-clock time of the next round-robin kick. */
static inline int64_t qemu_tcg_next_kick(void)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
}
/* Kick the currently round-robin scheduled vCPU */
static void qemu_cpu_kick_rr_cpu(void)
{
    CPUState *cpu;
    /* Retry if the scheduler moved on to another vCPU while we were
     * kicking, so the newly scheduled one gets kicked too. */
    do {
        cpu = atomic_mb_read(&tcg_current_rr_cpu);
        if (cpu) {
            cpu_exit(cpu);
        }
    } while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
}
884 static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
/* Clock-notify hook: under icount, waking the main loop is not enough;
 * an idle vCPU thread must also be nudged so icount deadlines rerun. */
void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
{
    if (!use_icount || type != QEMU_CLOCK_VIRTUAL) {
        qemu_notify_event();
        return;
    }

    if (!qemu_in_vcpu_thread() && first_cpu) {
        /* qemu_cpu_kick is not enough to kick a halted CPU out of
         * qemu_tcg_wait_io_event.  async_run_on_cpu, instead,
         * causes cpu_thread_is_idle to return false.  This way,
         * handle_icount_deadline can run.
         */
        async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
    }
}
/* Kick-timer callback: re-arm, then preempt the current RR vCPU. */
static void kick_tcg_thread(void *opaque)
{
    timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    qemu_cpu_kick_rr_cpu();
}
/* Create and arm the RR kick timer; only needed with more than one vCPU
 * and only in single-threaded TCG mode. */
static void start_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
        tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                           kick_tcg_thread, NULL);
        timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    }
}
921 static void stop_tcg_kick_timer(void)
923 assert(!mttcg_enabled);
924 if (tcg_kick_vcpu_timer) {
925 timer_del(tcg_kick_vcpu_timer);
926 tcg_kick_vcpu_timer = NULL;
930 /***********************************************************/
931 void hw_error(const char *fmt, ...)
933 va_list ap;
934 CPUState *cpu;
936 va_start(ap, fmt);
937 fprintf(stderr, "qemu: hardware error: ");
938 vfprintf(stderr, fmt, ap);
939 fprintf(stderr, "\n");
940 CPU_FOREACH(cpu) {
941 fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
942 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
944 va_end(ap);
945 abort();
948 void cpu_synchronize_all_states(void)
950 CPUState *cpu;
952 CPU_FOREACH(cpu) {
953 cpu_synchronize_state(cpu);
954 /* TODO: move to cpu_synchronize_state() */
955 if (hvf_enabled()) {
956 hvf_cpu_synchronize_state(cpu);
961 void cpu_synchronize_all_post_reset(void)
963 CPUState *cpu;
965 CPU_FOREACH(cpu) {
966 cpu_synchronize_post_reset(cpu);
967 /* TODO: move to cpu_synchronize_post_reset() */
968 if (hvf_enabled()) {
969 hvf_cpu_synchronize_post_reset(cpu);
974 void cpu_synchronize_all_post_init(void)
976 CPUState *cpu;
978 CPU_FOREACH(cpu) {
979 cpu_synchronize_post_init(cpu);
980 /* TODO: move to cpu_synchronize_post_init() */
981 if (hvf_enabled()) {
982 hvf_cpu_synchronize_post_init(cpu);
987 void cpu_synchronize_all_pre_loadvm(void)
989 CPUState *cpu;
991 CPU_FOREACH(cpu) {
992 cpu_synchronize_pre_loadvm(cpu);
/* Stop the VM: pause vCPUs, move to @state, optionally emit the QMP
 * STOP event, then drain and flush block devices.
 * Returns the result of bdrv_flush_all() (0 on success). */
static int do_vm_stop(RunState state, bool send_stop)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        if (send_stop) {
            qapi_event_send_stop(&error_abort);
        }
    }

    /* Flush storage even if we were not running (e.g. double stop). */
    bdrv_drain_all();
    replay_disable_events();
    ret = bdrv_flush_all();

    return ret;
}
/* Special vm_stop() variant for terminating the process.  Historically clients
 * did not expect a QMP STOP event and so we need to retain compatibility.
 */
int vm_shutdown(void)
{
    return do_vm_stop(RUN_STATE_SHUTDOWN, false);
}
1025 static bool cpu_can_run(CPUState *cpu)
1027 if (cpu->stop) {
1028 return false;
1030 if (cpu_is_stopped(cpu)) {
1031 return false;
1033 return true;
/* A vCPU hit a debug event: hand it to gdbstub and park the vCPU. */
static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}
1043 #ifdef CONFIG_LINUX
/* Re-deliver SIGBUS with the default disposition so the process dies
 * with the correct signal; abort() if that somehow fails. */
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        /* Unblock so the re-raised signal is delivered immediately. */
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}
/* SIGBUS handler: route hardware memory errors (MCE) to KVM; anything
 * KVM cannot consume is re-raised fatally. */
static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
{
    /* Only machine-check action-optional/required codes are handled. */
    if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
        sigbus_reraise();
    }

    if (current_cpu) {
        /* Called asynchronously in VCPU thread.  */
        if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    } else {
        /* Called synchronously (via signalfd) in main thread.  */
        if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    }
}
/* Install the SIGBUS handler and opt in to early machine-check kills. */
static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
#else /* !CONFIG_LINUX */
/* No SIGBUS/MCE handling outside Linux. */
static void qemu_init_sigbus(void)
{
}
#endif /* !CONFIG_LINUX */
/* The Big QEMU Lock (BQL). */
static QemuMutex qemu_global_mutex;

static QemuThread io_thread;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
/* One-time init of the vCPU loop machinery; called from the I/O thread,
 * whose identity is recorded for later qemu_thread comparisons. */
void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}
/* Run @func synchronously on @cpu's thread, dropping the BQL while
 * waiting for completion. */
void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
}
/* Destroy a KVM vCPU; failure is fatal since the vCPU fd would leak. */
static void qemu_kvm_destroy_vcpu(CPUState *cpu)
{
    if (kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}
1129 static void qemu_tcg_destroy_vcpu(CPUState *cpu)
/* Mark this vCPU stopped (must run on its own thread) and wake anyone
 * waiting in pause_all_vcpus().  @exit additionally forces the vCPU out
 * of its execution loop. */
static void qemu_cpu_stop(CPUState *cpu, bool exit)
{
    g_assert(qemu_cpu_is_self(cpu));
    cpu->stop = false;
    cpu->stopped = true;
    if (exit) {
        cpu_exit(cpu);
    }
    qemu_cond_broadcast(&qemu_pause_cond);
}
/* Common post-wait processing: clear the kick flag, honour a pending
 * stop request, and drain queued run_on_cpu work. */
static void qemu_wait_io_event_common(CPUState *cpu)
{
    atomic_mb_set(&cpu->thread_kicked, false);
    if (cpu->stop) {
        qemu_cpu_stop(cpu, false);
    }
    process_queued_cpu_work(cpu);
}
/* Single-threaded TCG: sleep while every vCPU is idle (with the kick
 * timer stopped), then restart the kick timer and handle events. */
static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
{
    while (all_cpu_threads_idle()) {
        stop_tcg_kick_timer();
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    start_tcg_kick_timer();

    qemu_wait_io_event_common(cpu);
}
/* Block this vCPU thread (BQL held, released while waiting) until the
 * vCPU has work to do, then process pending events. */
static void qemu_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

#ifdef _WIN32
    /* Eat dummy APC queued by qemu_cpu_kick_thread.  */
    if (!tcg_enabled()) {
        SleepEx(0, TRUE);
    }
#endif
    qemu_wait_io_event_common(cpu);
}
/* Thread function for a KVM vCPU: init the in-kernel vCPU, signal
 * creation, then loop executing guest code until unplugged. */
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        error_report("kvm_init_vcpu failed: %s", strerror(-r));
        exit(1);
    }

    kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    do {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    /* Unplug path: destroy the kernel vCPU and signal completion. */
    qemu_kvm_destroy_vcpu(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}
1223 static void *qemu_dummy_cpu_thread_fn(void *arg)
1225 #ifdef _WIN32
1226 error_report("qtest is not supported under Windows");
1227 exit(1);
1228 #else
1229 CPUState *cpu = arg;
1230 sigset_t waitset;
1231 int r;
1233 rcu_register_thread();
1235 qemu_mutex_lock_iothread();
1236 qemu_thread_get_self(cpu->thread);
1237 cpu->thread_id = qemu_get_thread_id();
1238 cpu->can_do_io = 1;
1239 current_cpu = cpu;
1241 sigemptyset(&waitset);
1242 sigaddset(&waitset, SIG_IPI);
1244 /* signal CPU creation */
1245 cpu->created = true;
1246 qemu_cond_signal(&qemu_cpu_cond);
1248 do {
1249 qemu_mutex_unlock_iothread();
1250 do {
1251 int sig;
1252 r = sigwait(&waitset, &sig);
1253 } while (r == -1 && (errno == EAGAIN || errno == EINTR));
1254 if (r == -1) {
1255 perror("sigwait");
1256 exit(1);
1258 qemu_mutex_lock_iothread();
1259 qemu_wait_io_event(cpu);
1260 } while (!cpu->unplug);
1262 rcu_unregister_thread();
1263 return NULL;
1264 #endif
1267 static int64_t tcg_get_icount_limit(void)
1269 int64_t deadline;
1271 if (replay_mode != REPLAY_MODE_PLAY) {
1272 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1274 /* Maintain prior (possibly buggy) behaviour where if no deadline
1275 * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
1276 * INT32_MAX nanoseconds ahead, we still use INT32_MAX
1277 * nanoseconds.
1279 if ((deadline < 0) || (deadline > INT32_MAX)) {
1280 deadline = INT32_MAX;
1283 return qemu_icount_round(deadline);
1284 } else {
1285 return replay_get_instructions();
1289 static void handle_icount_deadline(void)
1291 assert(qemu_in_vcpu_thread());
1292 if (use_icount) {
1293 int64_t deadline =
1294 qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1296 if (deadline == 0) {
1297 /* Wake up other AioContexts. */
1298 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
1299 qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
1304 static void prepare_icount_for_run(CPUState *cpu)
1306 if (use_icount) {
1307 int insns_left;
1309 /* These should always be cleared by process_icount_data after
1310 * each vCPU execution. However u16.high can be raised
1311 * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
1313 g_assert(cpu->icount_decr.u16.low == 0);
1314 g_assert(cpu->icount_extra == 0);
1316 cpu->icount_budget = tcg_get_icount_limit();
1317 insns_left = MIN(0xffff, cpu->icount_budget);
1318 cpu->icount_decr.u16.low = insns_left;
1319 cpu->icount_extra = cpu->icount_budget - insns_left;
1323 static void process_icount_data(CPUState *cpu)
1325 if (use_icount) {
1326 /* Account for executed instructions */
1327 cpu_update_icount(cpu);
1329 /* Reset the counters */
1330 cpu->icount_decr.u16.low = 0;
1331 cpu->icount_extra = 0;
1332 cpu->icount_budget = 0;
1334 replay_account_executed_instructions();
1339 static int tcg_cpu_exec(CPUState *cpu)
1341 int ret;
1342 #ifdef CONFIG_PROFILER
1343 int64_t ti;
1344 #endif
1346 #ifdef CONFIG_PROFILER
1347 ti = profile_getclock();
1348 #endif
1349 qemu_mutex_unlock_iothread();
1350 cpu_exec_start(cpu);
1351 ret = cpu_exec(cpu);
1352 cpu_exec_end(cpu);
1353 qemu_mutex_lock_iothread();
1354 #ifdef CONFIG_PROFILER
1355 tcg_time += profile_getclock() - ti;
1356 #endif
1357 return ret;
1360 /* Destroy any remaining vCPUs which have been unplugged and have
1361 * finished running
1363 static void deal_with_unplugged_cpus(void)
1365 CPUState *cpu;
1367 CPU_FOREACH(cpu) {
1368 if (cpu->unplug && !cpu_can_run(cpu)) {
1369 qemu_tcg_destroy_vcpu(cpu);
1370 cpu->created = false;
1371 qemu_cond_signal(&qemu_cpu_cond);
1372 break;
1377 /* Single-threaded TCG
1379 * In the single-threaded case each vCPU is simulated in turn. If
1380 * there is more than a single vCPU we create a simple timer to kick
1381 * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
1382 * This is done explicitly rather than relying on side-effects
1383 * elsewhere.
1386 static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
1388 CPUState *cpu = arg;
1390 rcu_register_thread();
1391 tcg_register_thread();
1393 qemu_mutex_lock_iothread();
1394 qemu_thread_get_self(cpu->thread);
1396 cpu->thread_id = qemu_get_thread_id();
1397 cpu->created = true;
1398 cpu->can_do_io = 1;
1399 qemu_cond_signal(&qemu_cpu_cond);
1401 /* wait for initial kick-off after machine start */
1402 while (first_cpu->stopped) {
1403 qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
1405 /* process any pending work */
1406 CPU_FOREACH(cpu) {
1407 current_cpu = cpu;
1408 qemu_wait_io_event_common(cpu);
1412 start_tcg_kick_timer();
1414 cpu = first_cpu;
1416 /* process any pending work */
1417 cpu->exit_request = 1;
1419 while (1) {
1420 /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
1421 qemu_account_warp_timer();
1423 /* Run the timers here. This is much more efficient than
1424 * waking up the I/O thread and waiting for completion.
1426 handle_icount_deadline();
1428 if (!cpu) {
1429 cpu = first_cpu;
1432 while (cpu && !cpu->queued_work_first && !cpu->exit_request) {
1434 atomic_mb_set(&tcg_current_rr_cpu, cpu);
1435 current_cpu = cpu;
1437 qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
1438 (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
1440 if (cpu_can_run(cpu)) {
1441 int r;
1443 prepare_icount_for_run(cpu);
1445 r = tcg_cpu_exec(cpu);
1447 process_icount_data(cpu);
1449 if (r == EXCP_DEBUG) {
1450 cpu_handle_guest_debug(cpu);
1451 break;
1452 } else if (r == EXCP_ATOMIC) {
1453 qemu_mutex_unlock_iothread();
1454 cpu_exec_step_atomic(cpu);
1455 qemu_mutex_lock_iothread();
1456 break;
1458 } else if (cpu->stop) {
1459 if (cpu->unplug) {
1460 cpu = CPU_NEXT(cpu);
1462 break;
1465 cpu = CPU_NEXT(cpu);
1466 } /* while (cpu && !cpu->exit_request).. */
1468 /* Does not need atomic_mb_set because a spurious wakeup is okay. */
1469 atomic_set(&tcg_current_rr_cpu, NULL);
1471 if (cpu && cpu->exit_request) {
1472 atomic_mb_set(&cpu->exit_request, 0);
1475 qemu_tcg_rr_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
1476 deal_with_unplugged_cpus();
1479 rcu_unregister_thread();
1480 return NULL;
1483 static void *qemu_hax_cpu_thread_fn(void *arg)
1485 CPUState *cpu = arg;
1486 int r;
1488 rcu_register_thread();
1489 qemu_mutex_lock_iothread();
1490 qemu_thread_get_self(cpu->thread);
1492 cpu->thread_id = qemu_get_thread_id();
1493 cpu->created = true;
1494 cpu->halted = 0;
1495 current_cpu = cpu;
1497 hax_init_vcpu(cpu);
1498 qemu_cond_signal(&qemu_cpu_cond);
1500 do {
1501 if (cpu_can_run(cpu)) {
1502 r = hax_smp_cpu_exec(cpu);
1503 if (r == EXCP_DEBUG) {
1504 cpu_handle_guest_debug(cpu);
1508 qemu_wait_io_event(cpu);
1509 } while (!cpu->unplug || cpu_can_run(cpu));
1510 rcu_unregister_thread();
1511 return NULL;
1514 /* The HVF-specific vCPU thread function. This one should only run when the host
1515 * CPU supports the VMX "unrestricted guest" feature. */
1516 static void *qemu_hvf_cpu_thread_fn(void *arg)
1518 CPUState *cpu = arg;
1520 int r;
1522 assert(hvf_enabled());
1524 rcu_register_thread();
1526 qemu_mutex_lock_iothread();
1527 qemu_thread_get_self(cpu->thread);
1529 cpu->thread_id = qemu_get_thread_id();
1530 cpu->can_do_io = 1;
1531 current_cpu = cpu;
1533 hvf_init_vcpu(cpu);
1535 /* signal CPU creation */
1536 cpu->created = true;
1537 qemu_cond_signal(&qemu_cpu_cond);
1539 do {
1540 if (cpu_can_run(cpu)) {
1541 r = hvf_vcpu_exec(cpu);
1542 if (r == EXCP_DEBUG) {
1543 cpu_handle_guest_debug(cpu);
1546 qemu_wait_io_event(cpu);
1547 } while (!cpu->unplug || cpu_can_run(cpu));
1549 hvf_vcpu_destroy(cpu);
1550 cpu->created = false;
1551 qemu_cond_signal(&qemu_cpu_cond);
1552 qemu_mutex_unlock_iothread();
1553 rcu_unregister_thread();
1554 return NULL;
1557 static void *qemu_whpx_cpu_thread_fn(void *arg)
1559 CPUState *cpu = arg;
1560 int r;
1562 rcu_register_thread();
1564 qemu_mutex_lock_iothread();
1565 qemu_thread_get_self(cpu->thread);
1566 cpu->thread_id = qemu_get_thread_id();
1567 current_cpu = cpu;
1569 r = whpx_init_vcpu(cpu);
1570 if (r < 0) {
1571 fprintf(stderr, "whpx_init_vcpu failed: %s\n", strerror(-r));
1572 exit(1);
1575 /* signal CPU creation */
1576 cpu->created = true;
1577 qemu_cond_signal(&qemu_cpu_cond);
1579 do {
1580 if (cpu_can_run(cpu)) {
1581 r = whpx_vcpu_exec(cpu);
1582 if (r == EXCP_DEBUG) {
1583 cpu_handle_guest_debug(cpu);
1586 while (cpu_thread_is_idle(cpu)) {
1587 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1589 qemu_wait_io_event_common(cpu);
1590 } while (!cpu->unplug || cpu_can_run(cpu));
1592 whpx_destroy_vcpu(cpu);
1593 cpu->created = false;
1594 qemu_cond_signal(&qemu_cpu_cond);
1595 qemu_mutex_unlock_iothread();
1596 rcu_unregister_thread();
1597 return NULL;
#ifdef _WIN32
/* No-op APC used only to interrupt a blocked vCPU thread (see
 * qemu_cpu_kick_thread); SleepEx in qemu_wait_io_event consumes it. */
static void CALLBACK dummy_apc_func(ULONG_PTR unused)
{
}
#endif
1606 /* Multi-threaded TCG
1608 * In the multi-threaded case each vCPU has its own thread. The TLS
1609 * variable current_cpu can be used deep in the code to find the
1610 * current CPUState for a given thread.
1613 static void *qemu_tcg_cpu_thread_fn(void *arg)
1615 CPUState *cpu = arg;
1617 g_assert(!use_icount);
1619 rcu_register_thread();
1620 tcg_register_thread();
1622 qemu_mutex_lock_iothread();
1623 qemu_thread_get_self(cpu->thread);
1625 cpu->thread_id = qemu_get_thread_id();
1626 cpu->created = true;
1627 cpu->can_do_io = 1;
1628 current_cpu = cpu;
1629 qemu_cond_signal(&qemu_cpu_cond);
1631 /* process any pending work */
1632 cpu->exit_request = 1;
1634 while (1) {
1635 if (cpu_can_run(cpu)) {
1636 int r;
1637 r = tcg_cpu_exec(cpu);
1638 switch (r) {
1639 case EXCP_DEBUG:
1640 cpu_handle_guest_debug(cpu);
1641 break;
1642 case EXCP_HALTED:
1643 /* during start-up the vCPU is reset and the thread is
1644 * kicked several times. If we don't ensure we go back
1645 * to sleep in the halted state we won't cleanly
1646 * start-up when the vCPU is enabled.
1648 * cpu->halted should ensure we sleep in wait_io_event
1650 g_assert(cpu->halted);
1651 break;
1652 case EXCP_ATOMIC:
1653 qemu_mutex_unlock_iothread();
1654 cpu_exec_step_atomic(cpu);
1655 qemu_mutex_lock_iothread();
1656 default:
1657 /* Ignore everything else? */
1658 break;
1662 atomic_mb_set(&cpu->exit_request, 0);
1663 qemu_wait_io_event(cpu);
1664 } while (!cpu->unplug || cpu_can_run(cpu));
1666 qemu_tcg_destroy_vcpu(cpu);
1667 cpu->created = false;
1668 qemu_cond_signal(&qemu_cpu_cond);
1669 qemu_mutex_unlock_iothread();
1670 rcu_unregister_thread();
1671 return NULL;
1674 static void qemu_cpu_kick_thread(CPUState *cpu)
1676 #ifndef _WIN32
1677 int err;
1679 if (cpu->thread_kicked) {
1680 return;
1682 cpu->thread_kicked = true;
1683 err = pthread_kill(cpu->thread->thread, SIG_IPI);
1684 if (err) {
1685 fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
1686 exit(1);
1688 #else /* _WIN32 */
1689 if (!qemu_cpu_is_self(cpu)) {
1690 if (whpx_enabled()) {
1691 whpx_vcpu_kick(cpu);
1692 } else if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
1693 fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
1694 __func__, GetLastError());
1695 exit(1);
1698 #endif
1701 void qemu_cpu_kick(CPUState *cpu)
1703 qemu_cond_broadcast(cpu->halt_cond);
1704 if (tcg_enabled()) {
1705 cpu_exit(cpu);
1706 /* NOP unless doing single-thread RR */
1707 qemu_cpu_kick_rr_cpu();
1708 } else {
1709 if (hax_enabled()) {
1711 * FIXME: race condition with the exit_request check in
1712 * hax_vcpu_hax_exec
1714 cpu->exit_request = 1;
1716 qemu_cpu_kick_thread(cpu);
1720 void qemu_cpu_kick_self(void)
1722 assert(current_cpu);
1723 qemu_cpu_kick_thread(current_cpu);
1726 bool qemu_cpu_is_self(CPUState *cpu)
1728 return qemu_thread_is_self(cpu->thread);
1731 bool qemu_in_vcpu_thread(void)
1733 return current_cpu && qemu_cpu_is_self(current_cpu);
/* Per-thread flag tracking whether this thread holds the BQL. */
static __thread bool iothread_locked = false;

/* Report whether the calling thread currently holds the BQL. */
bool qemu_mutex_iothread_locked(void)
{
    return iothread_locked;
}
1743 void qemu_mutex_lock_iothread(void)
1745 g_assert(!qemu_mutex_iothread_locked());
1746 qemu_mutex_lock(&qemu_global_mutex);
1747 iothread_locked = true;
1750 void qemu_mutex_unlock_iothread(void)
1752 g_assert(qemu_mutex_iothread_locked());
1753 iothread_locked = false;
1754 qemu_mutex_unlock(&qemu_global_mutex);
1757 static bool all_vcpus_paused(void)
1759 CPUState *cpu;
1761 CPU_FOREACH(cpu) {
1762 if (!cpu->stopped) {
1763 return false;
1767 return true;
1770 void pause_all_vcpus(void)
1772 CPUState *cpu;
1774 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
1775 CPU_FOREACH(cpu) {
1776 if (qemu_cpu_is_self(cpu)) {
1777 qemu_cpu_stop(cpu, true);
1778 } else {
1779 cpu->stop = true;
1780 qemu_cpu_kick(cpu);
1784 while (!all_vcpus_paused()) {
1785 qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
1786 CPU_FOREACH(cpu) {
1787 qemu_cpu_kick(cpu);
1792 void cpu_resume(CPUState *cpu)
1794 cpu->stop = false;
1795 cpu->stopped = false;
1796 qemu_cpu_kick(cpu);
1799 void resume_all_vcpus(void)
1801 CPUState *cpu;
1803 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
1804 CPU_FOREACH(cpu) {
1805 cpu_resume(cpu);
1809 void cpu_remove_sync(CPUState *cpu)
1811 cpu->stop = true;
1812 cpu->unplug = true;
1813 qemu_cpu_kick(cpu);
1814 qemu_mutex_unlock_iothread();
1815 qemu_thread_join(cpu->thread);
1816 qemu_mutex_lock_iothread();
1819 /* For temporary buffers for forming a name */
1820 #define VCPU_THREAD_NAME_SIZE 16
1822 static void qemu_tcg_init_vcpu(CPUState *cpu)
1824 char thread_name[VCPU_THREAD_NAME_SIZE];
1825 static QemuCond *single_tcg_halt_cond;
1826 static QemuThread *single_tcg_cpu_thread;
1827 static int tcg_region_inited;
1830 * Initialize TCG regions--once. Now is a good time, because:
1831 * (1) TCG's init context, prologue and target globals have been set up.
1832 * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the
1833 * -accel flag is processed, so the check doesn't work then).
1835 if (!tcg_region_inited) {
1836 tcg_region_inited = 1;
1837 tcg_region_init();
1840 if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
1841 cpu->thread = g_malloc0(sizeof(QemuThread));
1842 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1843 qemu_cond_init(cpu->halt_cond);
1845 if (qemu_tcg_mttcg_enabled()) {
1846 /* create a thread per vCPU with TCG (MTTCG) */
1847 parallel_cpus = true;
1848 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
1849 cpu->cpu_index);
1851 qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
1852 cpu, QEMU_THREAD_JOINABLE);
1854 } else {
1855 /* share a single thread for all cpus with TCG */
1856 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
1857 qemu_thread_create(cpu->thread, thread_name,
1858 qemu_tcg_rr_cpu_thread_fn,
1859 cpu, QEMU_THREAD_JOINABLE);
1861 single_tcg_halt_cond = cpu->halt_cond;
1862 single_tcg_cpu_thread = cpu->thread;
1864 #ifdef _WIN32
1865 cpu->hThread = qemu_thread_get_handle(cpu->thread);
1866 #endif
1867 } else {
1868 /* For non-MTTCG cases we share the thread */
1869 cpu->thread = single_tcg_cpu_thread;
1870 cpu->halt_cond = single_tcg_halt_cond;
1871 cpu->thread_id = first_cpu->thread_id;
1872 cpu->can_do_io = 1;
1873 cpu->created = true;
1877 static void qemu_hax_start_vcpu(CPUState *cpu)
1879 char thread_name[VCPU_THREAD_NAME_SIZE];
1881 cpu->thread = g_malloc0(sizeof(QemuThread));
1882 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1883 qemu_cond_init(cpu->halt_cond);
1885 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
1886 cpu->cpu_index);
1887 qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn,
1888 cpu, QEMU_THREAD_JOINABLE);
1889 #ifdef _WIN32
1890 cpu->hThread = qemu_thread_get_handle(cpu->thread);
1891 #endif
1894 static void qemu_kvm_start_vcpu(CPUState *cpu)
1896 char thread_name[VCPU_THREAD_NAME_SIZE];
1898 cpu->thread = g_malloc0(sizeof(QemuThread));
1899 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1900 qemu_cond_init(cpu->halt_cond);
1901 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
1902 cpu->cpu_index);
1903 qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
1904 cpu, QEMU_THREAD_JOINABLE);
1907 static void qemu_hvf_start_vcpu(CPUState *cpu)
1909 char thread_name[VCPU_THREAD_NAME_SIZE];
1911 /* HVF currently does not support TCG, and only runs in
1912 * unrestricted-guest mode. */
1913 assert(hvf_enabled());
1915 cpu->thread = g_malloc0(sizeof(QemuThread));
1916 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1917 qemu_cond_init(cpu->halt_cond);
1919 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
1920 cpu->cpu_index);
1921 qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn,
1922 cpu, QEMU_THREAD_JOINABLE);
1925 static void qemu_whpx_start_vcpu(CPUState *cpu)
1927 char thread_name[VCPU_THREAD_NAME_SIZE];
1929 cpu->thread = g_malloc0(sizeof(QemuThread));
1930 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1931 qemu_cond_init(cpu->halt_cond);
1932 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX",
1933 cpu->cpu_index);
1934 qemu_thread_create(cpu->thread, thread_name, qemu_whpx_cpu_thread_fn,
1935 cpu, QEMU_THREAD_JOINABLE);
1936 #ifdef _WIN32
1937 cpu->hThread = qemu_thread_get_handle(cpu->thread);
1938 #endif
1941 static void qemu_dummy_start_vcpu(CPUState *cpu)
1943 char thread_name[VCPU_THREAD_NAME_SIZE];
1945 cpu->thread = g_malloc0(sizeof(QemuThread));
1946 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1947 qemu_cond_init(cpu->halt_cond);
1948 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
1949 cpu->cpu_index);
1950 qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
1951 QEMU_THREAD_JOINABLE);
1954 void qemu_init_vcpu(CPUState *cpu)
1956 cpu->nr_cores = smp_cores;
1957 cpu->nr_threads = smp_threads;
1958 cpu->stopped = true;
1960 if (!cpu->as) {
1961 /* If the target cpu hasn't set up any address spaces itself,
1962 * give it the default one.
1964 cpu->num_ases = 1;
1965 cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
1968 if (kvm_enabled()) {
1969 qemu_kvm_start_vcpu(cpu);
1970 } else if (hax_enabled()) {
1971 qemu_hax_start_vcpu(cpu);
1972 } else if (hvf_enabled()) {
1973 qemu_hvf_start_vcpu(cpu);
1974 } else if (tcg_enabled()) {
1975 qemu_tcg_init_vcpu(cpu);
1976 } else if (whpx_enabled()) {
1977 qemu_whpx_start_vcpu(cpu);
1978 } else {
1979 qemu_dummy_start_vcpu(cpu);
1982 while (!cpu->created) {
1983 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1987 void cpu_stop_current(void)
1989 if (current_cpu) {
1990 qemu_cpu_stop(current_cpu, true);
1994 int vm_stop(RunState state)
1996 if (qemu_in_vcpu_thread()) {
1997 qemu_system_vmstop_request_prepare();
1998 qemu_system_vmstop_request(state);
2000 * FIXME: should not return to device code in case
2001 * vm_stop() has been requested.
2003 cpu_stop_current();
2004 return 0;
2007 return do_vm_stop(state, true);
2011 * Prepare for (re)starting the VM.
2012 * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
2013 * running or in case of an error condition), 0 otherwise.
2015 int vm_prepare_start(void)
2017 RunState requested;
2018 int res = 0;
2020 qemu_vmstop_requested(&requested);
2021 if (runstate_is_running() && requested == RUN_STATE__MAX) {
2022 return -1;
2025 /* Ensure that a STOP/RESUME pair of events is emitted if a
2026 * vmstop request was pending. The BLOCK_IO_ERROR event, for
2027 * example, according to documentation is always followed by
2028 * the STOP event.
2030 if (runstate_is_running()) {
2031 qapi_event_send_stop(&error_abort);
2032 res = -1;
2033 } else {
2034 replay_enable_events();
2035 cpu_enable_ticks();
2036 runstate_set(RUN_STATE_RUNNING);
2037 vm_state_notify(1, RUN_STATE_RUNNING);
2040 /* We are sending this now, but the CPUs will be resumed shortly later */
2041 qapi_event_send_resume(&error_abort);
2042 return res;
/* Start (or restart) the VM; only resumes vCPUs if preparation succeeded. */
void vm_start(void)
{
    if (!vm_prepare_start()) {
        resume_all_vcpus();
    }
}
2052 /* does a state transition even if the VM is already stopped,
2053 current state is forgotten forever */
2054 int vm_stop_force_state(RunState state)
2056 if (runstate_is_running()) {
2057 return vm_stop(state);
2058 } else {
2059 runstate_set(state);
2061 bdrv_drain_all();
2062 /* Make sure to return an error if the flush in a previous vm_stop()
2063 * failed. */
2064 return bdrv_flush_all();
2068 void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
2070 /* XXX: implement xxx_cpu_list for targets that still miss it */
2071 #if defined(cpu_list)
2072 cpu_list(f, cpu_fprintf);
2073 #endif
2076 CpuInfoList *qmp_query_cpus(Error **errp)
2078 MachineState *ms = MACHINE(qdev_get_machine());
2079 MachineClass *mc = MACHINE_GET_CLASS(ms);
2080 CpuInfoList *head = NULL, *cur_item = NULL;
2081 CPUState *cpu;
2083 CPU_FOREACH(cpu) {
2084 CpuInfoList *info;
2085 #if defined(TARGET_I386)
2086 X86CPU *x86_cpu = X86_CPU(cpu);
2087 CPUX86State *env = &x86_cpu->env;
2088 #elif defined(TARGET_PPC)
2089 PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
2090 CPUPPCState *env = &ppc_cpu->env;
2091 #elif defined(TARGET_SPARC)
2092 SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
2093 CPUSPARCState *env = &sparc_cpu->env;
2094 #elif defined(TARGET_RISCV)
2095 RISCVCPU *riscv_cpu = RISCV_CPU(cpu);
2096 CPURISCVState *env = &riscv_cpu->env;
2097 #elif defined(TARGET_MIPS)
2098 MIPSCPU *mips_cpu = MIPS_CPU(cpu);
2099 CPUMIPSState *env = &mips_cpu->env;
2100 #elif defined(TARGET_TRICORE)
2101 TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
2102 CPUTriCoreState *env = &tricore_cpu->env;
2103 #elif defined(TARGET_S390X)
2104 S390CPU *s390_cpu = S390_CPU(cpu);
2105 CPUS390XState *env = &s390_cpu->env;
2106 #endif
2108 cpu_synchronize_state(cpu);
2110 info = g_malloc0(sizeof(*info));
2111 info->value = g_malloc0(sizeof(*info->value));
2112 info->value->CPU = cpu->cpu_index;
2113 info->value->current = (cpu == first_cpu);
2114 info->value->halted = cpu->halted;
2115 info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
2116 info->value->thread_id = cpu->thread_id;
2117 #if defined(TARGET_I386)
2118 info->value->arch = CPU_INFO_ARCH_X86;
2119 info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
2120 #elif defined(TARGET_PPC)
2121 info->value->arch = CPU_INFO_ARCH_PPC;
2122 info->value->u.ppc.nip = env->nip;
2123 #elif defined(TARGET_SPARC)
2124 info->value->arch = CPU_INFO_ARCH_SPARC;
2125 info->value->u.q_sparc.pc = env->pc;
2126 info->value->u.q_sparc.npc = env->npc;
2127 #elif defined(TARGET_MIPS)
2128 info->value->arch = CPU_INFO_ARCH_MIPS;
2129 info->value->u.q_mips.PC = env->active_tc.PC;
2130 #elif defined(TARGET_TRICORE)
2131 info->value->arch = CPU_INFO_ARCH_TRICORE;
2132 info->value->u.tricore.PC = env->PC;
2133 #elif defined(TARGET_S390X)
2134 info->value->arch = CPU_INFO_ARCH_S390;
2135 info->value->u.s390.cpu_state = env->cpu_state;
2136 #elif defined(TARGET_RISCV)
2137 info->value->arch = CPU_INFO_ARCH_RISCV;
2138 info->value->u.riscv.pc = env->pc;
2139 #else
2140 info->value->arch = CPU_INFO_ARCH_OTHER;
2141 #endif
2142 info->value->has_props = !!mc->cpu_index_to_instance_props;
2143 if (info->value->has_props) {
2144 CpuInstanceProperties *props;
2145 props = g_malloc0(sizeof(*props));
2146 *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
2147 info->value->props = props;
2150 /* XXX: waiting for the qapi to support GSList */
2151 if (!cur_item) {
2152 head = cur_item = info;
2153 } else {
2154 cur_item->next = info;
2155 cur_item = info;
2159 return head;
2163 * fast means: we NEVER interrupt vCPU threads to retrieve
2164 * information from KVM.
2166 CpuInfoFastList *qmp_query_cpus_fast(Error **errp)
2168 MachineState *ms = MACHINE(qdev_get_machine());
2169 MachineClass *mc = MACHINE_GET_CLASS(ms);
2170 CpuInfoFastList *head = NULL, *cur_item = NULL;
2171 CPUState *cpu;
2172 #if defined(TARGET_S390X)
2173 S390CPU *s390_cpu;
2174 CPUS390XState *env;
2175 #endif
2177 CPU_FOREACH(cpu) {
2178 CpuInfoFastList *info = g_malloc0(sizeof(*info));
2179 info->value = g_malloc0(sizeof(*info->value));
2181 info->value->cpu_index = cpu->cpu_index;
2182 info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
2183 info->value->thread_id = cpu->thread_id;
2185 info->value->has_props = !!mc->cpu_index_to_instance_props;
2186 if (info->value->has_props) {
2187 CpuInstanceProperties *props;
2188 props = g_malloc0(sizeof(*props));
2189 *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
2190 info->value->props = props;
2193 #if defined(TARGET_S390X)
2194 s390_cpu = S390_CPU(cpu);
2195 env = &s390_cpu->env;
2196 info->value->arch = CPU_INFO_ARCH_S390;
2197 info->value->u.s390.cpu_state = env->cpu_state;
2198 #endif
2199 if (!cur_item) {
2200 head = cur_item = info;
2201 } else {
2202 cur_item->next = info;
2203 cur_item = info;
2207 return head;
2210 void qmp_memsave(int64_t addr, int64_t size, const char *filename,
2211 bool has_cpu, int64_t cpu_index, Error **errp)
2213 FILE *f;
2214 uint32_t l;
2215 CPUState *cpu;
2216 uint8_t buf[1024];
2217 int64_t orig_addr = addr, orig_size = size;
2219 if (!has_cpu) {
2220 cpu_index = 0;
2223 cpu = qemu_get_cpu(cpu_index);
2224 if (cpu == NULL) {
2225 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
2226 "a CPU number");
2227 return;
2230 f = fopen(filename, "wb");
2231 if (!f) {
2232 error_setg_file_open(errp, errno, filename);
2233 return;
2236 while (size != 0) {
2237 l = sizeof(buf);
2238 if (l > size)
2239 l = size;
2240 if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
2241 error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
2242 " specified", orig_addr, orig_size);
2243 goto exit;
2245 if (fwrite(buf, 1, l, f) != l) {
2246 error_setg(errp, QERR_IO_ERROR);
2247 goto exit;
2249 addr += l;
2250 size -= l;
2253 exit:
2254 fclose(f);
2257 void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
2258 Error **errp)
2260 FILE *f;
2261 uint32_t l;
2262 uint8_t buf[1024];
2264 f = fopen(filename, "wb");
2265 if (!f) {
2266 error_setg_file_open(errp, errno, filename);
2267 return;
2270 while (size != 0) {
2271 l = sizeof(buf);
2272 if (l > size)
2273 l = size;
2274 cpu_physical_memory_read(addr, buf, l);
2275 if (fwrite(buf, 1, l, f) != l) {
2276 error_setg(errp, QERR_IO_ERROR);
2277 goto exit;
2279 addr += l;
2280 size -= l;
2283 exit:
2284 fclose(f);
2287 void qmp_inject_nmi(Error **errp)
2289 nmi_monitor_handle(monitor_get_cpu_index(), errp);
2292 void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
2294 if (!use_icount) {
2295 return;
2298 cpu_fprintf(f, "Host - Guest clock %"PRIi64" ms\n",
2299 (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
2300 if (icount_align_option) {
2301 cpu_fprintf(f, "Max guest delay %"PRIi64" ms\n", -max_delay/SCALE_MS);
2302 cpu_fprintf(f, "Max guest advance %"PRIi64" ms\n", max_advance/SCALE_MS);
2303 } else {
2304 cpu_fprintf(f, "Max guest delay NA\n");
2305 cpu_fprintf(f, "Max guest advance NA\n");