[qemu/ar7.git] / cpus.c

/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/config-file.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm.h"
#include "sysemu/hax.h"
#include "sysemu/hvf.h"
#include "sysemu/whpx.h"
#include "exec/exec-all.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "tcg.h"
#include "hw/nmi.h"
#include "sysemu/replay.h"
#include "hw/boards.h"

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

int64_t max_delay;
int64_t max_advance;

/* vcpu throttling controls */
static QEMUTimer *throttle_timer;
static unsigned int throttle_percentage;

#define CPU_THROTTLE_PCT_MIN 1
#define CPU_THROTTLE_PCT_MAX 99
#define CPU_THROTTLE_TIMESLICE_NS 10000000

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

static bool icount_sleep = true;
/* Conversion factor from emulated instructions to virtual clock ticks.  */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10

typedef struct TimersState {
    /* Protected by BQL.  */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read out of BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;

    /* Compensate for varying guest execution speed.  */
    int64_t qemu_icount_bias;
    /* Only written by TCG thread */
    int64_t qemu_icount;
    /* for adjusting icount */
    int64_t vm_clock_warp_start;
    QEMUTimer *icount_rt_timer;
    QEMUTimer *icount_vm_timer;
    QEMUTimer *icount_warp_timer;
} TimersState;

static TimersState timers_state;
bool mttcg_enabled;

/*
 * We default to false if we know other options have been enabled
 * which are currently incompatible with MTTCG. Otherwise when each
 * guest (target) has been updated to support:
 *   - atomic instructions
 *   - memory ordering primitives (barriers)
 * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
 *
 * Once a guest architecture has been converted to the new primitives
 * there are two remaining limitations to check.
 *
 * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
 * - The host must have a stronger memory order than the guest
 *
 * It may be possible in future to support strong guests on weak hosts
 * but that will require tagging all load/stores in a guest with their
 * implicit memory order requirements which would likely slow things
 * down a lot.
 */

static bool check_tcg_memory_orders_compatible(void)
{
#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
    return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
#else
    return false;
#endif
}
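
/* The check above is a subset test on the barrier bits: the guest is
 * compatible only if every ordering guarantee it relies on
 * (TCG_GUEST_DEFAULT_MO) is also provided by the host
 * (TCG_TARGET_DEFAULT_MO), i.e. no bit may be set in the guest mask
 * that is clear in the host mask.  For example, a strongly-ordered
 * x86 guest on a weakly-ordered Arm host fails the test, while an Arm
 * guest on an x86 host passes it.
 */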

static bool default_mttcg_enabled(void)
{
    if (use_icount || TCG_OVERSIZED_GUEST) {
        return false;
    } else {
#ifdef TARGET_SUPPORTS_MTTCG
        return check_tcg_memory_orders_compatible();
#else
        return false;
#endif
    }
}

void qemu_tcg_configure(QemuOpts *opts, Error **errp)
{
    const char *t = qemu_opt_get(opts, "thread");
    if (t) {
        if (strcmp(t, "multi") == 0) {
            if (TCG_OVERSIZED_GUEST) {
                error_setg(errp, "No MTTCG when guest word size > hosts");
            } else if (use_icount) {
                error_setg(errp, "No MTTCG when icount is enabled");
            } else {
#ifndef TARGET_SUPPORTS_MTTCG
                error_report("Guest not yet converted to MTTCG - "
                             "you may get unexpected results");
#endif
                if (!check_tcg_memory_orders_compatible()) {
                    error_report("Guest expects a stronger memory ordering "
                                 "than the host provides");
                    error_printf("This may cause strange/hard to debug errors\n");
                }
                mttcg_enabled = true;
            }
        } else if (strcmp(t, "single") == 0) {
            mttcg_enabled = false;
        } else {
            error_setg(errp, "Invalid 'thread' setting %s", t);
        }
    } else {
        mttcg_enabled = default_mttcg_enabled();
    }
}

/* The current number of executed instructions is based on what we
 * originally budgeted minus the current state of the decrementing
 * icount counters in extra/u16.low.
 */
static int64_t cpu_get_icount_executed(CPUState *cpu)
{
    return cpu->icount_budget - (cpu->icount_decr.u16.low + cpu->icount_extra);
}

/*
 * Update the global shared timer_state.qemu_icount to take into
 * account executed instructions. This is done by the TCG vCPU
 * thread so the main-loop can see time has moved forward.
 */
void cpu_update_icount(CPUState *cpu)
{
    int64_t executed = cpu_get_icount_executed(cpu);
    cpu->icount_budget -= executed;

#ifdef CONFIG_ATOMIC64
    atomic_set__nocheck(&timers_state.qemu_icount,
                        atomic_read__nocheck(&timers_state.qemu_icount) +
                        executed);
#else /* FIXME: we need 64bit atomics to do this safely */
    timers_state.qemu_icount += executed;
#endif
}

int64_t cpu_get_icount_raw(void)
{
    CPUState *cpu = current_cpu;

    if (cpu && cpu->running) {
        if (!cpu->can_do_io) {
            error_report("Bad icount read");
            exit(1);
        }
        /* Take into account what has run */
        cpu_update_icount(cpu);
    }
#ifdef CONFIG_ATOMIC64
    return atomic_read__nocheck(&timers_state.qemu_icount);
#else /* FIXME: we need 64bit atomics to do this safely */
    return timers_state.qemu_icount;
#endif
}

/* Return the virtual CPU time, based on the instruction counter.  */
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount = cpu_get_icount_raw();
    return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
}

int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}

int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << icount_time_shift;
}
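
/* With the default icount_time_shift of 3 (see configure_icount below),
 * each executed instruction accounts for 1 << 3 = 8 ns of virtual time,
 * i.e. a nominal rate of 125 MIPS.  In "shift=auto" mode icount_adjust()
 * raises or lowers the shift at runtime to track real time.
 */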

/* return the time elapsed in VM between vm_start and vm_stop.  Unless
 * icount is active, cpu_get_ticks() uses units of the host CPU cycle
 * counter.
 *
 * Caller must hold the BQL
 */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_host_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Note: non increasing ticks may happen if the host uses
           software suspend */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}

static int64_t cpu_get_clock_locked(void)
{
    int64_t time;

    time = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        time += get_clock();
    }

    return time;
}

/* Return the monotonic time elapsed in VM, i.e.,
 * the time between vm_start and vm_stop
 */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the real thing protected by the seqlock is cpu_clock_offset. */
    seqlock_write_begin(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_end(&timers_state.vm_clock_seqlock);
}

/* disable cpu_get_ticks() : the clock is stopped.  You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the real thing protected by the seqlock is cpu_clock_offset. */
    seqlock_write_begin(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_end(&timers_state.vm_clock_seqlock);
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex.  */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_begin(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    timers_state.qemu_icount_bias = cur_icount
                              - (timers_state.qemu_icount << icount_time_shift);
    seqlock_write_end(&timers_state.vm_clock_seqlock);
}
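
/* The feedback above works in units of the shift: delta > 0 means the
 * virtual clock is ahead of real time, so decrementing icount_time_shift
 * halves the nanoseconds charged per instruction and slows virtual time;
 * delta < 0 speeds it up by incrementing the shift.  The bias is then
 * recomputed so the rate change does not make the virtual clock jump.
 */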

static void icount_adjust_rt(void *opaque)
{
    timer_mod(timers_state.icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(timers_state.icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
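
/* qemu_icount_round() converts a nanosecond deadline into a whole number
 * of instructions, rounding up so the budget never undershoots the
 * deadline: with icount_time_shift == 3, a 9 ns deadline becomes
 * (9 + 7) >> 3 = 2 instructions (16 ns) rather than 1 (8 ns).
 */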

static void icount_warp_rt(void)
{
    unsigned seq;
    int64_t warp_start;

    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    do {
        seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        warp_start = timers_state.vm_clock_warp_start;
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));

    if (warp_start == -1) {
        return;
    }

    seqlock_write_begin(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
                                     cpu_get_clock_locked());
        int64_t warp_delta;

        warp_delta = clock - timers_state.vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = clock - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        timers_state.qemu_icount_bias += warp_delta;
    }
    timers_state.vm_clock_warp_start = -1;
    seqlock_write_end(&timers_state.vm_clock_seqlock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static void icount_timer_cb(void *opaque)
{
    /* No need for a checkpoint because the timer already synchronizes
     * with CHECKPOINT_CLOCK_VIRTUAL_RT.
     */
    icount_warp_rt();
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    AioContext *aio_context;
    assert(qtest_enabled());
    aio_context = qemu_get_aio_context();
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);

        seqlock_write_begin(&timers_state.vm_clock_seqlock);
        timers_state.qemu_icount_bias += warp;
        seqlock_write_end(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}

void qemu_start_warp_timer(void)
{
    int64_t clock;
    int64_t deadline;

    if (!use_icount) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    /* warp clock deterministically in record/replay mode */
    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
        return;
    }

    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        static bool notified;
        if (!icount_sleep && !notified) {
            warn_report("icount sleep disabled and no active timers");
            notified = true;
        }
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         */
        if (!icount_sleep) {
            /*
             * We never let VCPUs sleep in no sleep icount mode.
             * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
             * to the next QEMU_CLOCK_VIRTUAL event and notify it.
             * It is useful when we want a deterministic execution time,
             * isolated from host latencies.
             */
            seqlock_write_begin(&timers_state.vm_clock_seqlock);
            timers_state.qemu_icount_bias += deadline;
            seqlock_write_end(&timers_state.vm_clock_seqlock);
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
        } else {
            /*
             * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
             * "real" time, (related to the time left until the next event) has
             * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
             * This prevents the warps from being visible externally; for
             * example, you will not be sending network packets continuously
             * instead of every 100ms.
             */
            seqlock_write_begin(&timers_state.vm_clock_seqlock);
            if (timers_state.vm_clock_warp_start == -1
                || timers_state.vm_clock_warp_start > clock) {
                timers_state.vm_clock_warp_start = clock;
            }
            seqlock_write_end(&timers_state.vm_clock_seqlock);
            timer_mod_anticipate(timers_state.icount_warp_timer,
                                 clock + deadline);
        }
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static void qemu_account_warp_timer(void)
{
    if (!use_icount || !icount_sleep) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    /* warp clock deterministically in record/replay mode */
    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
        return;
    }

    timer_del(timers_state.icount_warp_timer);
    icount_warp_rt();
}

static bool icount_state_needed(void *opaque)
{
    return use_icount;
}

static bool warp_timer_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_warp_timer != NULL;
}

static bool adjust_timers_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_rt_timer != NULL;
}

/*
 * Subsection for warp timer migration is optional, because it may not be
 * created.
 */
static const VMStateDescription icount_vmstate_warp_timer = {
    .name = "timer/icount/warp_timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = warp_timer_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(vm_clock_warp_start, TimersState),
        VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription icount_vmstate_adjust_timers = {
    .name = "timer/icount/timers",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = adjust_timers_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
        VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icount_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_warp_timer,
        &icount_vmstate_adjust_timers,
        NULL
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_timers,
        NULL
    }
};

static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
{
    double pct;
    double throttle_ratio;
    long sleeptime_ns;

    if (!cpu_throttle_get_percentage()) {
        return;
    }

    pct = (double)cpu_throttle_get_percentage()/100;
    throttle_ratio = pct / (1 - pct);
    sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);

    qemu_mutex_unlock_iothread();
    g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
    qemu_mutex_lock_iothread();
    atomic_set(&cpu->throttle_thread_scheduled, 0);
}
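
/* The sleep above is sized so that the ratio of sleep time to run time
 * matches the requested throttle percentage.  At 50%, throttle_ratio is
 * 0.5 / 0.5 = 1, so the vCPU sleeps one full 10 ms timeslice per
 * timeslice of execution; at 99% (CPU_THROTTLE_PCT_MAX) it sleeps
 * 99 timeslices per timeslice run.
 */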

static void cpu_throttle_timer_tick(void *opaque)
{
    CPUState *cpu;
    double pct;

    /* Stop the timer if needed */
    if (!cpu_throttle_get_percentage()) {
        return;
    }
    CPU_FOREACH(cpu) {
        if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
            async_run_on_cpu(cpu, cpu_throttle_thread,
                             RUN_ON_CPU_NULL);
        }
    }

    pct = (double)cpu_throttle_get_percentage()/100;
    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                                   CPU_THROTTLE_TIMESLICE_NS / (1-pct));
}
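
/* The timer is rearmed at CPU_THROTTLE_TIMESLICE_NS / (1 - pct) so that
 * the runnable fraction of each period stays one timeslice long: at 50%
 * the tick fires every 20 ms, at 99% roughly once per second.
 */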

void cpu_throttle_set(int new_throttle_pct)
{
    /* Ensure throttle percentage is within valid range */
    new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
    new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);

    atomic_set(&throttle_percentage, new_throttle_pct);

    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                                       CPU_THROTTLE_TIMESLICE_NS);
}

void cpu_throttle_stop(void)
{
    atomic_set(&throttle_percentage, 0);
}

bool cpu_throttle_active(void)
{
    return (cpu_throttle_get_percentage() != 0);
}

int cpu_throttle_get_percentage(void)
{
    return atomic_read(&throttle_percentage);
}

void cpu_ticks_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                  cpu_throttle_timer_tick, NULL);
}

void configure_icount(QemuOpts *opts, Error **errp)
{
    const char *option;
    char *rem_str = NULL;

    option = qemu_opt_get(opts, "shift");
    if (!option) {
        if (qemu_opt_get(opts, "align") != NULL) {
            error_setg(errp, "Please specify shift option when using align");
        }
        return;
    }

    icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
    if (icount_sleep) {
        timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                                      icount_timer_cb, NULL);
    }

    icount_align_option = qemu_opt_get_bool(opts, "align", false);

    if (icount_align_option && !icount_sleep) {
        error_setg(errp, "align=on and sleep=off are incompatible");
    }
    if (strcmp(option, "auto") != 0) {
        errno = 0;
        icount_time_shift = strtol(option, &rem_str, 0);
        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
            error_setg(errp, "icount: Invalid shift value");
        }
        use_icount = 1;
        return;
    } else if (icount_align_option) {
        error_setg(errp, "shift=auto and align=on are incompatible");
    } else if (!icount_sleep) {
        error_setg(errp, "shift=auto and sleep=off are incompatible");
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    timers_state.vm_clock_warp_start = -1;
    timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                                                icount_adjust_rt, NULL);
    timer_mod(timers_state.icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                                icount_adjust_vm, NULL);
    timer_mod(timers_state.icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND / 10);
}

/***********************************************************/
/* TCG vCPU kick timer
 *
 * The kick timer is responsible for moving single threaded vCPU
 * emulation on to the next vCPU. If more than one vCPU is running a
 * timer event will force a cpu->exit so the next vCPU can get
 * scheduled.
 *
 * The timer is removed if all vCPUs are idle and restarted again once
 * idleness is complete.
 */

static QEMUTimer *tcg_kick_vcpu_timer;
static CPUState *tcg_current_rr_cpu;

#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)

static inline int64_t qemu_tcg_next_kick(void)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
}

/* Kick the currently round-robin scheduled vCPU */
static void qemu_cpu_kick_rr_cpu(void)
{
    CPUState *cpu;
    do {
        cpu = atomic_mb_read(&tcg_current_rr_cpu);
        if (cpu) {
            cpu_exit(cpu);
        }
    } while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
}
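
/* The loop above re-reads tcg_current_rr_cpu after kicking: the
 * round-robin thread may have moved on to another vCPU between the
 * read and the cpu_exit(), in which case the request went to the
 * wrong vCPU and the kick must be repeated until the pointer is
 * observed to be stable.
 */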

static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
{
}

void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
{
    if (!use_icount || type != QEMU_CLOCK_VIRTUAL) {
        qemu_notify_event();
        return;
    }

    if (qemu_in_vcpu_thread()) {
        /* A CPU is currently running; kick it back out to the
         * tcg_cpu_exec() loop so it will recalculate its
         * icount deadline immediately.
         */
        qemu_cpu_kick(current_cpu);
    } else if (first_cpu) {
        /* qemu_cpu_kick is not enough to kick a halted CPU out of
         * qemu_tcg_wait_io_event.  async_run_on_cpu, instead,
         * causes cpu_thread_is_idle to return false.  This way,
         * handle_icount_deadline can run.
         * If we have no CPUs at all for some reason, we don't
         * need to do anything.
         */
        async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
    }
}

static void kick_tcg_thread(void *opaque)
{
    timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    qemu_cpu_kick_rr_cpu();
}

static void start_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
        tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                           kick_tcg_thread, NULL);
        timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    }
}

static void stop_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    if (tcg_kick_vcpu_timer) {
        timer_del(tcg_kick_vcpu_timer);
        tcg_kick_vcpu_timer = NULL;
    }
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
        /* TODO: move to cpu_synchronize_state() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_state(cpu);
        }
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
        /* TODO: move to cpu_synchronize_post_reset() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_post_reset(cpu);
        }
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
        /* TODO: move to cpu_synchronize_post_init() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_post_init(cpu);
        }
    }
}

void cpu_synchronize_all_pre_loadvm(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_pre_loadvm(cpu);
    }
}

static int do_vm_stop(RunState state, bool send_stop)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        if (send_stop) {
            qapi_event_send_stop(&error_abort);
        }
    }

    bdrv_drain_all();
    replay_disable_events();
    ret = bdrv_flush_all();

    return ret;
}

/* Special vm_stop() variant for terminating the process.  Historically clients
 * did not expect a QMP STOP event and so we need to retain compatibility.
 */
int vm_shutdown(void)
{
    return do_vm_stop(RUN_STATE_SHUTDOWN, false);
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
{
    if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
        sigbus_reraise();
    }

    if (current_cpu) {
        /* Called asynchronously in VCPU thread.  */
        if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    } else {
        /* Called synchronously (via signalfd) in main thread.  */
        if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
#else /* !CONFIG_LINUX */
static void qemu_init_sigbus(void)
{
}
#endif /* !CONFIG_LINUX */

static QemuMutex qemu_global_mutex;

static QemuThread io_thread;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
}

static void qemu_kvm_destroy_vcpu(CPUState *cpu)
{
    if (kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}

static void qemu_tcg_destroy_vcpu(CPUState *cpu)
{
}

static void qemu_cpu_stop(CPUState *cpu, bool exit)
{
    g_assert(qemu_cpu_is_self(cpu));
    cpu->stop = false;
    cpu->stopped = true;
    if (exit) {
        cpu_exit(cpu);
    }
    qemu_cond_broadcast(&qemu_pause_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    atomic_mb_set(&cpu->thread_kicked, false);
    if (cpu->stop) {
        qemu_cpu_stop(cpu, false);
    }
    process_queued_cpu_work(cpu);
}

static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
{
    while (all_cpu_threads_idle()) {
        stop_tcg_kick_timer();
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    start_tcg_kick_timer();

    qemu_wait_io_event_common(cpu);
}

static void qemu_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

#ifdef _WIN32
    /* Eat dummy APC queued by qemu_cpu_kick_thread.  */
    if (!tcg_enabled()) {
        SleepEx(0, TRUE);
    }
#endif
    qemu_wait_io_event_common(cpu);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        error_report("kvm_init_vcpu failed: %s", strerror(-r));
        exit(1);
    }

    kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    do {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    qemu_kvm_destroy_vcpu(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    error_report("qtest is not supported under Windows");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    do {
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug);

    rcu_unregister_thread();
    return NULL;
#endif
}

static int64_t tcg_get_icount_limit(void)
{
    int64_t deadline;

    if (replay_mode != REPLAY_MODE_PLAY) {
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        return qemu_icount_round(deadline);
    } else {
        return replay_get_instructions();
    }
}
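
/* The deadline is clamped to INT32_MAX ns (about 2.1 s) before being
 * converted into an instruction budget; e.g. with icount_time_shift == 3
 * an 80 ns deadline yields a 10-instruction budget.
 */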

static void handle_icount_deadline(void)
{
    assert(qemu_in_vcpu_thread());
    if (use_icount) {
        int64_t deadline =
            qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        if (deadline == 0) {
            /* Wake up other AioContexts.  */
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        }
    }
}

static void prepare_icount_for_run(CPUState *cpu)
{
    if (use_icount) {
        int insns_left;

        /* These should always be cleared by process_icount_data after
         * each vCPU execution. However u16.high can be raised
         * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
         */
        g_assert(cpu->icount_decr.u16.low == 0);
        g_assert(cpu->icount_extra == 0);

        cpu->icount_budget = tcg_get_icount_limit();
        insns_left = MIN(0xffff, cpu->icount_budget);
        cpu->icount_decr.u16.low = insns_left;
        cpu->icount_extra = cpu->icount_budget - insns_left;

        replay_mutex_lock();
    }
}
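
/* The budget is split across the two counters the translated code
 * actually decrements: u16.low can hold at most 0xffff instructions,
 * so any remainder is parked in icount_extra, from which the low
 * counter is topped back up by the execution loop as it drains.
 */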

static void process_icount_data(CPUState *cpu)
{
    if (use_icount) {
        /* Account for executed instructions */
        cpu_update_icount(cpu);

        /* Reset the counters */
        cpu->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        cpu->icount_budget = 0;

        replay_account_executed_instructions();

        replay_mutex_unlock();
    }
}

static int tcg_cpu_exec(CPUState *cpu)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    cpu_exec_start(cpu);
    ret = cpu_exec(cpu);
    cpu_exec_end(cpu);
#ifdef CONFIG_PROFILER
    tcg_time += profile_getclock() - ti;
#endif
    return ret;
}

/* Destroy any remaining vCPUs which have been unplugged and have
 * finished running
 */
static void deal_with_unplugged_cpus(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->unplug && !cpu_can_run(cpu)) {
            qemu_tcg_destroy_vcpu(cpu);
            cpu->created = false;
            qemu_cond_signal(&qemu_cpu_cond);
            break;
        }
    }
}

/* Single-threaded TCG
 *
 * In the single-threaded case each vCPU is simulated in turn. If
 * there is more than a single vCPU we create a simple timer to kick
 * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
 * This is done explicitly rather than relying on side-effects
 * elsewhere.
 */

static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    rcu_register_thread();
    tcg_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->created = true;
    cpu->can_do_io = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            current_cpu = cpu;
            qemu_wait_io_event_common(cpu);
        }
    }

    start_tcg_kick_timer();

    cpu = first_cpu;

    /* process any pending work */
    cpu->exit_request = 1;

    while (1) {
        qemu_mutex_unlock_iothread();
        replay_mutex_lock();
        qemu_mutex_lock_iothread();
        /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
        qemu_account_warp_timer();

        /* Run the timers here.  This is much more efficient than
         * waking up the I/O thread and waiting for completion.
         */
        handle_icount_deadline();

        replay_mutex_unlock();

        if (!cpu) {
            cpu = first_cpu;
        }

        while (cpu && !cpu->queued_work_first && !cpu->exit_request) {

            atomic_mb_set(&tcg_current_rr_cpu, cpu);
            current_cpu = cpu;

            qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                              (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

            if (cpu_can_run(cpu)) {
                int r;

                qemu_mutex_unlock_iothread();
                prepare_icount_for_run(cpu);

                r = tcg_cpu_exec(cpu);

                process_icount_data(cpu);
                qemu_mutex_lock_iothread();

                if (r == EXCP_DEBUG) {
                    cpu_handle_guest_debug(cpu);
                    break;
                } else if (r == EXCP_ATOMIC) {
                    qemu_mutex_unlock_iothread();
                    cpu_exec_step_atomic(cpu);
                    qemu_mutex_lock_iothread();
                    break;
                }
            } else if (cpu->stop) {
                if (cpu->unplug) {
                    cpu = CPU_NEXT(cpu);
                }
                break;
            }

            cpu = CPU_NEXT(cpu);
        } /* while (cpu && !cpu->exit_request).. */

        /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
        atomic_set(&tcg_current_rr_cpu, NULL);

        if (cpu && cpu->exit_request) {
            atomic_mb_set(&cpu->exit_request, 0);
        }

        qemu_tcg_rr_wait_io_event(cpu ? cpu : QTAILQ_FIRST(&cpus));
        deal_with_unplugged_cpus();
    }

    rcu_unregister_thread();
    return NULL;
}

static void *qemu_hax_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();
    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->created = true;
    cpu->halted = 0;
    current_cpu = cpu;

    hax_init_vcpu(cpu);
    qemu_cond_signal(&qemu_cpu_cond);

    do {
        if (cpu_can_run(cpu)) {
            r = hax_smp_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }

        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));
    rcu_unregister_thread();
    return NULL;
}

/* The HVF-specific vCPU thread function. This one should only run when the host
 * CPU supports the VMX "unrestricted guest" feature. */
static void *qemu_hvf_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    int r;

    assert(hvf_enabled());

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    hvf_init_vcpu(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    do {
        if (cpu_can_run(cpu)) {
            r = hvf_vcpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    hvf_vcpu_destroy(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}

static void *qemu_whpx_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = whpx_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "whpx_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    do {
        if (cpu_can_run(cpu)) {
            r = whpx_vcpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        while (cpu_thread_is_idle(cpu)) {
            qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
        }
        qemu_wait_io_event_common(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    whpx_destroy_vcpu(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}

#ifdef _WIN32
static void CALLBACK dummy_apc_func(ULONG_PTR unused)
{
}
#endif

/* Multi-threaded TCG
 *
 * In the multi-threaded case each vCPU has its own thread. The TLS
 * variable current_cpu can be used deep in the code to find the
 * current CPUState for a given thread.
 */

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    g_assert(!use_icount);

    rcu_register_thread();
    tcg_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->created = true;
    cpu->can_do_io = 1;
    current_cpu = cpu;
    qemu_cond_signal(&qemu_cpu_cond);

    /* process any pending work */
    cpu->exit_request = 1;

    do {
        if (cpu_can_run(cpu)) {
            int r;
            qemu_mutex_unlock_iothread();
            r = tcg_cpu_exec(cpu);
            qemu_mutex_lock_iothread();
            switch (r) {
            case EXCP_DEBUG:
                cpu_handle_guest_debug(cpu);
                break;
            case EXCP_HALTED:
                /* during start-up the vCPU is reset and the thread is
                 * kicked several times. If we don't ensure we go back
                 * to sleep in the halted state we won't cleanly
                 * start-up when the vCPU is enabled.
                 *
                 * cpu->halted should ensure we sleep in wait_io_event
                 */
                g_assert(cpu->halted);
                break;
            case EXCP_ATOMIC:
                qemu_mutex_unlock_iothread();
                cpu_exec_step_atomic(cpu);
                qemu_mutex_lock_iothread();
                /* fall through to the default case */
            default:
                /* Ignore everything else? */
                break;
            }
        }

        atomic_mb_set(&cpu->exit_request, 0);
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    qemu_tcg_destroy_vcpu(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}

static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    if (cpu->thread_kicked) {
        return;
    }
    cpu->thread_kicked = true;
    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        if (whpx_enabled()) {
            whpx_vcpu_kick(cpu);
        } else if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
            fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
                    __func__, GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (tcg_enabled()) {
        cpu_exit(cpu);
        /* NOP unless doing single-thread RR */
        qemu_cpu_kick_rr_cpu();
    } else {
        if (hax_enabled()) {
            /*
             * FIXME: race condition with the exit_request check in
             * hax_vcpu_hax_exec
             */
            cpu->exit_request = 1;
        }
        qemu_cpu_kick_thread(cpu);
    }
}

void qemu_cpu_kick_self(void)
{
    assert(current_cpu);
    qemu_cpu_kick_thread(current_cpu);
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

static __thread bool iothread_locked = false;

bool qemu_mutex_iothread_locked(void)
{
    return iothread_locked;
}

void qemu_mutex_lock_iothread(void)
{
    g_assert(!qemu_mutex_iothread_locked());
    qemu_mutex_lock(&qemu_global_mutex);
    iothread_locked = true;
}

void qemu_mutex_unlock_iothread(void)
{
    g_assert(qemu_mutex_iothread_locked());
    iothread_locked = false;
    qemu_mutex_unlock(&qemu_global_mutex);
}
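
/* iothread_locked is thread-local, so qemu_mutex_iothread_locked()
 * answers only "does *this* thread hold the BQL", which is exactly
 * what the g_assert()s above need; it says nothing about whether some
 * other thread currently owns the mutex.
 */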

static bool all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return false;
        }
    }

    return true;
}

void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        if (qemu_cpu_is_self(cpu)) {
            qemu_cpu_stop(cpu, true);
        } else {
            cpu->stop = true;
            qemu_cpu_kick(cpu);
        }
    }

    /* We need to drop the replay_lock so any vCPU threads woken up
     * can finish their replay tasks
     */
    replay_mutex_unlock();

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }

    qemu_mutex_unlock_iothread();
    replay_mutex_lock();
    qemu_mutex_lock_iothread();
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

void cpu_remove_sync(CPUState *cpu)
{
    cpu->stop = true;
    cpu->unplug = true;
    qemu_cpu_kick(cpu);
    qemu_mutex_unlock_iothread();
    qemu_thread_join(cpu->thread);
    qemu_mutex_lock_iothread();
}

/* For temporary buffers for forming a name */
#define VCPU_THREAD_NAME_SIZE 16

static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];
    static QemuCond *single_tcg_halt_cond;
    static QemuThread *single_tcg_cpu_thread;
    static int tcg_region_inited;

    /*
     * Initialize TCG regions--once. Now is a good time, because:
     * (1) TCG's init context, prologue and target globals have been set up.
     * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the
     *     -accel flag is processed, so the check doesn't work then).
     */
    if (!tcg_region_inited) {
        tcg_region_inited = 1;
        tcg_region_init();
    }

    if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);

        if (qemu_tcg_mttcg_enabled()) {
            /* create a thread per vCPU with TCG (MTTCG) */
            parallel_cpus = true;
            snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                     cpu->cpu_index);

            qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                               cpu, QEMU_THREAD_JOINABLE);

        } else {
            /* share a single thread for all cpus with TCG */
            snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
            qemu_thread_create(cpu->thread, thread_name,
                               qemu_tcg_rr_cpu_thread_fn,
                               cpu, QEMU_THREAD_JOINABLE);

            single_tcg_halt_cond = cpu->halt_cond;
            single_tcg_cpu_thread = cpu->thread;
        }
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
    } else {
        /* For non-MTTCG cases we share the thread */
        cpu->thread = single_tcg_cpu_thread;
        cpu->halt_cond = single_tcg_halt_cond;
        cpu->thread_id = first_cpu->thread_id;
        cpu->can_do_io = 1;
        cpu->created = true;
    }
}

static void qemu_hax_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);

    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
    cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
}

static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
}

static void qemu_hvf_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    /* HVF currently does not support TCG, and only runs in
     * unrestricted-guest mode. */
    assert(hvf_enabled());

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);

    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
}

static void qemu_whpx_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_whpx_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
    cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
}

void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;

    if (!cpu->as) {
        /* If the target cpu hasn't set up any address spaces itself,
         * give it the default one.
         */
        cpu->num_ases = 1;
        cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
    }

    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (hax_enabled()) {
        qemu_hax_start_vcpu(cpu);
    } else if (hvf_enabled()) {
        qemu_hvf_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else if (whpx_enabled()) {
        qemu_whpx_start_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }

    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        qemu_cpu_stop(current_cpu, true);
    }
}

int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state, true);
}

/**
 * Prepare for (re)starting the VM.
 * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
 * running or in case of an error condition), 0 otherwise.
 */
int vm_prepare_start(void)
{
    RunState requested;

    qemu_vmstop_requested(&requested);
    if (runstate_is_running() && requested == RUN_STATE__MAX) {
        return -1;
    }

    /* Ensure that a STOP/RESUME pair of events is emitted if a
     * vmstop request was pending.  The BLOCK_IO_ERROR event, for
     * example, according to documentation is always followed by
     * the STOP event.
     */
    if (runstate_is_running()) {
        qapi_event_send_stop(&error_abort);
        qapi_event_send_resume(&error_abort);
        return -1;
    }

    /* We are sending this now, but the CPUs will be resumed shortly later */
    qapi_event_send_resume(&error_abort);

    replay_enable_events();
    cpu_enable_ticks();
    runstate_set(RUN_STATE_RUNNING);
    vm_state_notify(1, RUN_STATE_RUNNING);
    return 0;
}

void vm_start(void)
{
    if (!vm_prepare_start()) {
        resume_all_vcpus();
    }
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);

        bdrv_drain_all();
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_RISCV)
        RISCVCPU *riscv_cpu = RISCV_CPU(cpu);
        CPURISCVState *env = &riscv_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#elif defined(TARGET_TRICORE)
        TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
        CPUTriCoreState *env = &tricore_cpu->env;
#elif defined(TARGET_S390X)
        S390CPU *s390_cpu = S390_CPU(cpu);
        CPUS390XState *env = &s390_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->arch = CPU_INFO_ARCH_X86;
        info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->arch = CPU_INFO_ARCH_PPC;
        info->value->u.ppc.nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->arch = CPU_INFO_ARCH_SPARC;
        info->value->u.q_sparc.pc = env->pc;
        info->value->u.q_sparc.npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->arch = CPU_INFO_ARCH_MIPS;
        info->value->u.q_mips.PC = env->active_tc.PC;
#elif defined(TARGET_TRICORE)
        info->value->arch = CPU_INFO_ARCH_TRICORE;
        info->value->u.tricore.PC = env->PC;
#elif defined(TARGET_S390X)
        info->value->arch = CPU_INFO_ARCH_S390;
        info->value->u.s390.cpu_state = env->cpu_state;
#elif defined(TARGET_RISCV)
        info->value->arch = CPU_INFO_ARCH_RISCV;
        info->value->u.riscv.pc = env->pc;
#else
        info->value->arch = CPU_INFO_ARCH_OTHER;
#endif
        info->value->has_props = !!mc->cpu_index_to_instance_props;
        if (info->value->has_props) {
            CpuInstanceProperties *props;
            props = g_malloc0(sizeof(*props));
            *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
            info->value->props = props;
        }

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}

static CpuInfoArch sysemu_target_to_cpuinfo_arch(SysEmuTarget target)
{
    /*
     * The @SysEmuTarget -> @CpuInfoArch mapping below is based on the
     * TARGET_ARCH -> TARGET_BASE_ARCH mapping in the "configure" script.
     */
    switch (target) {
    case SYS_EMU_TARGET_I386:
    case SYS_EMU_TARGET_X86_64:
        return CPU_INFO_ARCH_X86;

    case SYS_EMU_TARGET_PPC:
    case SYS_EMU_TARGET_PPCEMB:
    case SYS_EMU_TARGET_PPC64:
        return CPU_INFO_ARCH_PPC;

    case SYS_EMU_TARGET_SPARC:
    case SYS_EMU_TARGET_SPARC64:
        return CPU_INFO_ARCH_SPARC;

    case SYS_EMU_TARGET_MIPS:
    case SYS_EMU_TARGET_MIPSEL:
    case SYS_EMU_TARGET_MIPS64:
    case SYS_EMU_TARGET_MIPS64EL:
        return CPU_INFO_ARCH_MIPS;

    case SYS_EMU_TARGET_TRICORE:
        return CPU_INFO_ARCH_TRICORE;

    case SYS_EMU_TARGET_S390X:
        return CPU_INFO_ARCH_S390;

    case SYS_EMU_TARGET_RISCV32:
    case SYS_EMU_TARGET_RISCV64:
        return CPU_INFO_ARCH_RISCV;

    default:
        return CPU_INFO_ARCH_OTHER;
    }
}

static void cpustate_to_cpuinfo_s390(CpuInfoS390 *info, const CPUState *cpu)
{
#ifdef TARGET_S390X
    S390CPU *s390_cpu = S390_CPU(cpu);
    CPUS390XState *env = &s390_cpu->env;

    info->cpu_state = env->cpu_state;
#else
    abort();
#endif
}

/*
 * fast means: we NEVER interrupt vCPU threads to retrieve
 * information from KVM.
 */
CpuInfoFastList *qmp_query_cpus_fast(Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    CpuInfoFastList *head = NULL, *cur_item = NULL;
    SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME,
                                          -1, &error_abort);
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoFastList *info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));

        info->value->cpu_index = cpu->cpu_index;
        info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
        info->value->thread_id = cpu->thread_id;

        info->value->has_props = !!mc->cpu_index_to_instance_props;
        if (info->value->has_props) {
            CpuInstanceProperties *props;
            props = g_malloc0(sizeof(*props));
            *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
            info->value->props = props;
        }

        info->value->arch = sysemu_target_to_cpuinfo_arch(target);
        info->value->target = target;
        if (target == SYS_EMU_TARGET_S390X) {
            cpustate_to_cpuinfo_s390(&info->value->u.s390x, cpu);
        } else {
            /* do nothing for @CpuInfoOther */
        }

        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}

void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];
    int64_t orig_addr = addr, orig_size = size;

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                   "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
                             " specified", orig_addr, orig_size);
            goto exit;
        }
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_physical_memory_read(addr, buf, l);
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
    nmi_monitor_handle(monitor_get_cpu_index(), errp);
}

void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
{
    if (!use_icount) {
        return;
    }

    cpu_fprintf(f, "Host - Guest clock %"PRIi64" ms\n",
                (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
    if (icount_align_option) {
        cpu_fprintf(f, "Max guest delay %"PRIi64" ms\n", -max_delay/SCALE_MS);
        cpu_fprintf(f, "Max guest advance %"PRIi64" ms\n", max_advance/SCALE_MS);
    } else {
        cpu_fprintf(f, "Max guest delay NA\n");
        cpu_fprintf(f, "Max guest advance NA\n");
    }
}