/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/config-file.h"
#include "monitor/monitor.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
#include "sysemu/block-backend.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm.h"
#include "sysemu/hax.h"
#include "sysemu/hvf.h"
#include "sysemu/whpx.h"
#include "exec/exec-all.h"
#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "qemu/guest-random.h"
#include "sysemu/replay.h"
#include "hw/boards.h"
#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */
/* vcpu throttling controls */
static QEMUTimer *throttle_timer;
static unsigned int throttle_percentage;

#define CPU_THROTTLE_PCT_MIN 1
#define CPU_THROTTLE_PCT_MAX 99
#define CPU_THROTTLE_TIMESLICE_NS 10000000
bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}
/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */
static bool icount_sleep = true;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10

typedef struct TimersState {
    /* Protected by BQL.  */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* Protect fields that can be respectively read outside the
     * BQL, and written from multiple threads.
     */
    QemuSeqLock vm_clock_seqlock;
    QemuSpin vm_clock_lock;

    int16_t cpu_ticks_enabled;

    /* Conversion factor from emulated instructions to virtual clock ticks.  */
    int16_t icount_time_shift;

    /* Compensate for varying guest execution speed.  */
    int64_t qemu_icount_bias;

    int64_t vm_clock_warp_start;
    int64_t cpu_clock_offset;

    /* Only written by TCG thread */
    int64_t qemu_icount;

    /* for adjusting icount */
    QEMUTimer *icount_rt_timer;
    QEMUTimer *icount_vm_timer;
    QEMUTimer *icount_warp_timer;
} TimersState;

static TimersState timers_state;
bool mttcg_enabled;
/*
 * We default to false if we know other options have been enabled
 * which are currently incompatible with MTTCG. Otherwise when each
 * guest (target) has been updated to support:
 *   - atomic instructions
 *   - memory ordering primitives (barriers)
 * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
 *
 * Once a guest architecture has been converted to the new primitives
 * there are two remaining limitations to check.
 *
 * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
 * - The host must have a stronger memory order than the guest
 *
 * It may be possible in future to support strong guests on weak hosts
 * but that will require tagging all load/stores in a guest with their
 * implicit memory order requirements which would likely slow things
 * down a lot.
 */
static bool check_tcg_memory_orders_compatible(void)
{
#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
    return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
#else
    return false;
#endif
}
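/*
 * Example: a strongly ordered guest such as x86 declares
 * TCG_GUEST_DEFAULT_MO == TCG_MO_ALL, so the check only passes on a TCG
 * backend that also guarantees all of those orderings; on a weakly
 * ordered backend some bits survive the mask and MTTCG stays off by
 * default.
 */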
static bool default_mttcg_enabled(void)
{
    if (use_icount || TCG_OVERSIZED_GUEST) {
        return false;
    } else {
#ifdef TARGET_SUPPORTS_MTTCG
        return check_tcg_memory_orders_compatible();
#else
        return false;
#endif
    }
}
void qemu_tcg_configure(QemuOpts *opts, Error **errp)
{
    const char *t = qemu_opt_get(opts, "thread");
    if (t) {
        if (strcmp(t, "multi") == 0) {
            if (TCG_OVERSIZED_GUEST) {
                error_setg(errp, "No MTTCG when guest word size > hosts");
            } else if (use_icount) {
                error_setg(errp, "No MTTCG when icount is enabled");
            } else {
#ifndef TARGET_SUPPORTS_MTTCG
                warn_report("Guest not yet converted to MTTCG - "
                            "you may get unexpected results");
#endif
                if (!check_tcg_memory_orders_compatible()) {
                    warn_report("Guest expects a stronger memory ordering "
                                "than the host provides");
                    error_printf("This may cause strange/hard to debug errors\n");
                }
                mttcg_enabled = true;
            }
        } else if (strcmp(t, "single") == 0) {
            mttcg_enabled = false;
        } else {
            error_setg(errp, "Invalid 'thread' setting %s", t);
        }
    } else {
        mttcg_enabled = default_mttcg_enabled();
    }
}
/* The current number of executed instructions is based on what we
 * originally budgeted minus the current state of the decrementing
 * icount counters in extra/u16.low.
 */
static int64_t cpu_get_icount_executed(CPUState *cpu)
{
    return (cpu->icount_budget -
            (cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra));
}
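/*
 * Example: if prepare_icount_for_run budgeted 200 instructions, all 200 go
 * into icount_decr.u16.low and icount_extra stays 0.  After the guest
 * retires 150 of them, low has counted down to 50 and the expression above
 * yields 200 - (50 + 0) = 150 executed instructions.
 */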
/*
 * Update the global shared timer_state.qemu_icount to take into
 * account executed instructions. This is done by the TCG vCPU
 * thread so the main-loop can see time has moved forward.
 */
static void cpu_update_icount_locked(CPUState *cpu)
{
    int64_t executed = cpu_get_icount_executed(cpu);
    cpu->icount_budget -= executed;

    atomic_set_i64(&timers_state.qemu_icount,
                   timers_state.qemu_icount + executed);
}

/*
 * Update the global shared timer_state.qemu_icount to take into
 * account executed instructions. This is done by the TCG vCPU
 * thread so the main-loop can see time has moved forward.
 */
void cpu_update_icount(CPUState *cpu)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    cpu_update_icount_locked(cpu);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}
static int64_t cpu_get_icount_raw_locked(void)
{
    CPUState *cpu = current_cpu;

    if (cpu && cpu->running) {
        if (!cpu->can_do_io) {
            error_report("Bad icount read");
            exit(1);
        }
        /* Take into account what has run */
        cpu_update_icount_locked(cpu);
    }
    /* The read is protected by the seqlock, but needs atomic64 to avoid UB */
    return atomic_read_i64(&timers_state.qemu_icount);
}
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount = cpu_get_icount_raw_locked();
    return atomic_read_i64(&timers_state.qemu_icount_bias) +
           cpu_icount_to_ns(icount);
}

int64_t cpu_get_icount_raw(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_raw_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}
/* Return the virtual CPU time, based on the instruction counter.  */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}

int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << atomic_read(&timers_state.icount_time_shift);
}
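/*
 * With icount_time_shift == 3 every guest instruction accounts for
 * 2^3 = 8 ns of virtual time, i.e. 125 MIPS; at MAX_ICOUNT_SHIFT (10) an
 * instruction accounts for 1024 ns, roughly the 1 MIPS minimum noted above.
 */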
static int64_t cpu_get_ticks_locked(void)
{
    int64_t ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_host_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Non increasing ticks may happen if the host uses software suspend.  */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}
/* return the time elapsed in VM between vm_start and vm_stop.  Unless
 * icount is active, cpu_get_ticks() uses units of the host CPU cycle
 * counter.
 */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    qemu_spin_lock(&timers_state.vm_clock_lock);
    ticks = cpu_get_ticks_locked();
    qemu_spin_unlock(&timers_state.vm_clock_lock);
    return ticks;
}
static int64_t cpu_get_clock_locked(void)
{
    int64_t time;

    time = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        time += get_clock();
    }

    return time;
}

/* Return the monotonic time elapsed in VM, i.e.,
 * the time between vm_start and vm_stop
 */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}
/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}
/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex.  */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && timers_state.icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        atomic_set(&timers_state.icount_time_shift,
                   timers_state.icount_time_shift - 1);
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        atomic_set(&timers_state.icount_time_shift,
                   timers_state.icount_time_shift + 1);
    }
    last_delta = delta;
    atomic_set_i64(&timers_state.qemu_icount_bias,
                   cur_icount - (timers_state.qemu_icount
                                 << timers_state.icount_time_shift));
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}
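/*
 * In other words: delta > 0 means the virtual clock has run ahead of the
 * real-time reference, so decrementing icount_time_shift halves the
 * nanoseconds charged per instruction and lets real time catch up, while
 * delta < 0 speeds virtual time up again by incrementing the shift.
 */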
static void icount_adjust_rt(void *opaque)
{
    timer_mod(timers_state.icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(timers_state.icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND / 10);
    icount_adjust();
}
static int64_t qemu_icount_round(int64_t count)
{
    int shift = atomic_read(&timers_state.icount_time_shift);
    return (count + (1 << shift) - 1) >> shift;
}
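/*
 * qemu_icount_round() divides by 2^shift, rounding up: with shift == 3 a
 * 1 ms (1,000,000 ns) deadline becomes (1000000 + 7) >> 3 = 125000
 * instructions of budget.
 */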
static void icount_warp_rt(void)
{
    unsigned seq;
    int64_t warp_start;

    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    do {
        seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        warp_start = timers_state.vm_clock_warp_start;
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));

    if (warp_start == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (runstate_is_running()) {
        int64_t clock = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT,
                                            cpu_get_clock_locked());
        int64_t warp_delta;

        warp_delta = clock - timers_state.vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = clock - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        atomic_set_i64(&timers_state.qemu_icount_bias,
                       timers_state.qemu_icount_bias + warp_delta);
    }
    timers_state.vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
static void icount_timer_cb(void *opaque)
{
    /* No need for a checkpoint because the timer already synchronizes
     * with CHECKPOINT_CLOCK_VIRTUAL_RT.
     */
    icount_warp_rt();
}
void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    AioContext *aio_context;
    assert(qtest_enabled());
    aio_context = qemu_get_aio_context();
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);

        seqlock_write_lock(&timers_state.vm_clock_seqlock,
                           &timers_state.vm_clock_lock);
        atomic_set_i64(&timers_state.qemu_icount_bias,
                       timers_state.qemu_icount_bias + warp);
        seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                             &timers_state.vm_clock_lock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
void qemu_start_warp_timer(void)
{
    int64_t clock;
    int64_t deadline;

    if (!use_icount) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    if (replay_mode != REPLAY_MODE_PLAY) {
        if (!all_cpu_threads_idle()) {
            return;
        }

        if (qtest_enabled()) {
            /* When testing, qtest commands advance icount.  */
            return;
        }

        replay_checkpoint(CHECKPOINT_CLOCK_WARP_START);
    } else {
        /* warp clock deterministically in record/replay mode */
        if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
            /* vCPU is sleeping and warp can't be started.
               It is probably a race condition: notification sent
               to vCPU was processed in advance and vCPU went to sleep.
               Therefore we have to wake it up for doing something. */
            if (replay_has_checkpoint()) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
            return;
        }
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        static bool notified;
        if (!icount_sleep && !notified) {
            warn_report("icount sleep disabled and no active timers");
            notified = true;
        }
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         */
        if (!icount_sleep) {
            /*
             * We never let VCPUs sleep in no sleep icount mode.
             * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
             * to the next QEMU_CLOCK_VIRTUAL event and notify it.
             * It is useful when we want a deterministic execution time,
             * isolated from host latencies.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock,
                               &timers_state.vm_clock_lock);
            atomic_set_i64(&timers_state.qemu_icount_bias,
                           timers_state.qemu_icount_bias + deadline);
            seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                                 &timers_state.vm_clock_lock);
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
        } else {
            /*
             * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
             * "real" time, (related to the time left until the next event) has
             * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
             * This avoids that the warps are visible externally; for example,
             * you will not be sending network packets continuously instead of
             * every 100ms.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock,
                               &timers_state.vm_clock_lock);
            if (timers_state.vm_clock_warp_start == -1
                || timers_state.vm_clock_warp_start > clock) {
                timers_state.vm_clock_warp_start = clock;
            }
            seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                                 &timers_state.vm_clock_lock);
            timer_mod_anticipate(timers_state.icount_warp_timer,
                                 clock + deadline);
        }
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
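/*
 * Summary of the three deadline cases above: deadline < 0 means no
 * QEMU_CLOCK_VIRTUAL timer is pending, so there is nothing to warp to;
 * deadline == 0 means a timer is already due and only needs a notify;
 * deadline > 0 either warps the clock immediately (sleep=off) or arms
 * icount_warp_timer so icount_warp_rt() can add the elapsed real time to
 * qemu_icount_bias once the vCPUs have slept.
 */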
static void qemu_account_warp_timer(void)
{
    if (!use_icount || !icount_sleep) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    /* warp clock deterministically in record/replay mode */
    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
        return;
    }

    timer_del(timers_state.icount_warp_timer);
    icount_warp_rt();
}
static bool icount_state_needed(void *opaque)
{
    return use_icount;
}

static bool warp_timer_state_needed(void *opaque)
{
    TimersState *s = opaque;

    return s->icount_warp_timer != NULL;
}

static bool adjust_timers_state_needed(void *opaque)
{
    TimersState *s = opaque;

    return s->icount_rt_timer != NULL;
}
/*
 * Subsection for warp timer migration is optional, because it may not be
 * created.
 */
static const VMStateDescription icount_vmstate_warp_timer = {
    .name = "timer/icount/warp_timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = warp_timer_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(vm_clock_warp_start, TimersState),
        VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription icount_vmstate_adjust_timers = {
    .name = "timer/icount/timers",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = adjust_timers_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
        VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icount_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_warp_timer,
        &icount_vmstate_adjust_timers,
        NULL
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_UNUSED(8),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_timers,
        NULL
    }
};
static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
{
    double pct;
    double throttle_ratio;
    long sleeptime_ns;

    if (!cpu_throttle_get_percentage()) {
        return;
    }

    pct = (double)cpu_throttle_get_percentage()/100;
    throttle_ratio = pct / (1 - pct);
    sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);

    qemu_mutex_unlock_iothread();
    g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
    qemu_mutex_lock_iothread();
    atomic_set(&cpu->throttle_thread_scheduled, 0);
}
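/*
 * Example: at a 75% throttle pct == 0.75, throttle_ratio == 3 and
 * sleeptime_ns == 3 * CPU_THROTTLE_TIMESLICE_NS == 30 ms, so each 10 ms of
 * vCPU execution is followed by roughly 30 ms of sleep and the vCPU gets
 * about a quarter of the wall-clock time.
 */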
static void cpu_throttle_timer_tick(void *opaque)
{
    CPUState *cpu;
    double pct;

    /* Stop the timer if needed */
    if (!cpu_throttle_get_percentage()) {
        return;
    }
    CPU_FOREACH(cpu) {
        if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
            async_run_on_cpu(cpu, cpu_throttle_thread,
                             RUN_ON_CPU_NULL);
        }
    }

    pct = (double)cpu_throttle_get_percentage()/100;
    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                                   CPU_THROTTLE_TIMESLICE_NS / (1-pct));
}
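/*
 * The timer period CPU_THROTTLE_TIMESLICE_NS / (1 - pct) keeps the
 * run/sleep pattern consistent with cpu_throttle_thread(): at 75% it
 * re-arms every 40 ms, covering the 10 ms of execution plus the 30 ms the
 * vCPUs spend sleeping.
 */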
void cpu_throttle_set(int new_throttle_pct)
{
    /* Ensure throttle percentage is within valid range */
    new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
    new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);

    atomic_set(&throttle_percentage, new_throttle_pct);

    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                                       CPU_THROTTLE_TIMESLICE_NS);
}
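/*
 * Callers (for example migration auto-converge) can request any value; it
 * is clamped to the 1-99% range so the vCPUs always make some progress and
 * the throttle ratio above never divides by zero.
 */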
void cpu_throttle_stop(void)
{
    atomic_set(&throttle_percentage, 0);
}

bool cpu_throttle_active(void)
{
    return (cpu_throttle_get_percentage() != 0);
}

int cpu_throttle_get_percentage(void)
{
    return atomic_read(&throttle_percentage);
}
void cpu_ticks_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock);
    qemu_spin_init(&timers_state.vm_clock_lock);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                  cpu_throttle_timer_tick, NULL);
}
void configure_icount(QemuOpts *opts, Error **errp)
{
    const char *option;
    char *rem_str = NULL;

    option = qemu_opt_get(opts, "shift");
    if (!option) {
        if (qemu_opt_get(opts, "align") != NULL) {
            error_setg(errp, "Please specify shift option when using align");
        }
        return;
    }

    icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
    if (icount_sleep) {
        timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                                      icount_timer_cb, NULL);
    }

    icount_align_option = qemu_opt_get_bool(opts, "align", false);

    if (icount_align_option && !icount_sleep) {
        error_setg(errp, "align=on and sleep=off are incompatible");
    }
    if (strcmp(option, "auto") != 0) {
        errno = 0;
        timers_state.icount_time_shift = strtol(option, &rem_str, 0);
        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
            error_setg(errp, "icount: Invalid shift value");
        }
        use_icount = 1;
        return;
    } else if (icount_align_option) {
        error_setg(errp, "shift=auto and align=on are incompatible");
    } else if (!icount_sleep) {
        error_setg(errp, "shift=auto and sleep=off are incompatible");
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    timers_state.icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    timers_state.vm_clock_warp_start = -1;
    timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                                                icount_adjust_rt, NULL);
    timer_mod(timers_state.icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                                icount_adjust_vm, NULL);
    timer_mod(timers_state.icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND / 10);
}
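/*
 * A fixed shift from the "shift" option (use_icount == 1) therefore charges
 * a constant 2^shift ns per instruction, e.g. 128 ns for shift=7, while
 * shift=auto (use_icount == 2) starts at 8 ns per instruction and lets
 * icount_adjust_rt/icount_adjust_vm tune the shift at run time; this is the
 * adaptive mode tested for in icount_warp_rt().
 */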
/***********************************************************/
/* TCG vCPU kick timer
 *
 * The kick timer is responsible for moving single threaded vCPU
 * emulation on to the next vCPU. If more than one vCPU is running a
 * timer event will force a cpu->exit so the next vCPU can get
 * scheduled.
 *
 * The timer is removed if all vCPUs are idle and restarted again once
 * idleness is complete.
 */

static QEMUTimer *tcg_kick_vcpu_timer;
static CPUState *tcg_current_rr_cpu;

#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)

static inline int64_t qemu_tcg_next_kick(void)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
}
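/*
 * TCG_KICK_PERIOD is 100 ms of QEMU_CLOCK_VIRTUAL time, so in round-robin
 * mode a busy vCPU is forced out at least ten times per virtual second,
 * giving the other vCPUs a chance to run.
 */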
/* Kick the currently round-robin scheduled vCPU */
static void qemu_cpu_kick_rr_cpu(void)
{
    CPUState *cpu;
    do {
        cpu = atomic_mb_read(&tcg_current_rr_cpu);
        if (cpu) {
            cpu_exit(cpu);
        }
    } while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
}
static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
{
}

void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
{
    if (!use_icount || type != QEMU_CLOCK_VIRTUAL) {
        qemu_notify_event();
        return;
    }

    if (qemu_in_vcpu_thread()) {
        /* A CPU is currently running; kick it back out to the
         * tcg_cpu_exec() loop so it will recalculate its
         * icount deadline immediately.
         */
        qemu_cpu_kick(current_cpu);
    } else if (first_cpu) {
        /* qemu_cpu_kick is not enough to kick a halted CPU out of
         * qemu_tcg_wait_io_event.  async_run_on_cpu, instead,
         * causes cpu_thread_is_idle to return false.  This way,
         * handle_icount_deadline can run.
         * If we have no CPUs at all for some reason, we don't
         * need to do anything.
         */
        async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
    }
}
static void kick_tcg_thread(void *opaque)
{
    timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    qemu_cpu_kick_rr_cpu();
}

static void start_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
        tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                           kick_tcg_thread, NULL);
    }
    if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) {
        timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    }
}

static void stop_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) {
        timer_del(tcg_kick_vcpu_timer);
    }
}
/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}
void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
        /* TODO: move to cpu_synchronize_state() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_state(cpu);
        }
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
        /* TODO: move to cpu_synchronize_post_reset() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_post_reset(cpu);
        }
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
        /* TODO: move to cpu_synchronize_post_init() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_post_init(cpu);
        }
    }
}

void cpu_synchronize_all_pre_loadvm(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_pre_loadvm(cpu);
    }
}
static int do_vm_stop(RunState state, bool send_stop)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        if (send_stop) {
            qapi_event_send_stop();
        }
    }

    bdrv_drain_all();
    replay_disable_events();
    ret = bdrv_flush_all();

    return ret;
}

/* Special vm_stop() variant for terminating the process.  Historically clients
 * did not expect a QMP STOP event and so we need to retain compatibility.
 */
int vm_shutdown(void)
{
    return do_vm_stop(RUN_STATE_SHUTDOWN, false);
}
static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}
#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
{
    if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
        sigbus_reraise();
    }

    if (current_cpu) {
        /* Called asynchronously in VCPU thread.  */
        if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    } else {
        /* Called synchronously (via signalfd) in main thread.  */
        if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
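/*
 * PR_MCE_KILL_EARLY asks the kernel to deliver SIGBUS with BUS_MCEERR_AO as
 * soon as a memory error is found, rather than only when the poisoned page
 * is actually touched; that early notification is what sigbus_handler()
 * forwards to KVM above.
 */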
#else /* !CONFIG_LINUX */
static void qemu_init_sigbus(void)
{
}
#endif /* !CONFIG_LINUX */
static QemuMutex qemu_global_mutex;

static QemuThread io_thread;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}
void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
}

static void qemu_kvm_destroy_vcpu(CPUState *cpu)
{
    if (kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}

static void qemu_tcg_destroy_vcpu(CPUState *cpu)
{
}

static void qemu_cpu_stop(CPUState *cpu, bool exit)
{
    g_assert(qemu_cpu_is_self(cpu));
    cpu->stop = false;
    cpu->stopped = true;
    if (exit) {
        cpu_exit(cpu);
    }
    qemu_cond_broadcast(&qemu_pause_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    atomic_mb_set(&cpu->thread_kicked, false);
    if (cpu->stop) {
        qemu_cpu_stop(cpu, false);
    }
    process_queued_cpu_work(cpu);
}

static void qemu_tcg_rr_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        stop_tcg_kick_timer();
        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
    }

    start_tcg_kick_timer();

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

#ifdef _WIN32
    /* Eat dummy APC queued by qemu_cpu_kick_thread.  */
    if (!tcg_enabled()) {
        SleepEx(0, TRUE);
    }
#endif
    qemu_wait_io_event_common(cpu);
}
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        error_report("kvm_init_vcpu failed: %s", strerror(-r));
        exit(1);
    }

    kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    do {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    qemu_kvm_destroy_vcpu(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}
static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    error_report("qtest is not supported under Windows");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    do {
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug);

    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
#endif
}
static int64_t tcg_get_icount_limit(void)
{
    int64_t deadline;

    if (replay_mode != REPLAY_MODE_PLAY) {
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        return qemu_icount_round(deadline);
    } else {
        return replay_get_instructions();
    }
}
static void handle_icount_deadline(void)
{
    assert(qemu_in_vcpu_thread());
    if (use_icount) {
        int64_t deadline =
            qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        if (deadline == 0) {
            /* Wake up other AioContexts.  */
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        }
    }
}
static void prepare_icount_for_run(CPUState *cpu)
{
    if (use_icount) {
        int insns_left;

        /* These should always be cleared by process_icount_data after
         * each vCPU execution. However u16.high can be raised
         * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
         */
        g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
        g_assert(cpu->icount_extra == 0);

        cpu->icount_budget = tcg_get_icount_limit();
        insns_left = MIN(0xffff, cpu->icount_budget);
        cpu_neg(cpu)->icount_decr.u16.low = insns_left;
        cpu->icount_extra = cpu->icount_budget - insns_left;

        replay_mutex_lock();
    }
}
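/*
 * icount_decr.u16.low is only 16 bits wide, so at most 0xffff instructions
 * of the budget are armed in the decrementer at once; the remainder is
 * parked in icount_extra, and cpu_get_icount_executed() folds the two back
 * together when accounting.
 */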
static void process_icount_data(CPUState *cpu)
{
    if (use_icount) {
        /* Account for executed instructions */
        cpu_update_icount(cpu);

        /* Reset the counters */
        cpu_neg(cpu)->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        cpu->icount_budget = 0;

        replay_account_executed_instructions();

        replay_mutex_unlock();
    }
}
static int tcg_cpu_exec(CPUState *cpu)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

    assert(tcg_enabled());
#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    cpu_exec_start(cpu);
    ret = cpu_exec(cpu);
    cpu_exec_end(cpu);
#ifdef CONFIG_PROFILER
    atomic_set(&tcg_ctx->prof.cpu_exec_time,
               tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
#endif
    return ret;
}
/* Destroy any remaining vCPUs which have been unplugged and have
 * finished running
 */
static void deal_with_unplugged_cpus(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->unplug && !cpu_can_run(cpu)) {
            qemu_tcg_destroy_vcpu(cpu);
            cpu->created = false;
            qemu_cond_signal(&qemu_cpu_cond);
            break;
        }
    }
}
/* Single-threaded TCG
 *
 * In the single-threaded case each vCPU is simulated in turn. If
 * there is more than a single vCPU we create a simple timer to kick
 * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
 * This is done explicitly rather than relying on side-effects
 * elsewhere.
 */

static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    assert(tcg_enabled());
    rcu_register_thread();
    tcg_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->created = true;
    cpu->can_do_io = 1;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            current_cpu = cpu;
            qemu_wait_io_event_common(cpu);
        }
    }

    start_tcg_kick_timer();

    cpu = first_cpu;

    /* process any pending work */
    cpu->exit_request = 1;

    while (1) {
        qemu_mutex_unlock_iothread();
        replay_mutex_lock();
        qemu_mutex_lock_iothread();
        /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
        qemu_account_warp_timer();

        /* Run the timers here.  This is much more efficient than
         * waking up the I/O thread and waiting for completion.
         */
        handle_icount_deadline();

        replay_mutex_unlock();

        if (!cpu) {
            cpu = first_cpu;
        }

        while (cpu && !cpu->queued_work_first && !cpu->exit_request) {

            atomic_mb_set(&tcg_current_rr_cpu, cpu);
            current_cpu = cpu;

            qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                              (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

            if (cpu_can_run(cpu)) {
                int r;

                qemu_mutex_unlock_iothread();
                prepare_icount_for_run(cpu);

                r = tcg_cpu_exec(cpu);

                process_icount_data(cpu);
                qemu_mutex_lock_iothread();

                if (r == EXCP_DEBUG) {
                    cpu_handle_guest_debug(cpu);
                    break;
                } else if (r == EXCP_ATOMIC) {
                    qemu_mutex_unlock_iothread();
                    cpu_exec_step_atomic(cpu);
                    qemu_mutex_lock_iothread();
                    break;
                }
            } else if (cpu->stop) {
                if (cpu->unplug) {
                    cpu = CPU_NEXT(cpu);
                }
                break;
            }

            cpu = CPU_NEXT(cpu);
        } /* while (cpu && !cpu->exit_request).. */

        /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
        atomic_set(&tcg_current_rr_cpu, NULL);

        if (cpu && cpu->exit_request) {
            atomic_mb_set(&cpu->exit_request, 0);
        }

        if (use_icount && all_cpu_threads_idle()) {
            /*
             * When all cpus are sleeping (e.g in WFI), to avoid a deadlock
             * in the main_loop, wake it up in order to start the warp timer.
             */
            qemu_notify_event();
        }

        qemu_tcg_rr_wait_io_event();
        deal_with_unplugged_cpus();
    }

    rcu_unregister_thread();
    return NULL;
}
static void *qemu_hax_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();
    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->created = true;
    current_cpu = cpu;

    hax_init_vcpu(cpu);
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    do {
        if (cpu_can_run(cpu)) {
            r = hax_smp_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }

        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));
    rcu_unregister_thread();
    return NULL;
}
/* The HVF-specific vCPU thread function. This one should only run when the host
 * CPU supports the VMX "unrestricted guest" feature. */
static void *qemu_hvf_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    int r;

    assert(hvf_enabled());

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    hvf_init_vcpu(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    do {
        if (cpu_can_run(cpu)) {
            r = hvf_vcpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    hvf_vcpu_destroy(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}
static void *qemu_whpx_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = whpx_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "whpx_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    do {
        if (cpu_can_run(cpu)) {
            r = whpx_vcpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        while (cpu_thread_is_idle(cpu)) {
            qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
        }
        qemu_wait_io_event_common(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    whpx_destroy_vcpu(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}
#ifdef _WIN32
static void CALLBACK dummy_apc_func(ULONG_PTR unused)
{
}
#endif
/* Multi-threaded TCG
 *
 * In the multi-threaded case each vCPU has its own thread. The TLS
 * variable current_cpu can be used deep in the code to find the
 * current CPUState for a given thread.
 */

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    assert(tcg_enabled());
    g_assert(!use_icount);

    rcu_register_thread();
    tcg_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->created = true;
    cpu->can_do_io = 1;
    current_cpu = cpu;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    /* process any pending work */
    cpu->exit_request = 1;

    do {
        if (cpu_can_run(cpu)) {
            int r;
            qemu_mutex_unlock_iothread();
            r = tcg_cpu_exec(cpu);
            qemu_mutex_lock_iothread();
            switch (r) {
            case EXCP_DEBUG:
                cpu_handle_guest_debug(cpu);
                break;
            case EXCP_HALTED:
                /* during start-up the vCPU is reset and the thread is
                 * kicked several times. If we don't ensure we go back
                 * to sleep in the halted state we won't cleanly
                 * start-up when the vCPU is enabled.
                 *
                 * cpu->halted should ensure we sleep in wait_io_event
                 */
                g_assert(cpu->halted);
                break;
            case EXCP_ATOMIC:
                qemu_mutex_unlock_iothread();
                cpu_exec_step_atomic(cpu);
                qemu_mutex_lock_iothread();
            default:
                /* Ignore everything else? */
                break;
            }
        }

        atomic_mb_set(&cpu->exit_request, 0);
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    qemu_tcg_destroy_vcpu(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    if (cpu->thread_kicked) {
        return;
    }
    cpu->thread_kicked = true;
    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err && err != ESRCH) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        if (whpx_enabled()) {
            whpx_vcpu_kick(cpu);
        } else if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
            fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
                    __func__, GetLastError());
            exit(1);
        }
    }
#endif
}
void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (tcg_enabled()) {
        cpu_exit(cpu);
        /* NOP unless doing single-thread RR */
        qemu_cpu_kick_rr_cpu();
    } else {
        if (hax_enabled()) {
            /*
             * FIXME: race condition with the exit_request check in
             * hax_vcpu_hax_exec
             */
            cpu->exit_request = 1;
        }
        qemu_cpu_kick_thread(cpu);
    }
}

void qemu_cpu_kick_self(void)
{
    assert(current_cpu);
    qemu_cpu_kick_thread(current_cpu);
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}
static __thread bool iothread_locked = false;

bool qemu_mutex_iothread_locked(void)
{
    return iothread_locked;
}

/*
 * The BQL is taken from so many places that it is worth profiling the
 * callers directly, instead of funneling them all through a single function.
 */
void qemu_mutex_lock_iothread_impl(const char *file, int line)
{
    QemuMutexLockFunc bql_lock = atomic_read(&qemu_bql_mutex_lock_func);

    g_assert(!qemu_mutex_iothread_locked());
    bql_lock(&qemu_global_mutex, file, line);
    iothread_locked = true;
}

void qemu_mutex_unlock_iothread(void)
{
    g_assert(qemu_mutex_iothread_locked());
    iothread_locked = false;
    qemu_mutex_unlock(&qemu_global_mutex);
}
static bool all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return false;
        }
    }

    return true;
}

void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        if (qemu_cpu_is_self(cpu)) {
            qemu_cpu_stop(cpu, true);
        } else {
            cpu->stop = true;
            qemu_cpu_kick(cpu);
        }
    }

    /* We need to drop the replay_lock so any vCPU threads woken up
     * can finish their replay tasks
     */
    replay_mutex_unlock();

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }

    qemu_mutex_unlock_iothread();
    replay_mutex_lock();
    qemu_mutex_lock_iothread();
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

void cpu_remove_sync(CPUState *cpu)
{
    cpu->stop = true;
    cpu->unplug = true;
    qemu_cpu_kick(cpu);
    qemu_mutex_unlock_iothread();
    qemu_thread_join(cpu->thread);
    qemu_mutex_lock_iothread();
}
/* For temporary buffers for forming a name */
#define VCPU_THREAD_NAME_SIZE 16

static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];
    static QemuCond *single_tcg_halt_cond;
    static QemuThread *single_tcg_cpu_thread;
    static int tcg_region_inited;

    assert(tcg_enabled());
    /*
     * Initialize TCG regions--once. Now is a good time, because:
     * (1) TCG's init context, prologue and target globals have been set up.
     * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the
     *     -accel flag is processed, so the check doesn't work then).
     */
    if (!tcg_region_inited) {
        tcg_region_inited = 1;
        tcg_region_init();
    }

    if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);

        if (qemu_tcg_mttcg_enabled()) {
            /* create a thread per vCPU with TCG (MTTCG) */
            parallel_cpus = true;
            snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                     cpu->cpu_index);

            qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                               cpu, QEMU_THREAD_JOINABLE);

        } else {
            /* share a single thread for all cpus with TCG */
            snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
            qemu_thread_create(cpu->thread, thread_name,
                               qemu_tcg_rr_cpu_thread_fn,
                               cpu, QEMU_THREAD_JOINABLE);

            single_tcg_halt_cond = cpu->halt_cond;
            single_tcg_cpu_thread = cpu->thread;
        }
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
    } else {
        /* For non-MTTCG cases we share the thread */
        cpu->thread = single_tcg_cpu_thread;
        cpu->halt_cond = single_tcg_halt_cond;
        cpu->thread_id = first_cpu->thread_id;
        cpu->can_do_io = 1;
        cpu->created = true;
    }
}
static void qemu_hax_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);

    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
    cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
}

static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
}

static void qemu_hvf_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    /* HVF currently does not support TCG, and only runs in
     * unrestricted-guest mode. */
    assert(hvf_enabled());

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);

    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
}

static void qemu_whpx_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_whpx_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
    cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
}
void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    cpu->random_seed = qemu_guest_random_seed_thread_part1();

    if (!cpu->as) {
        /* If the target cpu hasn't set up any address spaces itself,
         * give it the default one.
         */
        cpu->num_ases = 1;
        cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
    }

    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (hax_enabled()) {
        qemu_hax_start_vcpu(cpu);
    } else if (hvf_enabled()) {
        qemu_hvf_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else if (whpx_enabled()) {
        qemu_whpx_start_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }

    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = true;
        cpu_exit(current_cpu);
    }
}

int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state, true);
}
/**
 * Prepare for (re)starting the VM.
 * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
 * running or in case of an error condition), 0 otherwise.
 */
int vm_prepare_start(void)
{
    RunState requested;

    qemu_vmstop_requested(&requested);
    if (runstate_is_running() && requested == RUN_STATE__MAX) {
        return -1;
    }

    /* Ensure that a STOP/RESUME pair of events is emitted if a
     * vmstop request was pending.  The BLOCK_IO_ERROR event, for
     * example, according to documentation is always followed by
     * the STOP event.
     */
    if (runstate_is_running()) {
        qapi_event_send_stop();
        qapi_event_send_resume();
        return -1;
    }

    /* We are sending this now, but the CPUs will be resumed shortly later */
    qapi_event_send_resume();

    replay_enable_events();
    cpu_enable_ticks();
    runstate_set(RUN_STATE_RUNNING);
    vm_state_notify(1, RUN_STATE_RUNNING);
    return 0;
}

void vm_start(void)
{
    if (!vm_prepare_start()) {
        resume_all_vcpus();
    }
}
/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);

        bdrv_drain_all();
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}

void list_cpus(const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list();
#endif
}
CpuInfoList *qmp_query_cpus(Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_RISCV)
        RISCVCPU *riscv_cpu = RISCV_CPU(cpu);
        CPURISCVState *env = &riscv_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#elif defined(TARGET_TRICORE)
        TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
        CPUTriCoreState *env = &tricore_cpu->env;
#elif defined(TARGET_S390X)
        S390CPU *s390_cpu = S390_CPU(cpu);
        CPUS390XState *env = &s390_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->arch = CPU_INFO_ARCH_X86;
        info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->arch = CPU_INFO_ARCH_PPC;
        info->value->u.ppc.nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->arch = CPU_INFO_ARCH_SPARC;
        info->value->u.q_sparc.pc = env->pc;
        info->value->u.q_sparc.npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->arch = CPU_INFO_ARCH_MIPS;
        info->value->u.q_mips.PC = env->active_tc.PC;
#elif defined(TARGET_TRICORE)
        info->value->arch = CPU_INFO_ARCH_TRICORE;
        info->value->u.tricore.PC = env->PC;
#elif defined(TARGET_S390X)
        info->value->arch = CPU_INFO_ARCH_S390;
        info->value->u.s390.cpu_state = env->cpu_state;
#elif defined(TARGET_RISCV)
        info->value->arch = CPU_INFO_ARCH_RISCV;
        info->value->u.riscv.pc = env->pc;
#else
        info->value->arch = CPU_INFO_ARCH_OTHER;
#endif
        info->value->has_props = !!mc->cpu_index_to_instance_props;
        if (info->value->has_props) {
            CpuInstanceProperties *props;
            props = g_malloc0(sizeof(*props));
            *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
            info->value->props = props;
        }

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
static CpuInfoArch sysemu_target_to_cpuinfo_arch(SysEmuTarget target)
{
    /*
     * The @SysEmuTarget -> @CpuInfoArch mapping below is based on the
     * TARGET_ARCH -> TARGET_BASE_ARCH mapping in the "configure" script.
     */
    switch (target) {
    case SYS_EMU_TARGET_I386:
    case SYS_EMU_TARGET_X86_64:
        return CPU_INFO_ARCH_X86;

    case SYS_EMU_TARGET_PPC:
    case SYS_EMU_TARGET_PPC64:
        return CPU_INFO_ARCH_PPC;

    case SYS_EMU_TARGET_SPARC:
    case SYS_EMU_TARGET_SPARC64:
        return CPU_INFO_ARCH_SPARC;

    case SYS_EMU_TARGET_MIPS:
    case SYS_EMU_TARGET_MIPSEL:
    case SYS_EMU_TARGET_MIPS64:
    case SYS_EMU_TARGET_MIPS64EL:
        return CPU_INFO_ARCH_MIPS;

    case SYS_EMU_TARGET_TRICORE:
        return CPU_INFO_ARCH_TRICORE;

    case SYS_EMU_TARGET_S390X:
        return CPU_INFO_ARCH_S390;

    case SYS_EMU_TARGET_RISCV32:
    case SYS_EMU_TARGET_RISCV64:
        return CPU_INFO_ARCH_RISCV;

    default:
        return CPU_INFO_ARCH_OTHER;
    }
}
static void cpustate_to_cpuinfo_s390(CpuInfoS390 *info, const CPUState *cpu)
{
#ifdef TARGET_S390X
    S390CPU *s390_cpu = S390_CPU(cpu);
    CPUS390XState *env = &s390_cpu->env;

    info->cpu_state = env->cpu_state;
#else
    abort();
#endif
}
/*
 * fast means: we NEVER interrupt vCPU threads to retrieve
 * information from KVM.
 */
CpuInfoFastList *qmp_query_cpus_fast(Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    CpuInfoFastList *head = NULL, *cur_item = NULL;
    SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME,
                                          -1, &error_abort);
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoFastList *info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));

        info->value->cpu_index = cpu->cpu_index;
        info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
        info->value->thread_id = cpu->thread_id;

        info->value->has_props = !!mc->cpu_index_to_instance_props;
        if (info->value->has_props) {
            CpuInstanceProperties *props;
            props = g_malloc0(sizeof(*props));
            *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
            info->value->props = props;
        }

        info->value->arch = sysemu_target_to_cpuinfo_arch(target);
        info->value->target = target;
        if (target == SYS_EMU_TARGET_S390X) {
            cpustate_to_cpuinfo_s390(&info->value->u.s390x, cpu);
        }

        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];
    int64_t orig_addr = addr, orig_size = size;

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                   "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
                             " specified", orig_addr, orig_size);
            goto exit;
        }
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}
void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_physical_memory_read(addr, buf, l);
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}
void qmp_inject_nmi(Error **errp)
{
    nmi_monitor_handle(monitor_get_cpu_index(), errp);
}

void dump_drift_info(void)
{
    if (!use_icount) {
        return;
    }

    qemu_printf("Host - Guest clock  %"PRIi64" ms\n",
                (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
    if (icount_align_option) {
        qemu_printf("Max guest delay     %"PRIi64" ms\n",
                    -max_delay / SCALE_MS);
        qemu_printf("Max guest advance   %"PRIi64" ms\n",
                    max_advance / SCALE_MS);
    } else {
        qemu_printf("Max guest delay     NA\n");
        qemu_printf("Max guest advance   NA\n");
    }
}