/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */
static CPUState *next_cpu;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || qemu_cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}
/***********************************************************/
/* guest cycle counter */

/* Conversion factor from emulated instructions to virtual clock ticks.  */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed.  */
static int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;
static int64_t vm_clock_warp_start;
static int64_t qemu_icount;

typedef struct TimersState {
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

static TimersState timers_state;
/* Return the virtual CPU time, based on the instruction counter.  */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = qemu_icount;
    if (cpu) {
        CPUArchState *env = cpu->env_ptr;
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}
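
/*
 * Worked example (added): with icount_time_shift == 3 each completed guest
 * instruction accounts for 1 << 3 = 8 ns of QEMU_CLOCK_VIRTUAL, i.e. the
 * guest nominally executes 125 million instructions per emulated second;
 * qemu_icount_bias absorbs any warps applied while the CPUs were idle.
 */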
/* return the host CPU cycle counter and handle stop/restart */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non increasing ticks may happen if the host uses
               software suspend */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        return ti + timers_state.cpu_clock_offset;
    }
}

/* enable cpu_get_ticks() */
void cpu_enable_ticks(void)
{
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
}

/* disable cpu_get_ticks(): the clock is stopped.  You must not call
   cpu_get_ticks() after that.  */
void cpu_disable_ticks(void)
{
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock();
        timers_state.cpu_ticks_enabled = 0;
    }
}
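
/*
 * Note (added): while ticks are enabled cpu_get_ticks() returns
 * host_ticks + cpu_ticks_offset; cpu_disable_ticks() freezes that sum into
 * the offset, and the subtraction in cpu_enable_ticks() makes the counter
 * resume from the frozen value instead of jumping to the current host count.
 */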
/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    cur_time = cpu_get_clock();
    cur_icount = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    delta = cur_icount - cur_time;

    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}

static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
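
/*
 * Example (added): with icount_time_shift == 3 a 100 ns deadline rounds up
 * to (100 + 7) >> 3 = 13 instructions, so the vCPU never runs past the next
 * QEMU_CLOCK_VIRTUAL timer.
 */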
static void icount_warp_rt(void *opaque)
{
    if (vm_clock_warp_start == -1) {
        return;
    }

    if (runstate_is_running()) {
        int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        int64_t warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 1) {
            qemu_icount_bias += warp_delta;
        } else {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock();
            int64_t cur_icount = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
            int64_t delta = cur_time - cur_icount;
            qemu_icount_bias += MIN(warp_delta, delta);
        }
        if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
        }
    }
    vm_clock_warp_start = -1;
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = MIN(dest - clock, deadline);
        qemu_icount_bias += warp;
        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
void qemu_clock_warp(QEMUClockType type)
{
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(QEMU_CLOCK_VIRTUAL)) {
        timer_del(icount_warp_timer);
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    vm_clock_warp_start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    /* We want to use the earliest deadline from ALL vm_clocks */
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

    /* Maintain prior (possibly buggy) behaviour where if no deadline
     * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
     * INT32_MAX nanoseconds ahead, we still use INT32_MAX
     * nanoseconds.
     */
    if ((deadline < 0) || (deadline > INT32_MAX)) {
        deadline = INT32_MAX;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some "real" time, (related to the time left until the next
         * event) has passed.  The QEMU_CLOCK_REALTIME timer will do this.
         * This avoids that the warps are visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        timer_mod(icount_warp_timer, vm_clock_warp_start + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields      = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};
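
/*
 * Note (added): configure_icount() below receives the argument of the
 * -icount command line option, e.g. "3" for a fixed instruction-to-ns shift
 * or "auto" to let icount_adjust() tune the shift at run time; a NULL option
 * means icount is disabled and only the vmstate registration is performed.
 */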
void configure_icount(const char *option)
{
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
                                     icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
                                   icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
}
/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}
static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        monitor_protocol_event(QEVENT_STOP, NULL);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}
static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}
#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */
#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}
static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */
static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}
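
/*
 * Usage sketch (added, hypothetical caller): code running in the I/O thread
 * can push work onto a vCPU thread and wait for it, e.g.
 *
 *     static void do_reset_one(void *data)
 *     {
 *         cpu_reset(CPU(data));
 *     }
 *     ...
 *     run_on_cpu(cpu, do_reset_one, cpu);
 *
 * run_on_cpu() only returns once the target vCPU thread has executed the
 * function; async_run_on_cpu() below is the fire-and-forget variant.
 */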
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}

static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}
static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
       /* Start accounting real time to the virtual clock if the CPUs
          are idle.  */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}
static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (QTAILQ_FIRST(&cpus)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}
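
/*
 * Note (added): the pair below implements the "big QEMU lock".  Device
 * emulation and other I/O-thread code run with it held, and vCPU threads
 * re-take it whenever they leave guest execution; kicking first_cpu nudges
 * the TCG thread to drop the lock quickly when the I/O thread is waiting.
 */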
void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return 0;
        }
    }

    return 1;
}

void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, cpu,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}
int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}
static int tcg_cpu_exec(CPUArchState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int64_t deadline;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        count = qemu_icount_round(deadline);
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}
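
/*
 * Note (added): tcg_exec_all() above is round-robin; the single TCG host
 * thread runs each vCPU in turn until it exhausts its icount budget, hits a
 * debug exit or a stop, or an exit_request arrives, then returns to the
 * caller so I/O events can be serviced before the next pass.
 */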
void set_numa_modes(void)
{
    CPUState *cpu;
    int i;

    CPU_FOREACH(cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (test_bit(cpu->cpu_index, node_cpumask[i])) {
                cpu->numa_node = i;
            }
        }
    }
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_memory_rw_debug(cpu, addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}
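
/*
 * Usage sketch (added; the QMP argument names are given from memory of the
 * command schema, so treat them as an assumption):
 *
 *   { "execute": "memsave",
 *     "arguments": { "val": 4096, "size": 100, "filename": "/tmp/vmem" } }
 *
 * dumps 100 bytes of guest virtual memory starting at address 4096 through
 * the handler above.
 */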
void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_physical_memory_rw(addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);
        CPUX86State *env = &cpu->env;

        if (!env->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(env->apic_state);
        }
    }
#elif defined(TARGET_S390X)
    CPUState *cs;
    S390CPU *cpu;

    CPU_FOREACH(cs) {
        cpu = S390_CPU(cs);
        if (cpu->env.cpu_num == monitor_get_cpu_index()) {
            if (s390_cpu_restart(S390_CPU(cs)) == -1) {
                error_set(errp, QERR_UNSUPPORTED);
                return;
            }
            break;
        }
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}