/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"
#include "qmp-commands.h"

#include "qemu-thread.h"
#include "cpus.h"
#include "qtest.h"
#include "main-loop.h"

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */
static CPUArchState *next_cpu;
static bool cpu_thread_is_idle(CPUArchState *env)
{
    if (env->stop || env->queued_work_first) {
        return false;
    }
    if (env->stopped || !runstate_is_running()) {
        return true;
    }
    if (!env->halted || qemu_cpu_has_work(env) || kvm_irqchip_in_kernel()) {
        return false;
    }
    return true;
}
static bool all_cpu_threads_idle(void)
{
    CPUArchState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!cpu_thread_is_idle(env)) {
            return false;
        }
    }
    return true;
}
/***********************************************************/
/* guest cycle counter */

/* Conversion factor from emulated instructions to virtual clock ticks.  */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed.  */
static int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;
static int64_t vm_clock_warp_start;
static int64_t qemu_icount;
typedef struct TimersState {
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

TimersState timers_state;
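
/*
 * With icount enabled, virtual time is derived from the number of executed
 * instructions: each instruction accounts for 2^icount_time_shift ns of
 * vm_clock time, and qemu_icount_bias absorbs warps and speed adjustments.
 */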
/* Return the virtual CPU time, based on the instruction counter.  */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUArchState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}
/* return the host CPU cycle counter and handle stop/restart */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non increasing ticks may happen if the host uses
               software suspend */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}
/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        return ti + timers_state.cpu_clock_offset;
    }
}
/* enable cpu_get_ticks() */
void cpu_enable_ticks(void)
{
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
}
/* disable cpu_get_ticks() : the clock is stopped. You must not call
   cpu_get_ticks() after that.  */
void cpu_disable_ticks(void)
{
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock();
        timers_state.cpu_ticks_enabled = 0;
    }
}
/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    cur_time = cpu_get_clock();
    cur_icount = qemu_get_clock_ns(vm_clock);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}
static void icount_adjust_rt(void *opaque)
{
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_adjust();
}
static void icount_adjust_vm(void *opaque)
{
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
    icount_adjust();
}
static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
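
/*
 * "Warping" lets vm_clock catch up with real time while all vCPUs sleep:
 * when the warp timer fires, the real time elapsed since vm_clock_warp_start
 * is folded into qemu_icount_bias, so pending vm_clock timers can fire
 * without any guest instructions being executed.
 */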
static void icount_warp_rt(void *opaque)
{
    if (vm_clock_warp_start == -1) {
        return;
    }

    if (runstate_is_running()) {
        int64_t clock = qemu_get_clock_ns(rt_clock);
        int64_t warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 1) {
            qemu_icount_bias += warp_delta;
        } else {
            /*
             * In adaptive mode, do not let the vm_clock run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock();
            int64_t cur_icount = qemu_get_clock_ns(vm_clock);
            int64_t delta = cur_time - cur_icount;
            qemu_icount_bias += MIN(warp_delta, delta);
        }
        if (qemu_clock_expired(vm_clock)) {
            qemu_notify_event();
        }
    }
    vm_clock_warp_start = -1;
}
void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_get_clock_ns(vm_clock);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline(vm_clock);
        int64_t warp = MIN(dest - clock, deadline);
        qemu_icount_bias += warp;
        qemu_run_timers(vm_clock);
        clock = qemu_get_clock_ns(vm_clock);
    }
    qemu_notify_event();
}
void qemu_clock_warp(QEMUClock *clock)
{
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (clock != vm_clock || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
     * ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest vm_clock timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(vm_clock)) {
        qemu_del_timer(icount_warp_timer);
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
    deadline = qemu_clock_deadline(vm_clock);
    if (deadline > 0) {
        /*
         * Ensure the vm_clock proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * vm_clock.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending vm_clock timer; rather
         * time could just advance to the next vm_clock event.  Instead, we
         * do stop VCPUs and only advance vm_clock after some "real" time,
         * (related to the time left until the next event) has passed.  This
         * rt_clock timer will do this.  This avoids that the warps are too
         * visible externally---for example, you will not be sending network
         * packets continuously instead of every 100ms.
         */
        qemu_mod_timer(icount_warp_timer, vm_clock_warp_start + deadline);
    } else {
        qemu_notify_event();
    }
}
static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields      = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};
void configure_icount(const char *option)
{
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = qemu_new_timer_ms(rt_clock, icount_adjust_rt, NULL);
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_vm_timer = qemu_new_timer_ns(vm_clock, icount_adjust_vm, NULL);
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
}
/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUArchState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}
void cpu_synchronize_all_states(void)
{
    CPUArchState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}
void cpu_synchronize_all_post_reset(void)
{
    CPUArchState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}
void cpu_synchronize_all_post_init(void)
{
    CPUArchState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}
int cpu_is_stopped(CPUArchState *env)
{
    return !runstate_is_running() || env->stopped;
}
static void do_vm_stop(RunState state)
{
    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        bdrv_drain_all();
        bdrv_flush_all();
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}
static int cpu_can_run(CPUArchState *env)
{
    if (env->stop) {
        return 0;
    }
    if (env->stopped || !runstate_is_running()) {
        return 0;
    }
    return 1;
}
static void cpu_handle_guest_debug(CPUArchState *env)
{
    gdb_set_stop_cpu(env);
    qemu_system_debug_request();
    env->stopped = 1;
}
static void cpu_signal(int sig)
{
    if (cpu_single_env) {
        cpu_exit(cpu_single_env);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}
static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}
static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
static void qemu_kvm_eat_signals(CPUArchState *env)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}
#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUArchState *env)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}
static void qemu_kvm_init_cpu_signals(CPUArchState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}
static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}
#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUArchState *env)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */
QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;
void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}
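
/*
 * Run func(data) on a given vCPU: execute it directly when called from that
 * vCPU's own thread, otherwise queue a work item on the target vCPU and
 * sleep on qemu_work_cond until the vCPU thread has flushed the queue.
 */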
void run_on_cpu(CPUArchState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(env)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first) {
        env->queued_work_first = &wi;
    } else {
        env->queued_work_last->next = &wi;
    }
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        CPUArchState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}
static void flush_queued_work(CPUArchState *env)
{
    struct qemu_work_item *wi;

    if (!env->queued_work_first) {
        return;
    }

    while ((wi = env->queued_work_first)) {
        env->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    env->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}
static void qemu_wait_io_event_common(CPUArchState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
    env->thread_kicked = false;
}
static void qemu_tcg_wait_io_event(void)
{
    CPUArchState *env;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}
static void qemu_kvm_wait_io_event(CPUArchState *env)
{
    while (cpu_thread_is_idle(env)) {
        qemu_cond_wait(env->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(env);
    qemu_wait_io_event_common(env);
}
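
/*
 * Under KVM each vCPU gets its own host thread; it loops between
 * kvm_cpu_exec() and waiting for I/O events, all under the global mutex.
 */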
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUArchState *env = arg;
    CPUState *cpu = ENV_GET_CPU(env);
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    env->thread_id = qemu_get_thread_id();
    cpu_single_env = env;

    r = kvm_init_vcpu(env);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(env)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}
static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUArchState *env = arg;
    CPUState *cpu = ENV_GET_CPU(env);
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    env->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    cpu_single_env = env;
    while (1) {
        cpu_single_env = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        cpu_single_env = env;
        qemu_wait_io_event_common(env);
    }

    return NULL;
#endif
}
static void tcg_exec_all(void);
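
/*
 * TCG multiplexes all vCPUs onto a single host thread: after signalling
 * creation of every CPU and waiting for the initial kick-off, the thread
 * alternates between a round-robin execution pass (tcg_exec_all) and
 * waiting for I/O events.
 */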
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUArchState *env = arg;
    CPUState *cpu = ENV_GET_CPU(env);

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        env->thread_id = qemu_get_thread_id();
        env->created = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            qemu_wait_io_event_common(env);
        }
    }

    while (1) {
        tcg_exec_all();
        if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
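
/*
 * Force a vCPU thread out of guest execution: on POSIX hosts by sending
 * SIG_IPI, on Windows by briefly suspending the thread so cpu_signal()
 * can request an exit on its behalf.
 */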
static void qemu_cpu_kick_thread(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(env)) {
        SuspendThread(cpu->hThread);
        cpu_signal(0);
        ResumeThread(cpu->hThread);
    }
#endif
}
void qemu_cpu_kick(void *_env)
{
    CPUArchState *env = _env;

    qemu_cond_broadcast(env->halt_cond);
    if (!tcg_enabled() && !env->thread_kicked) {
        qemu_cpu_kick_thread(env);
        env->thread_kicked = true;
    }
}
void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);

    if (!cpu_single_env->thread_kicked) {
        qemu_cpu_kick_thread(cpu_single_env);
        cpu_single_env->thread_kicked = true;
    }
#else
    abort();
#endif
}
int qemu_cpu_is_self(void *_env)
{
    CPUArchState *env = _env;
    CPUState *cpu = ENV_GET_CPU(env);

    return qemu_thread_is_self(cpu->thread);
}
void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}
void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}
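
/*
 * Pausing is cooperative: ->stop is set on every vCPU and the vCPU is
 * kicked; each thread acknowledges in qemu_wait_io_event_common() by
 * turning ->stop into ->stopped and signalling qemu_pause_cond.
 */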
static int all_vcpus_paused(void)
{
    CPUArchState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped) {
            return 0;
        }
        penv = penv->next_cpu;
    }

    return 1;
}
void pause_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, false);
    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = penv->next_cpu;
    }

    if (!qemu_thread_is_self(&io_thread)) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            penv = first_cpu;
            while (penv) {
                penv->stop = 0;
                penv->stopped = 1;
                penv = penv->next_cpu;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = penv->next_cpu;
        }
    }
}
void resume_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, true);
    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_cpu_kick(penv);
        penv = penv->next_cpu;
    }
}
static void qemu_tcg_init_vcpu(void *_env)
{
    CPUArchState *env = _env;
    CPUState *cpu = ENV_GET_CPU(env);

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        env->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        tcg_halt_cond = env->halt_cond;
        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, env,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (env->created == 0) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}
static void qemu_kvm_start_vcpu(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->thread = g_malloc0(sizeof(QemuThread));
    env->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, env,
                       QEMU_THREAD_JOINABLE);
    while (env->created == 0) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
static void qemu_dummy_start_vcpu(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->thread = g_malloc0(sizeof(QemuThread));
    env->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, env,
                       QEMU_THREAD_JOINABLE);
    while (env->created == 0) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
void qemu_init_vcpu(void *_env)
{
    CPUArchState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    env->stopped = 1;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(env);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(env);
    } else {
        qemu_dummy_start_vcpu(env);
    }
}
void cpu_stop_current(void)
{
    if (cpu_single_env) {
        cpu_single_env->stop = 0;
        cpu_single_env->stopped = 1;
        cpu_exit(cpu_single_env);
        qemu_cond_signal(&qemu_pause_cond);
    }
}
void vm_stop(RunState state)
{
    if (!qemu_thread_is_self(&io_thread)) {
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return;
    }
    do_vm_stop(state);
}
/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
void vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        vm_stop(state);
    } else {
        runstate_set(state);
    }
}
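
/*
 * With icount, the instruction budget up to the next vm_clock deadline is
 * split between the 16-bit countdown in icount_decr.u16.low and the
 * remainder in icount_extra; instructions left unexecuted when cpu_exec()
 * returns are folded back into the counter below.
 */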
static int tcg_cpu_exec(CPUArchState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round(qemu_clock_deadline(vm_clock));
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock.  */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUArchState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(env)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (env->stop || env->stopped) {
            break;
        }
    }
    exit_request = 0;
}
void set_numa_modes(void)
{
    CPUArchState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (node_cpumask[i] & (1 << env->cpu_index)) {
                env->numa_node = i;
            }
        }
    }
}
void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}
void set_cpu_log_filename(const char *optarg)
{
    cpu_set_log_filename(optarg);
}
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
    cpu_list_id(f, cpu_fprintf, optarg);
#elif defined(cpu_list)
    cpu_list(f, cpu_fprintf); /* deprecated */
#endif
}
CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUArchState *env;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        CpuInfoList *info;

        cpu_synchronize_state(env);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = env->cpu_index;
        info->value->current = (env == first_cpu);
        info->value->halted = env->halted;
        info->value->thread_id = env->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
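
/*
 * memsave/pmemsave copy guest memory to a host file in 1 KiB chunks:
 * qmp_memsave goes through a vCPU's virtual address space via
 * cpu_memory_rw_debug(), qmp_pmemsave reads physical memory directly.
 */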
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUArchState *env;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    for (env = first_cpu; env; env = env->next_cpu) {
        if (cpu_index == env->cpu_index) {
            break;
        }
    }

    if (env == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_set(errp, QERR_OPEN_FILE_FAILED, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_memory_rw_debug(env, addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}
void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_set(errp, QERR_OPEN_FILE_FAILED, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_physical_memory_rw(addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}
void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUArchState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!env->apic_state) {
            cpu_interrupt(env, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(env->apic_state);
        }
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}