/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */
static CPUState *next_cpu;
bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}
static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || qemu_cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}
static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}
/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

/* Compensate for varying guest execution speed.  */
static int64_t qemu_icount_bias;
static int64_t vm_clock_warp_start;
/* Conversion factor from emulated instructions to virtual clock ticks.  */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10

/* Only written by TCG thread */
static int64_t qemu_icount;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;
typedef struct TimersState {
    /* Protected by BQL.  */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read out of BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

static TimersState timers_state;
/* Return the virtual CPU time, based on the instruction counter.  */
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = qemu_icount;
    if (cpu) {
        CPUArchState *env = cpu->env_ptr;
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}
int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}
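
/* Illustrative sketch (not part of the build): cpu_get_icount() hides the
 * seqlock retry loop from its callers.  The same read-side pattern applies
 * to any field guarded by vm_clock_seqlock; the hypothetical reader below
 * spells it out for cpu_clock_offset.
 */
#if 0
static int64_t read_clock_offset_example(void)
{
    int64_t value;
    unsigned start;

    do {
        /* A writer bumping the sequence while we read forces a retry.  */
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        value = timers_state.cpu_clock_offset;
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
    return value;
}
#endif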
/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_real_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Note: non increasing ticks may happen if the host uses
           software suspend.  */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}
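
/* Worked example of the clamp above: if cpu_ticks_prev is 1000 and a
 * suspend/resume makes the raw reading come back as 800, the offset is
 * bumped by 200 and 1000 is returned again, so callers never observe the
 * counter running backwards.
 */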
static int64_t cpu_get_clock_locked(void)
{
    int64_t ticks;

    ticks = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += get_clock();
    }

    return ticks;
}
/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}
/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the thing really protected by the seqlock is cpu_clock_offset.  */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
/* disable cpu_get_ticks() : the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the thing really protected by the seqlock is cpu_clock_offset.  */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
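
/* Usage sketch (illustrative only): these two calls are meant to bracket
 * the running state of the VM, which is what vm_start() and do_vm_stop()
 * do elsewhere.  While ticks are disabled, both cpu_get_ticks() and
 * cpu_get_clock() return frozen values.
 */
#if 0
static void pause_clocks_example(void)
{
    cpu_disable_ticks();    /* clocks freeze at their current values */
    /* ... VM paused: repeated cpu_get_clock() calls return the same value ... */
    cpu_enable_ticks();     /* clocks resume from where they stopped */
}
#endif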
/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex.  */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
    icount_adjust();
}
static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
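
/* Worked example: with icount_time_shift = 3 (one insn per 2^3 ns of
 * virtual time), a 20 ns deadline gives (20 + 7) >> 3 = 3 insns.  The
 * division rounds up, so the instruction budget always covers the full
 * deadline rather than stopping just short of it.
 */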
static void icount_warp_rt(void *opaque)
{
    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    if (atomic_read(&vm_clock_warp_start) == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        int64_t warp_delta;

        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock_locked();
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = cur_time - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        qemu_icount_bias += warp_delta;
    }
    vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = MIN(dest - clock, deadline);
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        qemu_icount_bias += warp;
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
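
/* Usage note (sketch of intent): this is the primitive behind qtest's
 * clock-manipulation commands, so a test can advance guest time
 * deterministically, e.g.
 *   qtest_clock_warp(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1000000);
 * fires every QEMU_CLOCK_VIRTUAL timer due within the next 1 ms of guest
 * time without any wall-clock delay.
 */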
void qemu_clock_warp(QEMUClockType type)
{
    int64_t clock;
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    timer_del(icount_warp_timer);
    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some "real" time (related to the time left until the next
         * event) has passed.  The QEMU_CLOCK_REALTIME timer will do this.
         * This avoids that the warps are visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
            vm_clock_warp_start = clock;
        }
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);
        timer_mod_anticipate(icount_warp_timer, clock + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields      = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};
void configure_icount(const char *option)
{
    seqlock_init(&timers_state.vm_clock_seqlock, NULL);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
                                     icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
                                   icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
}
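
/* Usage note: "option" is the -icount command-line argument.  A numeric
 * value such as "-icount 3" fixes the clock at 2^3 ns of virtual time per
 * instruction (use_icount == 1), while "-icount auto" starts from that
 * 125MIPS guess and lets icount_adjust() retune the shift at runtime
 * (use_icount == 2).
 */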
/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}
void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}
static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        monitor_protocol_event(QEVENT_STOP, NULL);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}
static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}
static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}
static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}
#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}
static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}
static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}
#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */
#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */
static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;
void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}
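
/* Usage sketch (illustrative only; do_count_work() and its payload are
 * hypothetical): run_on_cpu() executes the function on the target vCPU
 * thread and blocks until it has run, so the stack-allocated work item
 * and payload stay valid for the whole call.
 */
#if 0
static void do_count_work(void *data)
{
    int *counter = data;
    (*counter)++;                       /* runs on cpu's thread */
}

static void run_on_cpu_example(CPUState *cpu)
{
    int counter = 0;
    run_on_cpu(cpu, do_count_work, &counter);
    /* counter == 1 here: the work has completed before we return */
}
#endif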
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}
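
/* Design note: unlike run_on_cpu(), this variant returns immediately, so
 * the work item must be heap-allocated and marked wi->free = true; the
 * target thread releases it in flush_queued_work() after calling func.
 * Callers therefore must not pass pointers to stack data that may be gone
 * by the time the vCPU thread runs the function.
 */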
static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}
static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}
static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
       /* Start accounting real time to the virtual clock if the CPUs
          are idle.  */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}
static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}
static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}
static void tcg_exec_all(void);

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (QTAILQ_FIRST(&cpus)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}
void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}
void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}
bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}
static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}
void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}
void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}
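
/* Usage sketch (illustrative only): code running outside the vCPU
 * threads, e.g. a timer or monitor callback, brackets any access to
 * guest state with the iothread lock (the "big QEMU lock").
 */
#if 0
static void touch_guest_state_example(void)
{
    qemu_mutex_lock_iothread();
    /* ... safe to touch device models and CPUState here ... */
    qemu_mutex_unlock_iothread();
}
#endif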
static int all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return 0;
        }
    }

    return 1;
}
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}
void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}
void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    tcg_cpu_address_space_init(cpu, cpu->as);

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, cpu,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}
static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}
void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}
int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}
/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}
static int tcg_cpu_exec(CPUArchState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int64_t deadline;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        count = qemu_icount_round(deadline);
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}
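
/* Worked example of the budget split above: a 100000 ns deadline with
 * icount_time_shift = 3 yields count = qemu_icount_round(100000) = 12500
 * insns.  12500 fits in 16 bits, so icount_decr.u16.low = 12500 and
 * icount_extra = 0; a budget above 0xffff would put 0xffff in the low
 * word and leave the remainder in icount_extra for later refills.
 */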
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}
void set_numa_modes(void)
{
    CPUState *cpu;
    int i;

    CPU_FOREACH(cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (test_bit(cpu->cpu_index, node_cpumask[i])) {
                cpu->numa_node = i;
            }
        }
    }
}
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}
CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
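
/* Example QMP exchange served by this handler (shape only; the program
 * counter field name depends on the target):
 *   -> { "execute": "query-cpus" }
 *   <- { "return": [ { "CPU": 0, "current": true, "halted": false,
 *                      "pc": 1048576, "thread_id": 12345 } ] }
 */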
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 " specified", addr);
            goto exit;
        }
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}
void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_physical_memory_rw(addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}
void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);

        if (!cpu->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(cpu->apic_state);
        }
    }
#elif defined(TARGET_S390X)
    CPUState *cs;
    S390CPU *cpu;

    CPU_FOREACH(cs) {
        cpu = S390_CPU(cs);
        if (cpu->env.cpu_num == monitor_get_cpu_index()) {
            if (s390_cpu_restart(S390_CPU(cs)) == -1) {
                error_set(errp, QERR_UNSUPPORTED);
                return;
            }
            break;
        }
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}