/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "qapi/qmp/qerror.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "qapi-event.h"
#include "hw/nmi.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */
static CPUState *next_cpu;
int64_t max_delay;
int64_t max_advance;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}
/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */
static int64_t vm_clock_warp_start = -1;
/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10

static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;

typedef struct TimersState {
    /* Protected by BQL. */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read out of BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;

    /* Compensate for varying guest execution speed. */
    int64_t qemu_icount_bias;
    /* Only written by TCG thread */
    int64_t qemu_icount;
} TimersState;

static TimersState timers_state;
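/*
 * A note on the discipline implied by the comments above: fields in the
 * "Protected by BQL" group are only touched with the global mutex held,
 * while vm_clock_seqlock guards the clock offsets for lock-free readers.
 * Writers (which also hold the BQL) bracket updates with
 * seqlock_write_lock()/seqlock_write_unlock(); readers outside the BQL
 * spin on seqlock_read_begin()/seqlock_read_retry(), as cpu_get_icount()
 * and cpu_get_clock() below do.
 */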
int64_t cpu_get_icount_raw(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = timers_state.qemu_icount;
    if (cpu) {
        if (!cpu_can_do_io(cpu)) {
            fprintf(stderr, "Bad icount read\n");
            exit(1);
        }
        icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
    }
    return icount;
}
/* Return the virtual CPU time, based on the instruction counter. */
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount = cpu_get_icount_raw();
    return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
}

int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}

int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << icount_time_shift;
}
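/*
 * Worked example of the conversion above: with icount_time_shift == 3,
 * each emulated instruction accounts for 1 << 3 == 8 ns of virtual time,
 * i.e. a nominal 125 million instructions per emulated second.  At the
 * MAX_ICOUNT_SHIFT cap of 10, an instruction costs 1024 ns, which is the
 * "1MIPS minimum allowable speed" mentioned where that macro is defined.
 */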
/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_real_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Note: non increasing ticks may happen if the host uses
           software suspend. */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}
static int64_t cpu_get_clock_locked(void)
{
    int64_t ticks;

    ticks = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += get_clock();
    }

    return ticks;
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}
/* return the offset between the host clock and virtual CPU clock */
int64_t cpu_get_clock_offset(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = timers_state.cpu_clock_offset;
        if (!timers_state.cpu_ticks_enabled) {
            ti -= get_clock();
        }
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return -ti;
}
/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the only thing actually protected by the seqlock is
     * cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the only thing actually protected by the seqlock is
     * cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex. */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing. */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down. */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up. */
        icount_time_shift++;
    }
    last_delta = delta;
    timers_state.qemu_icount_bias = cur_icount
                              - (timers_state.qemu_icount << icount_time_shift);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
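/*
 * Numeric illustration of icount_adjust(): suppose virtual time has run
 * 300 ms ahead of real time (delta) while last_delta was 50 ms.  With
 * ICOUNT_WOBBLE == 100 ms, delta * 2 == 600 ms exceeds
 * last_delta + ICOUNT_WOBBLE == 150 ms, so icount_time_shift is lowered
 * by one: each instruction is now charged half as many nanoseconds and
 * virtual time advances more slowly.  qemu_icount_bias is recomputed in
 * the same critical section so the visible clock does not jump.
 */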
static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
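/*
 * qemu_icount_round() is a round-up division by 2^icount_time_shift:
 * with a shift of 3 (8 ns per instruction), a 100 ns deadline becomes
 * (100 + 7) >> 3 == 13 instructions, never undershooting the requested
 * span of virtual time.
 */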
static void icount_warp_rt(void *opaque)
{
    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    if (atomic_read(&vm_clock_warp_start) == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = cpu_get_clock_locked();
        int64_t warp_delta;

        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = clock - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        timers_state.qemu_icount_bias += warp_delta;
    }
    vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        timers_state.qemu_icount_bias += warp;
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
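/*
 * The loop above deliberately warps in timer-deadline-sized steps rather
 * than one big jump, so every QEMU_CLOCK_VIRTUAL timer between the
 * current time and dest fires in order; qtest's clock-stepping commands
 * rely on this behaviour.
 */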
void qemu_clock_warp(QEMUClockType type)
{
    int64_t clock;
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    timer_del(icount_warp_timer);
    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount. */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some "real" time, (related to the time left until the next
         * event) has passed.  The QEMU_CLOCK_VIRTUAL_RT clock will do this.
         * This avoids that the warps are visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
            vm_clock_warp_start = clock;
        }
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);
        timer_mod_anticipate(icount_warp_timer, clock + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
static bool icount_state_needed(void *opaque)
{
    return use_icount;
}

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &icount_vmstate_timers,
            .needed = icount_state_needed,
        }, {
            /* empty */
        }
    }
};
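/*
 * The icount subsection is only put on the wire when icount_state_needed()
 * returns true, so a migration stream produced without -icount keeps the
 * plain "timer" section layout and stays loadable by older QEMU versions.
 */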
void cpu_ticks_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock, NULL);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
}
void configure_icount(QemuOpts *opts, Error **errp)
{
    const char *option;
    char *rem_str = NULL;

    option = qemu_opt_get(opts, "shift");
    if (!option) {
        if (qemu_opt_get(opts, "align") != NULL) {
            error_setg(errp, "Please specify shift option when using align");
        }
        return;
    }
    icount_align_option = qemu_opt_get_bool(opts, "align", false);
    icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                     icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        errno = 0;
        icount_time_shift = strtol(option, &rem_str, 0);
        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
            error_setg(errp, "icount: Invalid shift value");
        }
        use_icount = 1;
        return;
    } else if (icount_align_option) {
        error_setg(errp, "shift=auto and align=on are incompatible");
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                                   icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
}
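/*
 * Illustrative command lines parsed by configure_icount() (shift is the
 * only required sub-option):
 *
 *   -icount shift=7            fixed rate: 2^7 ns of virtual time per insn
 *   -icount shift=auto         adaptive rate (use_icount == 2)
 *   -icount shift=7,align=on   also track max_delay/max_advance drift
 *
 * As enforced above, align=on without an explicit shift is an error, and
 * shift=auto combined with align=on is rejected as incompatible.
 */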
/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}
void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

void cpu_clean_all_dirty(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_clean_state(cpu);
    }
}
static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        qapi_event_send_stop(&error_abort);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}
static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}
static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}
#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}

#endif /* !CONFIG_LINUX */
#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */
static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}
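/*
 * The work item used by run_on_cpu() lives on the caller's stack, which
 * is why the function must block on qemu_work_cond until the target vCPU
 * thread has executed func.  async_run_on_cpu() below instead allocates
 * its item on the heap and sets wi->free so flush_queued_work() can
 * release it, allowing the caller to return immediately.
 */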
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}
static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}
static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}
static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle. */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}
static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}
static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}
static void tcg_exec_all(void);

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
        cpu->can_do_io = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (QTAILQ_FIRST(&cpus)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}
void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}
bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}
void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}
static int all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return 0;
        }
    }

    return 1;
}

void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}
/* For temporary buffers for forming a name */
#define VCPU_THREAD_NAME_SIZE 16

static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    tcg_cpu_address_space_init(cpu, cpu->as);

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                 cpu->cpu_index);
        qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                           cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}
static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}
int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}
static int tcg_cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int64_t deadline;
        int decr;
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                     + cpu->icount_extra);
        cpu->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        count = qemu_icount_round(deadline);
        timers_state.qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        cpu->icount_decr.u16.low = decr;
        cpu->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                     + cpu->icount_extra);
        cpu->icount_decr.u32 = 0;
        cpu->icount_extra = 0;
    }
    return ret;
}
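/*
 * Example of the instruction-budget split above: a 600000 ns deadline at
 * shift 3 rounds to 75000 instructions.  The fast-path counter
 * icount_decr.u16.low is only 16 bits wide, so it gets 0xffff (65535)
 * and the remaining 9465 go to icount_extra; both are folded back into
 * timers_state.qemu_icount once cpu_exec() returns.
 */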
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}
CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#elif defined(TARGET_TRICORE)
        TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
        CPUTriCoreState *env = &tricore_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#elif defined(TARGET_TRICORE)
        info->value->has_PC = true;
        info->value->PC = env->PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 " specified", addr);
            goto exit;
        }
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_physical_memory_read(addr, buf, l);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}
void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);

        if (!cpu->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(cpu->apic_state);
        }
    }
#else
    nmi_monitor_handle(monitor_get_cpu_index(), errp);
#endif
}

void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
{
    if (!use_icount) {
        return;
    }

    cpu_fprintf(f, "Host - Guest clock  %"PRIi64" ms\n",
                (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
    if (icount_align_option) {
        cpu_fprintf(f, "Max guest delay     %"PRIi64" ms\n",
                    -max_delay/SCALE_MS);
        cpu_fprintf(f, "Max guest advance   %"PRIi64" ms\n",
                    max_advance/SCALE_MS);
    } else {
        cpu_fprintf(f, "Max guest delay     NA\n");
        cpu_fprintf(f, "Max guest advance   NA\n");
    }
}