[qemu/ar7.git] / cpus.c
1 /*
2 * QEMU System Emulator
4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 /* Needed early for CONFIG_BSD etc. */
26 #include "config-host.h"
28 #include "monitor.h"
29 #include "sysemu.h"
30 #include "gdbstub.h"
31 #include "dma.h"
32 #include "kvm.h"
34 #include "qemu-thread.h"
35 #include "cpus.h"
36 #include "main-loop.h"
38 #ifndef _WIN32
39 #include "compatfd.h"
40 #endif
42 #ifdef CONFIG_LINUX
44 #include <sys/prctl.h>
46 #ifndef PR_MCE_KILL
47 #define PR_MCE_KILL 33
48 #endif
50 #ifndef PR_MCE_KILL_SET
51 #define PR_MCE_KILL_SET 1
52 #endif
54 #ifndef PR_MCE_KILL_EARLY
55 #define PR_MCE_KILL_EARLY 1
56 #endif
58 #endif /* CONFIG_LINUX */
60 static CPUState *next_cpu;
62 /***********************************************************/
63 /* guest cycle counter */
65 /* Conversion factor from emulated instructions to virtual clock ticks. */
66 static int icount_time_shift;
67 /* Arbitrarily pick 1MIPS as the minimum allowable speed. */
68 #define MAX_ICOUNT_SHIFT 10
69 /* Compensate for varying guest execution speed. */
70 static int64_t qemu_icount_bias;
71 static QEMUTimer *icount_rt_timer;
72 static QEMUTimer *icount_vm_timer;
73 static QEMUTimer *icount_warp_timer;
74 static int64_t vm_clock_warp_start;
75 static int64_t qemu_icount;
77 typedef struct TimersState {
78 int64_t cpu_ticks_prev;
79 int64_t cpu_ticks_offset;
80 int64_t cpu_clock_offset;
81 int32_t cpu_ticks_enabled;
82 int64_t dummy;
83 } TimersState;
85 TimersState timers_state;
87 /* Return the virtual CPU time, based on the instruction counter. */
88 int64_t cpu_get_icount(void)
90 int64_t icount;
91 CPUState *env = cpu_single_env;
93 icount = qemu_icount;
94 if (env) {
95 if (!can_do_io(env)) {
96 fprintf(stderr, "Bad clock read\n");
98 icount -= (env->icount_decr.u16.low + env->icount_extra);
100 return qemu_icount_bias + (icount << icount_time_shift);
103 /* return the host CPU cycle counter and handle stop/restart */
104 int64_t cpu_get_ticks(void)
106 if (use_icount) {
107 return cpu_get_icount();
109 if (!timers_state.cpu_ticks_enabled) {
110 return timers_state.cpu_ticks_offset;
111 } else {
112 int64_t ticks;
113 ticks = cpu_get_real_ticks();
114 if (timers_state.cpu_ticks_prev > ticks) {
115 /* Note: non-increasing ticks may happen if the host uses
116 software suspend */
117 timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
119 timers_state.cpu_ticks_prev = ticks;
120 return ticks + timers_state.cpu_ticks_offset;
124 /* return the host CPU monotonic timer and handle stop/restart */
125 int64_t cpu_get_clock(void)
127 int64_t ti;
128 if (!timers_state.cpu_ticks_enabled) {
129 return timers_state.cpu_clock_offset;
130 } else {
131 ti = get_clock();
132 return ti + timers_state.cpu_clock_offset;
136 /* enable cpu_get_ticks() */
137 void cpu_enable_ticks(void)
139 if (!timers_state.cpu_ticks_enabled) {
140 timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
141 timers_state.cpu_clock_offset -= get_clock();
142 timers_state.cpu_ticks_enabled = 1;
146 /* disable cpu_get_ticks(): the clock is stopped. You must not call
147 cpu_get_ticks() after that. */
148 void cpu_disable_ticks(void)
150 if (timers_state.cpu_ticks_enabled) {
151 timers_state.cpu_ticks_offset = cpu_get_ticks();
152 timers_state.cpu_clock_offset = cpu_get_clock();
153 timers_state.cpu_ticks_enabled = 0;
157 /* Correlation between real and virtual time is always going to be
158 fairly approximate, so ignore small variation.
159 When the guest is idle, real and virtual time will be aligned in
160 the IO wait loop. */
161 #define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)
163 static void icount_adjust(void)
165 int64_t cur_time;
166 int64_t cur_icount;
167 int64_t delta;
168 static int64_t last_delta;
169 /* If the VM is not running, then do nothing. */
170 if (!runstate_is_running()) {
171 return;
173 cur_time = cpu_get_clock();
174 cur_icount = qemu_get_clock_ns(vm_clock);
175 delta = cur_icount - cur_time;
176 /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
177 if (delta > 0
178 && last_delta + ICOUNT_WOBBLE < delta * 2
179 && icount_time_shift > 0) {
180 /* The guest is getting too far ahead. Slow time down. */
181 icount_time_shift--;
183 if (delta < 0
184 && last_delta - ICOUNT_WOBBLE > delta * 2
185 && icount_time_shift < MAX_ICOUNT_SHIFT) {
186 /* The guest is getting too far behind. Speed time up. */
187 icount_time_shift++;
189 last_delta = delta;
190 qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
193 static void icount_adjust_rt(void *opaque)
195 qemu_mod_timer(icount_rt_timer,
196 qemu_get_clock_ms(rt_clock) + 1000);
197 icount_adjust();
200 static void icount_adjust_vm(void *opaque)
202 qemu_mod_timer(icount_vm_timer,
203 qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
204 icount_adjust();
207 static int64_t qemu_icount_round(int64_t count)
209 return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
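/* Credit the real time that passed since vm_clock_warp_start to the icount
   bias; in adaptive mode the credit is capped by how far vm_clock lags. */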
212 static void icount_warp_rt(void *opaque)
214 if (vm_clock_warp_start == -1) {
215 return;
218 if (runstate_is_running()) {
219 int64_t clock = qemu_get_clock_ns(rt_clock);
220 int64_t warp_delta = clock - vm_clock_warp_start;
221 if (use_icount == 1) {
222 qemu_icount_bias += warp_delta;
223 } else {
225 * In adaptive mode, do not let the vm_clock run too
226 * far ahead of real time.
228 int64_t cur_time = cpu_get_clock();
229 int64_t cur_icount = qemu_get_clock_ns(vm_clock);
230 int64_t delta = cur_time - cur_icount;
231 qemu_icount_bias += MIN(warp_delta, delta);
233 if (qemu_clock_expired(vm_clock)) {
234 qemu_notify_event();
237 vm_clock_warp_start = -1;
240 void qemu_clock_warp(QEMUClock *clock)
242 int64_t deadline;
245 * There are too many global variables to make the "warp" behavior
246 * applicable to other clocks. But a clock argument removes the
247 * need for if statements all over the place.
249 if (clock != vm_clock || !use_icount) {
250 return;
254 * If the CPUs have been sleeping, advance the vm_clock timer now. This
255 * ensures that the deadline for the timer is computed correctly below.
256 * This also makes sure that the insn counter is synchronized before the
257 * CPU starts running, in case the CPU is woken by an event other than
258 * the earliest vm_clock timer.
260 icount_warp_rt(NULL);
261 if (!all_cpu_threads_idle() || !qemu_clock_has_timers(vm_clock)) {
262 qemu_del_timer(icount_warp_timer);
263 return;
266 vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
267 deadline = qemu_clock_deadline(vm_clock);
268 if (deadline > 0) {
270 * Ensure the vm_clock proceeds even when the virtual CPU goes to
271 * sleep. Otherwise, the CPU might be waiting for a future timer
272 * interrupt to wake it up, but the interrupt never comes because
273 * the vCPU isn't running any insns and thus doesn't advance the
274 * vm_clock.
276 * An extreme solution for this problem would be to never let VCPUs
277 * sleep in icount mode if there is a pending vm_clock timer; rather
278 * time could just advance to the next vm_clock event. Instead, we
279 * do stop VCPUs and only advance vm_clock after some "real" time
280 * (related to the time left until the next event) has passed. The
281 * rt_clock timer will do this. This prevents the warps from being too
282 * visible externally; for example, you will not be sending network
283 * packets continuously instead of every 100ms.
285 qemu_mod_timer(icount_warp_timer, vm_clock_warp_start + deadline);
286 } else {
287 qemu_notify_event();
291 static const VMStateDescription vmstate_timers = {
292 .name = "timer",
293 .version_id = 2,
294 .minimum_version_id = 1,
295 .minimum_version_id_old = 1,
296 .fields = (VMStateField[]) {
297 VMSTATE_INT64(cpu_ticks_offset, TimersState),
298 VMSTATE_INT64(dummy, TimersState),
299 VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
300 VMSTATE_END_OF_LIST()
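/* Parse the -icount option: a numeric argument fixes icount_time_shift,
   "auto" selects adaptive mode with periodic speed-adjustment timers. */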
304 void configure_icount(const char *option)
306 vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
307 if (!option) {
308 return;
311 icount_warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
312 if (strcmp(option, "auto") != 0) {
313 icount_time_shift = strtol(option, NULL, 0);
314 use_icount = 1;
315 return;
318 use_icount = 2;
320 /* 125MIPS seems a reasonable initial guess at the guest speed.
321 It will be corrected fairly quickly anyway. */
322 icount_time_shift = 3;
324 /* Have both realtime and virtual time triggers for speed adjustment.
325 The realtime trigger catches emulated time passing too slowly,
326 the virtual time trigger catches emulated time passing too fast.
327 Realtime triggers occur even when idle, so use them less frequently
328 than VM triggers. */
329 icount_rt_timer = qemu_new_timer_ms(rt_clock, icount_adjust_rt, NULL);
330 qemu_mod_timer(icount_rt_timer,
331 qemu_get_clock_ms(rt_clock) + 1000);
332 icount_vm_timer = qemu_new_timer_ns(vm_clock, icount_adjust_vm, NULL);
333 qemu_mod_timer(icount_vm_timer,
334 qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
337 /***********************************************************/
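/* Report a fatal emulated-hardware error: print the message, dump the
   state of every CPU and abort. */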
338 void hw_error(const char *fmt, ...)
340 va_list ap;
341 CPUState *env;
343 va_start(ap, fmt);
344 fprintf(stderr, "qemu: hardware error: ");
345 vfprintf(stderr, fmt, ap);
346 fprintf(stderr, "\n");
347 for(env = first_cpu; env != NULL; env = env->next_cpu) {
348 fprintf(stderr, "CPU #%d:\n", env->cpu_index);
349 #ifdef TARGET_I386
350 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
351 #else
352 cpu_dump_state(env, stderr, fprintf, 0);
353 #endif
355 va_end(ap);
356 abort();
359 void cpu_synchronize_all_states(void)
361 CPUState *cpu;
363 for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
364 cpu_synchronize_state(cpu);
368 void cpu_synchronize_all_post_reset(void)
370 CPUState *cpu;
372 for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
373 cpu_synchronize_post_reset(cpu);
377 void cpu_synchronize_all_post_init(void)
379 CPUState *cpu;
381 for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
382 cpu_synchronize_post_init(cpu);
386 int cpu_is_stopped(CPUState *env)
388 return !runstate_is_running() || env->stopped;
391 static void do_vm_stop(RunState state)
393 if (runstate_is_running()) {
394 cpu_disable_ticks();
395 pause_all_vcpus();
396 runstate_set(state);
397 vm_state_notify(0, state);
398 qemu_aio_flush();
399 bdrv_flush_all();
400 monitor_protocol_event(QEVENT_STOP, NULL);
404 static int cpu_can_run(CPUState *env)
406 if (env->stop) {
407 return 0;
409 if (env->stopped || !runstate_is_running()) {
410 return 0;
412 return 1;
415 static bool cpu_thread_is_idle(CPUState *env)
417 if (env->stop || env->queued_work_first) {
418 return false;
420 if (env->stopped || !runstate_is_running()) {
421 return true;
423 if (!env->halted || qemu_cpu_has_work(env) ||
424 (kvm_enabled() && kvm_irqchip_in_kernel())) {
425 return false;
427 return true;
430 bool all_cpu_threads_idle(void)
432 CPUState *env;
434 for (env = first_cpu; env != NULL; env = env->next_cpu) {
435 if (!cpu_thread_is_idle(env)) {
436 return false;
439 return true;
442 static void cpu_handle_guest_debug(CPUState *env)
444 gdb_set_stop_cpu(env);
445 qemu_system_debug_request();
446 env->stopped = 1;
449 static void cpu_signal(int sig)
451 if (cpu_single_env) {
452 cpu_exit(cpu_single_env);
454 exit_request = 1;
457 #ifdef CONFIG_LINUX
458 static void sigbus_reraise(void)
460 sigset_t set;
461 struct sigaction action;
463 memset(&action, 0, sizeof(action));
464 action.sa_handler = SIG_DFL;
465 if (!sigaction(SIGBUS, &action, NULL)) {
466 raise(SIGBUS);
467 sigemptyset(&set);
468 sigaddset(&set, SIGBUS);
469 sigprocmask(SIG_UNBLOCK, &set, NULL);
471 perror("Failed to re-raise SIGBUS!\n");
472 abort();
475 static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
476 void *ctx)
478 if (kvm_on_sigbus(siginfo->ssi_code,
479 (void *)(intptr_t)siginfo->ssi_addr)) {
480 sigbus_reraise();
484 static void qemu_init_sigbus(void)
486 struct sigaction action;
488 memset(&action, 0, sizeof(action));
489 action.sa_flags = SA_SIGINFO;
490 action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
491 sigaction(SIGBUS, &action, NULL);
493 prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
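/* Consume any pending SIG_IPI/SIGBUS for this thread without blocking,
   forwarding SIGBUS to the KVM error handler. */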
496 static void qemu_kvm_eat_signals(CPUState *env)
498 struct timespec ts = { 0, 0 };
499 siginfo_t siginfo;
500 sigset_t waitset;
501 sigset_t chkset;
502 int r;
504 sigemptyset(&waitset);
505 sigaddset(&waitset, SIG_IPI);
506 sigaddset(&waitset, SIGBUS);
508 do {
509 r = sigtimedwait(&waitset, &siginfo, &ts);
510 if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
511 perror("sigtimedwait");
512 exit(1);
515 switch (r) {
516 case SIGBUS:
517 if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr)) {
518 sigbus_reraise();
520 break;
521 default:
522 break;
525 r = sigpending(&chkset);
526 if (r == -1) {
527 perror("sigpending");
528 exit(1);
530 } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
533 #else /* !CONFIG_LINUX */
535 static void qemu_init_sigbus(void)
539 static void qemu_kvm_eat_signals(CPUState *env)
542 #endif /* !CONFIG_LINUX */
544 #ifndef _WIN32
545 static void dummy_signal(int sig)
549 static void qemu_kvm_init_cpu_signals(CPUState *env)
551 int r;
552 sigset_t set;
553 struct sigaction sigact;
555 memset(&sigact, 0, sizeof(sigact));
556 sigact.sa_handler = dummy_signal;
557 sigaction(SIG_IPI, &sigact, NULL);
559 pthread_sigmask(SIG_BLOCK, NULL, &set);
560 sigdelset(&set, SIG_IPI);
561 sigdelset(&set, SIGBUS);
562 r = kvm_set_signal_mask(env, &set);
563 if (r) {
564 fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
565 exit(1);
568 sigdelset(&set, SIG_IPI);
569 sigdelset(&set, SIGBUS);
570 r = kvm_set_signal_mask(env, &set);
571 if (r) {
572 fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
573 exit(1);
577 static void qemu_tcg_init_cpu_signals(void)
579 sigset_t set;
580 struct sigaction sigact;
582 memset(&sigact, 0, sizeof(sigact));
583 sigact.sa_handler = cpu_signal;
584 sigaction(SIG_IPI, &sigact, NULL);
586 sigemptyset(&set);
587 sigaddset(&set, SIG_IPI);
588 pthread_sigmask(SIG_UNBLOCK, &set, NULL);
591 #else /* _WIN32 */
592 static void qemu_kvm_init_cpu_signals(CPUState *env)
594 abort();
597 static void qemu_tcg_init_cpu_signals(void)
600 #endif /* _WIN32 */
602 QemuMutex qemu_global_mutex;
603 static QemuCond qemu_io_proceeded_cond;
604 static bool iothread_requesting_mutex;
606 static QemuThread io_thread;
608 static QemuThread *tcg_cpu_thread;
609 static QemuCond *tcg_halt_cond;
611 /* cpu creation */
612 static QemuCond qemu_cpu_cond;
613 /* system init */
614 static QemuCond qemu_pause_cond;
615 static QemuCond qemu_work_cond;
617 void qemu_init_cpu_loop(void)
619 qemu_init_sigbus();
620 qemu_cond_init(&qemu_cpu_cond);
621 qemu_cond_init(&qemu_pause_cond);
622 qemu_cond_init(&qemu_work_cond);
623 qemu_cond_init(&qemu_io_proceeded_cond);
624 qemu_mutex_init(&qemu_global_mutex);
626 qemu_thread_get_self(&io_thread);
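/* Run func(data) on the given CPU's thread and wait for it to finish;
   if called from that CPU's own thread, just call func directly. */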
629 void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
631 struct qemu_work_item wi;
633 if (qemu_cpu_is_self(env)) {
634 func(data);
635 return;
638 wi.func = func;
639 wi.data = data;
640 if (!env->queued_work_first) {
641 env->queued_work_first = &wi;
642 } else {
643 env->queued_work_last->next = &wi;
645 env->queued_work_last = &wi;
646 wi.next = NULL;
647 wi.done = false;
649 qemu_cpu_kick(env);
650 while (!wi.done) {
651 CPUState *self_env = cpu_single_env;
653 qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
654 cpu_single_env = self_env;
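/* Drain this CPU's queued work items, running each one here, then wake
   any threads waiting in run_on_cpu(). */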
658 static void flush_queued_work(CPUState *env)
660 struct qemu_work_item *wi;
662 if (!env->queued_work_first) {
663 return;
666 while ((wi = env->queued_work_first)) {
667 env->queued_work_first = wi->next;
668 wi->func(wi->data);
669 wi->done = true;
671 env->queued_work_last = NULL;
672 qemu_cond_broadcast(&qemu_work_cond);
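/* Per-CPU idle housekeeping: acknowledge a pending stop request, run
   queued work and clear the kick flag. */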
675 static void qemu_wait_io_event_common(CPUState *env)
677 if (env->stop) {
678 env->stop = 0;
679 env->stopped = 1;
680 qemu_cond_signal(&qemu_pause_cond);
682 flush_queued_work(env);
683 env->thread_kicked = false;
686 static void qemu_tcg_wait_io_event(void)
688 CPUState *env;
690 while (all_cpu_threads_idle()) {
691 /* Start accounting real time to the virtual clock if the CPUs
692 are idle. */
693 qemu_clock_warp(vm_clock);
694 qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
697 while (iothread_requesting_mutex) {
698 qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
701 for (env = first_cpu; env != NULL; env = env->next_cpu) {
702 qemu_wait_io_event_common(env);
706 static void qemu_kvm_wait_io_event(CPUState *env)
708 while (cpu_thread_is_idle(env)) {
709 qemu_cond_wait(env->halt_cond, &qemu_global_mutex);
712 qemu_kvm_eat_signals(env);
713 qemu_wait_io_event_common(env);
716 static void *qemu_kvm_cpu_thread_fn(void *arg)
718 CPUState *env = arg;
719 int r;
721 qemu_mutex_lock(&qemu_global_mutex);
722 qemu_thread_get_self(env->thread);
723 env->thread_id = qemu_get_thread_id();
725 r = kvm_init_vcpu(env);
726 if (r < 0) {
727 fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
728 exit(1);
731 qemu_kvm_init_cpu_signals(env);
733 /* signal CPU creation */
734 env->created = 1;
735 qemu_cond_signal(&qemu_cpu_cond);
737 while (1) {
738 if (cpu_can_run(env)) {
739 r = kvm_cpu_exec(env);
740 if (r == EXCP_DEBUG) {
741 cpu_handle_guest_debug(env);
744 qemu_kvm_wait_io_event(env);
747 return NULL;
750 static void *qemu_tcg_cpu_thread_fn(void *arg)
752 CPUState *env = arg;
754 qemu_tcg_init_cpu_signals();
755 qemu_thread_get_self(env->thread);
757 /* signal CPU creation */
758 qemu_mutex_lock(&qemu_global_mutex);
759 for (env = first_cpu; env != NULL; env = env->next_cpu) {
760 env->thread_id = qemu_get_thread_id();
761 env->created = 1;
763 qemu_cond_signal(&qemu_cpu_cond);
765 /* wait for initial kick-off after machine start */
766 while (first_cpu->stopped) {
767 qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
770 while (1) {
771 cpu_exec_all();
772 if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
773 qemu_notify_event();
775 qemu_tcg_wait_io_event();
778 return NULL;
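/* Interrupt the host thread running this CPU: SIG_IPI on POSIX hosts,
   suspend/signal/resume on Windows. */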
781 static void qemu_cpu_kick_thread(CPUState *env)
783 #ifndef _WIN32
784 int err;
786 err = pthread_kill(env->thread->thread, SIG_IPI);
787 if (err) {
788 fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
789 exit(1);
791 #else /* _WIN32 */
792 if (!qemu_cpu_is_self(env)) {
793 SuspendThread(env->thread->thread);
794 cpu_signal(0);
795 ResumeThread(env->thread->thread);
797 #endif
800 void qemu_cpu_kick(void *_env)
802 CPUState *env = _env;
804 qemu_cond_broadcast(env->halt_cond);
805 if (kvm_enabled() && !env->thread_kicked) {
806 qemu_cpu_kick_thread(env);
807 env->thread_kicked = true;
811 void qemu_cpu_kick_self(void)
813 #ifndef _WIN32
814 assert(cpu_single_env);
816 if (!cpu_single_env->thread_kicked) {
817 qemu_cpu_kick_thread(cpu_single_env);
818 cpu_single_env->thread_kicked = true;
820 #else
821 abort();
822 #endif
825 int qemu_cpu_is_self(void *_env)
827 CPUState *env = _env;
829 return qemu_thread_is_self(env->thread);
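/* Take the global mutex from the iothread; under TCG, kick the CPU thread
   so it releases the lock if it currently holds it. */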
832 void qemu_mutex_lock_iothread(void)
834 if (kvm_enabled()) {
835 qemu_mutex_lock(&qemu_global_mutex);
836 } else {
837 iothread_requesting_mutex = true;
838 if (qemu_mutex_trylock(&qemu_global_mutex)) {
839 qemu_cpu_kick_thread(first_cpu);
840 qemu_mutex_lock(&qemu_global_mutex);
842 iothread_requesting_mutex = false;
843 qemu_cond_broadcast(&qemu_io_proceeded_cond);
847 void qemu_mutex_unlock_iothread(void)
849 qemu_mutex_unlock(&qemu_global_mutex);
852 static int all_vcpus_paused(void)
854 CPUState *penv = first_cpu;
856 while (penv) {
857 if (!penv->stopped) {
858 return 0;
860 penv = (CPUState *)penv->next_cpu;
863 return 1;
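/* Ask every VCPU to stop and wait until all of them have reported stopped. */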
866 void pause_all_vcpus(void)
868 CPUState *penv = first_cpu;
870 qemu_clock_enable(vm_clock, false);
871 while (penv) {
872 penv->stop = 1;
873 qemu_cpu_kick(penv);
874 penv = (CPUState *)penv->next_cpu;
877 while (!all_vcpus_paused()) {
878 qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
879 penv = first_cpu;
880 while (penv) {
881 qemu_cpu_kick(penv);
882 penv = (CPUState *)penv->next_cpu;
887 void resume_all_vcpus(void)
889 CPUState *penv = first_cpu;
891 while (penv) {
892 penv->stop = 0;
893 penv->stopped = 0;
894 qemu_cpu_kick(penv);
895 penv = (CPUState *)penv->next_cpu;
899 static void qemu_tcg_init_vcpu(void *_env)
901 CPUState *env = _env;
903 /* share a single thread for all cpus with TCG */
904 if (!tcg_cpu_thread) {
905 env->thread = g_malloc0(sizeof(QemuThread));
906 env->halt_cond = g_malloc0(sizeof(QemuCond));
907 qemu_cond_init(env->halt_cond);
908 tcg_halt_cond = env->halt_cond;
909 qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env);
910 while (env->created == 0) {
911 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
913 tcg_cpu_thread = env->thread;
914 } else {
915 env->thread = tcg_cpu_thread;
916 env->halt_cond = tcg_halt_cond;
920 static void qemu_kvm_start_vcpu(CPUState *env)
922 env->thread = g_malloc0(sizeof(QemuThread));
923 env->halt_cond = g_malloc0(sizeof(QemuCond));
924 qemu_cond_init(env->halt_cond);
925 qemu_thread_create(env->thread, qemu_kvm_cpu_thread_fn, env);
926 while (env->created == 0) {
927 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
931 void qemu_init_vcpu(void *_env)
933 CPUState *env = _env;
935 env->nr_cores = smp_cores;
936 env->nr_threads = smp_threads;
937 env->stopped = 1;
938 if (kvm_enabled()) {
939 qemu_kvm_start_vcpu(env);
940 } else {
941 qemu_tcg_init_vcpu(env);
945 void cpu_stop_current(void)
947 if (cpu_single_env) {
948 cpu_single_env->stop = 0;
949 cpu_single_env->stopped = 1;
950 cpu_exit(cpu_single_env);
951 qemu_cond_signal(&qemu_pause_cond);
955 void vm_stop(RunState state)
957 if (!qemu_thread_is_self(&io_thread)) {
958 qemu_system_vmstop_request(state);
960 * FIXME: should not return to device code in case
961 * vm_stop() has been requested.
963 cpu_stop_current();
964 return;
966 do_vm_stop(state);
969 /* Does a state transition even if the VM is already stopped;
970 the current state is forgotten forever. */
971 void vm_stop_force_state(RunState state)
973 if (runstate_is_running()) {
974 vm_stop(state);
975 } else {
976 runstate_set(state);
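/* Execute guest code for one CPU under TCG; with icount enabled, budget
   only the instructions remaining before the next vm_clock deadline. */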
980 static int tcg_cpu_exec(CPUState *env)
982 int ret;
983 #ifdef CONFIG_PROFILER
984 int64_t ti;
985 #endif
987 #ifdef CONFIG_PROFILER
988 ti = profile_getclock();
989 #endif
990 if (use_icount) {
991 int64_t count;
992 int decr;
993 qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
994 env->icount_decr.u16.low = 0;
995 env->icount_extra = 0;
996 count = qemu_icount_round(qemu_clock_deadline(vm_clock));
997 qemu_icount += count;
998 decr = (count > 0xffff) ? 0xffff : count;
999 count -= decr;
1000 env->icount_decr.u16.low = decr;
1001 env->icount_extra = count;
1003 ret = cpu_exec(env);
1004 #ifdef CONFIG_PROFILER
1005 qemu_time += profile_getclock() - ti;
1006 #endif
1007 if (use_icount) {
1008 /* Fold pending instructions back into the
1009 instruction counter, and clear the interrupt flag. */
1010 qemu_icount -= (env->icount_decr.u16.low
1011 + env->icount_extra);
1012 env->icount_decr.u32 = 0;
1013 env->icount_extra = 0;
1015 return ret;
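/* Round-robin execution loop over all CPUs; returns false once every CPU
   thread is idle. */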
1018 bool cpu_exec_all(void)
1020 int r;
1022 /* Account partial waits to the vm_clock. */
1023 qemu_clock_warp(vm_clock);
1025 if (next_cpu == NULL) {
1026 next_cpu = first_cpu;
1028 for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
1029 CPUState *env = next_cpu;
1031 qemu_clock_enable(vm_clock,
1032 (env->singlestep_enabled & SSTEP_NOTIMER) == 0);
1034 if (cpu_can_run(env)) {
1035 if (kvm_enabled()) {
1036 r = kvm_cpu_exec(env);
1037 qemu_kvm_eat_signals(env);
1038 } else {
1039 r = tcg_cpu_exec(env);
1041 if (r == EXCP_DEBUG) {
1042 cpu_handle_guest_debug(env);
1043 break;
1045 } else if (env->stop || env->stopped) {
1046 break;
1049 exit_request = 0;
1050 return !all_cpu_threads_idle();
1053 void set_numa_modes(void)
1055 CPUState *env;
1056 int i;
1058 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1059 for (i = 0; i < nb_numa_nodes; i++) {
1060 if (node_cpumask[i] & (1 << env->cpu_index)) {
1061 env->numa_node = i;
1067 void set_cpu_log(const char *optarg)
1069 int mask;
1070 const CPULogItem *item;
1072 mask = cpu_str_to_log_mask(optarg);
1073 if (!mask) {
1074 printf("Log items (comma separated):\n");
1075 for (item = cpu_log_items; item->mask != 0; item++) {
1076 printf("%-10s %s\n", item->name, item->help);
1078 exit(1);
1080 cpu_set_log(mask);
1083 void set_cpu_log_filename(const char *optarg)
1085 cpu_set_log_filename(optarg);
1088 void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
1090 /* XXX: implement xxx_cpu_list for targets that still miss it */
1091 #if defined(cpu_list_id)
1092 cpu_list_id(f, cpu_fprintf, optarg);
1093 #elif defined(cpu_list)
1094 cpu_list(f, cpu_fprintf); /* deprecated */
1095 #endif