/* cpus.c (from qemu/cris-port.git) */
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "qapi/qmp/qerror.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "qapi-event.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */
static CPUState *next_cpu;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}
/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

/* Compensate for varying guest execution speed. */
static int64_t qemu_icount_bias;
static int64_t vm_clock_warp_start;
/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
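/* One executed instruction advances the virtual clock by
 * 2^icount_time_shift nanoseconds (see cpu_get_icount_locked() below), so
 * as a rough sketch of the arithmetic: shift 0 is ~1000 MIPS, shift 3 is
 * the ~125 MIPS guess used by configure_icount(), and MAX_ICOUNT_SHIFT
 * (1024 ns per instruction) is the ~1 MIPS floor mentioned above.
 */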
/* Only written by TCG thread */
static int64_t qemu_icount;

static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;

typedef struct TimersState {
    /* Protected by BQL. */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read out of BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

static TimersState timers_state;
/* Return the virtual CPU time, based on the instruction counter. */
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = qemu_icount;
    if (cpu) {
        if (!cpu_can_do_io(cpu)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}
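/* The loop above is the usual lock-free seqlock read pattern: retry the
 * read whenever a writer raced with it.  Writers are serialized by the
 * BQL (see cpu_enable_ticks() below), which is why the seqlock is
 * initialized without a mutex of its own in configure_icount().
 */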
/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_real_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Note: non increasing ticks may happen if the host uses
           software suspend */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}
static int64_t cpu_get_clock_locked(void)
{
    int64_t ticks;

    ticks = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += get_clock();
    }

    return ticks;
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}
/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the real thing protected by seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the real thing protected by seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)
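/* Assuming get_ticks_per_sec() is nanosecond-based (10^9), ICOUNT_WOBBLE
 * amounts to roughly 100 ms of tolerated drift between real and virtual
 * time before icount_adjust() below changes icount_time_shift.
 */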
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex. */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing. */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead. Slow time down. */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind. Speed time up. */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
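/* qemu_icount_round() converts a nanosecond budget into an instruction
 * count, rounding up.  For example, with icount_time_shift == 3 (8 ns per
 * instruction) a 20 ns deadline yields (20 + 7) >> 3 == 3 instructions
 * rather than truncating to 2.
 */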
static void icount_warp_rt(void *opaque)
{
    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    if (atomic_read(&vm_clock_warp_start) == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        int64_t warp_delta;

        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock_locked();
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = cur_time - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        qemu_icount_bias += warp_delta;
    }
    vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        qemu_icount_bias += warp;
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
void qemu_clock_warp(QEMUClockType type)
{
    int64_t clock;
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    timer_del(icount_warp_timer);
    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount. */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some "real" time (related to the time left until the next
         * event) has passed.  The QEMU_CLOCK_REALTIME timer will do this.
         * This avoids that the warps are visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
            vm_clock_warp_start = clock;
        }
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);
        timer_mod_anticipate(icount_warp_timer, clock + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
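/* Summarizing the deadline cases above: a negative deadline means no
 * QEMU_CLOCK_VIRTUAL timer is pending, so there is nothing to warp to; a
 * positive deadline arms icount_warp_timer so that real time can carry
 * virtual time forward while all CPUs sleep; a zero deadline means a
 * timer already expired, so the clock is notified immediately.
 */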
static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};
void configure_icount(const char *option)
{
    seqlock_init(&timers_state.vm_clock_seqlock, NULL);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
                                     icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
                                   icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
}
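/* The option string normally comes from the -icount command-line flag: a
 * numeric argument fixes icount_time_shift and sets use_icount to 1,
 * while "auto" selects the adaptive mode (use_icount == 2) driven by the
 * two adjustment timers armed above.
 */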
/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}
void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}
static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        qapi_event_send_stop(&error_abort);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}
static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}
#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */
#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */
static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}

void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}
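/* run_on_cpu() may keep its work item on the caller's stack because it
 * blocks on qemu_work_cond until flush_queued_work() marks the item done;
 * async_run_on_cpu() returns immediately, so its item is heap-allocated
 * with wi->free set and is freed by the target CPU's thread instead.
 */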
static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}
static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle. */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}
static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}
static void tcg_exec_all(void);

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (QTAILQ_FIRST(&cpus)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}
void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}
void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return 0;
        }
    }

    return 1;
}
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}
/* For temporary buffers for forming a name */
#define VCPU_THREAD_NAME_SIZE 16

static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    tcg_cpu_address_space_init(cpu, cpu->as);

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                 cpu->cpu_index);
        qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                           cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}
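/* Note the asymmetry with qemu_kvm_start_vcpu() below: TCG multiplexes
 * all guest CPUs onto the single tcg_cpu_thread, created on first use and
 * shared by later CPUs, whereas KVM and the dummy backend create one host
 * thread per vCPU.
 */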
static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}
int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}
static int tcg_cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int64_t deadline;
        int decr;
        qemu_icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
        cpu->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        count = qemu_icount_round(deadline);
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        cpu->icount_decr.u16.low = decr;
        cpu->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        qemu_icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
        cpu->icount_decr.u32 = 0;
        cpu->icount_extra = 0;
    }
    return ret;
}
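/* The instruction budget is split in two because icount_decr.u16.low is
 * only 16 bits wide: at most 0xffff instructions go into the decrementer
 * ticked down by generated code, and the remainder waits in
 * cpu->icount_extra until the decrementer is refilled.
 */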
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}
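/* next_cpu persists across calls, so the loop above resumes the
 * round-robin schedule where the previous tcg_exec_all() left off instead
 * of always restarting at first_cpu.
 */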
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}
CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 " specified", addr);
            goto exit;
        }
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}
void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_physical_memory_read(addr, buf, l);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}
void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);

        if (!cpu->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(cpu->apic_state);
        }
    }
#elif defined(TARGET_S390X)
    CPUState *cs;
    S390CPU *cpu;

    CPU_FOREACH(cs) {
        cpu = S390_CPU(cs);
        if (cpu->env.cpu_num == monitor_get_cpu_index()) {
            if (s390_cpu_restart(S390_CPU(cs)) == -1) {
                error_set(errp, QERR_UNSUPPORTED);
                return;
            }
            break;
        }
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}