/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"

#include "qemu-thread.h"
#include "cpus.h"

#ifndef _WIN32
#include "compatfd.h"
#endif

#ifdef SIGRTMIN
#define SIG_IPI (SIGRTMIN+4)
#else
#define SIG_IPI SIGUSR1
#endif
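/* SIG_IPI is the signal used to kick vcpu threads out of guest code.  A
 * real-time signal is used where available (real-time signals are queued
 * individually rather than coalesced); SIGUSR1 is the fallback for hosts
 * without SIGRTMIN. */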
#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */
static CPUState *next_cpu;
/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}
void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}
int cpu_is_stopped(CPUState *env)
{
    return !vm_running || env->stopped;
}

static void do_vm_stop(RunState state)
{
    if (vm_running) {
        cpu_disable_ticks();
        vm_running = 0;
        pause_all_vcpus();
        vm_state_notify(0, state);
        qemu_aio_flush();
        bdrv_flush_all();
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}
static int cpu_can_run(CPUState *env)
{
    if (env->stop) {
        return 0;
    }
    if (env->stopped || !vm_running) {
        return 0;
    }
    return 1;
}

static bool cpu_thread_is_idle(CPUState *env)
{
    if (env->stop || env->queued_work_first) {
        return false;
    }
    if (env->stopped || !vm_running) {
        return true;
    }
    if (!env->halted || qemu_cpu_has_work(env) ||
        (kvm_enabled() && kvm_irqchip_in_kernel())) {
        return false;
    }
    return true;
}
bool all_cpu_threads_idle(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!cpu_thread_is_idle(env)) {
            return false;
        }
    }
    return true;
}
static void cpu_handle_guest_debug(CPUState *env)
{
    gdb_set_stop_cpu(env);
    qemu_system_debug_request();
    env->stopped = 1;
}

static void cpu_signal(int sig)
{
    if (cpu_single_env) {
        cpu_exit(cpu_single_env);
    }
    exit_request = 1;
}
#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}
static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}
static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
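/* Collect SIG_IPI and SIGBUS synchronously: both signals stay blocked in
 * kvm vcpu threads, so sigtimedwait() with a zero timeout drains whatever
 * is pending without running an asynchronous handler.  A SIGBUS raised
 * for guest memory is forwarded to KVM via kvm_on_sigbus_vcpu(). */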
static void qemu_kvm_eat_signals(CPUState *env)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}
#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *env)
{
}
#endif /* !CONFIG_LINUX */
#ifndef _WIN32

static int io_thread_fd = -1;
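/* Main-loop wakeup: qemu_notify_event() writes to io_thread_fd, and the
 * read end, registered via qemu_set_fd_handler2() below, makes the main
 * loop's select() return so that timers and bottom halves get
 * re-evaluated.  io_thread_fd is the write end of an eventfd (or of a
 * pipe on hosts without eventfd). */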
static void qemu_event_increment(void)
{
    /* Write 8 bytes to be compatible with eventfd.  */
    static const uint64_t val = 1;
    ssize_t ret;

    if (io_thread_fd == -1) {
        return;
    }
    do {
        ret = write(io_thread_fd, &val, sizeof(val));
    } while (ret < 0 && errno == EINTR);

    /* EAGAIN is fine, a read must be pending.  */
    if (ret < 0 && errno != EAGAIN) {
        fprintf(stderr, "qemu_event_increment: write() failed: %s\n",
                strerror(errno));
        exit(1);
    }
}
static void qemu_event_read(void *opaque)
{
    int fd = (intptr_t)opaque;
    ssize_t len;
    char buffer[512];

    /* Drain the notify pipe.  For eventfd, only 8 bytes will be read.  */
    do {
        len = read(fd, buffer, sizeof(buffer));
    } while ((len == -1 && errno == EINTR) || len == sizeof(buffer));
}
static int qemu_event_init(void)
{
    int err;
    int fds[2];

    err = qemu_eventfd(fds);
    if (err == -1) {
        return -errno;
    }
    err = fcntl_setfl(fds[0], O_NONBLOCK);
    if (err < 0) {
        goto fail;
    }
    err = fcntl_setfl(fds[1], O_NONBLOCK);
    if (err < 0) {
        goto fail;
    }
    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                         (void *)(intptr_t)fds[0]);

    io_thread_fd = fds[1];
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return err;
}
static void dummy_signal(int sig)
{
}

/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (intptr_t)opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN) {
            break;
        }

        if (len != sizeof(info)) {
            printf("read from sigfd returned %zd: %m\n", len);
            return;
        }

        sigaction(info.ssi_signo, NULL, &action);
        if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction) {
            action.sa_sigaction(info.ssi_signo,
                                (siginfo_t *)&info, NULL);
        } else if (action.sa_handler) {
            action.sa_handler(info.ssi_signo);
        }
    }
}
static int qemu_signal_init(void)
{
    int sigfd;
    sigset_t set;

    /* SIGUSR2 used by posix-aio-compat.c */
    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    /*
     * SIG_IPI must be blocked in the main thread and must not be caught
     * by sigwait() in the signal thread. Otherwise, the cpu thread will
     * not catch it reliably.
     */
    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGBUS);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    sigfd = qemu_signalfd(&set);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl_setfl(sigfd, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(intptr_t)sigfd);

    return 0;
}
static void qemu_kvm_init_cpu_signals(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }

    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}
static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}
#else /* _WIN32 */

HANDLE qemu_event_handle;

static void dummy_event_handler(void *opaque)
{
}

static int qemu_event_init(void)
{
    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!qemu_event_handle) {
        fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
        return -1;
    }
    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
    return 0;
}

static void qemu_event_increment(void)
{
    if (!SetEvent(qemu_event_handle)) {
        fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
                GetLastError());
        exit(1);
    }
}

static int qemu_signal_init(void)
{
    return 0;
}

static void qemu_kvm_init_cpu_signals(CPUState *env)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */
QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;
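/* qemu_global_mutex is the "big lock": it protects all device models and
 * other global state.  It is taken by the io thread in
 * qemu_init_main_loop() and handed back and forth between the io thread
 * and the vcpu threads via qemu_mutex_lock_iothread()/unlock_iothread()
 * and the condition variables above. */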
int qemu_init_main_loop(void)
{
    int ret;

    qemu_init_sigbus();

    ret = qemu_signal_init();
    if (ret) {
        return ret;
    }

    /* Note eventfd must be drained before signalfd handlers run */
    ret = qemu_event_init();
    if (ret) {
        return ret;
    }

    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);

    return 0;
}
void qemu_main_loop_start(void)
{
    resume_all_vcpus();
}
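/* run_on_cpu() runs func(data) in the context of a given vcpu and blocks
 * until it has completed.  The work item lives on this stack frame, which
 * is safe because we do not return before wi.done is set by
 * flush_queued_work() on the target thread.
 *
 * Hypothetical usage, for illustration only:
 *
 *     static void do_cpu_reset(void *opaque)
 *     {
 *         cpu_reset(opaque);
 *     }
 *     ...
 *     run_on_cpu(env, do_cpu_reset, env);
 */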
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(env)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first) {
        env->queued_work_first = &wi;
    } else {
        env->queued_work_last->next = &wi;
    }
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        CPUState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}
static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *wi;

    if (!env->queued_work_first) {
        return;
    }

    while ((wi = env->queued_work_first)) {
        env->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    env->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}
static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
    env->thread_kicked = false;
}
static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}
static void qemu_kvm_wait_io_event(CPUState *env)
{
    while (cpu_thread_is_idle(env)) {
        qemu_cond_wait(env->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(env);
    qemu_wait_io_event_common(env);
}
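/* With KVM, each vcpu gets its own host thread.  The thread holds
 * qemu_global_mutex except while the vcpu executes guest code inside
 * kvm_cpu_exec(), so device emulation triggered by the vcpu always runs
 * under the big lock. */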
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(env->thread);
    env->thread_id = qemu_get_thread_id();

    r = kvm_init_vcpu(env);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(env)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}
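/* TCG runs all vcpus round-robin on one shared host thread (see
 * qemu_tcg_init_vcpu()); cpu_exec_all() below advances each vcpu in turn
 * until it yields or an exit is requested. */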
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        env->thread_id = qemu_get_thread_id();
        env->created = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (1) {
        cpu_exec_all();
        if (use_icount && qemu_next_icount_deadline() <= 0) {
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
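/* Kick a vcpu thread out of guest code.  On POSIX hosts this delivers
 * SIG_IPI, which interrupts the KVM_RUN ioctl or, for TCG, runs
 * cpu_signal() to request an exit from the translated-code loop.  Windows
 * has no signals, so the thread is briefly suspended while cpu_signal()
 * sets the exit flags on its behalf. */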
static void qemu_cpu_kick_thread(CPUState *env)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(env->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(env)) {
        SuspendThread(env->thread->thread);
        cpu_signal(0);
        ResumeThread(env->thread->thread);
    }
#endif
}
void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;

    qemu_cond_broadcast(env->halt_cond);
    if (kvm_enabled() && !env->thread_kicked) {
        qemu_cpu_kick_thread(env);
        env->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);

    if (!cpu_single_env->thread_kicked) {
        qemu_cpu_kick_thread(cpu_single_env);
        cpu_single_env->thread_kicked = true;
    }
#else
    abort();
#endif
}
int qemu_cpu_is_self(void *_env)
{
    CPUState *env = _env;

    return qemu_thread_is_self(env->thread);
}
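/* The TCG thread can hold the big lock for a long stretch while it
 * executes guest code, so the io thread cannot simply block on the mutex.
 * Instead it first tries the lock and, on contention, kicks the TCG
 * thread; qemu_tcg_wait_io_event() then parks the TCG thread on
 * qemu_io_proceeded_cond until the io thread is done. */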
void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}
static int all_vcpus_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped) {
            return 0;
        }
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}
void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = (CPUState *)penv->next_cpu;
        }
    }
}
void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }
}
static void qemu_tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = g_malloc0(sizeof(QemuThread));
        env->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        tcg_halt_cond = env->halt_cond;
        qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env);
        while (env->created == 0) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = env->thread;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}
static void qemu_kvm_start_vcpu(CPUState *env)
{
    env->thread = g_malloc0(sizeof(QemuThread));
    env->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, qemu_kvm_cpu_thread_fn, env);
    while (env->created == 0) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(env);
    } else {
        qemu_tcg_init_vcpu(env);
    }
}

void qemu_notify_event(void)
{
    qemu_event_increment();
}
void cpu_stop_current(void)
{
    if (cpu_single_env) {
        cpu_single_env->stop = 0;
        cpu_single_env->stopped = 1;
        cpu_exit(cpu_single_env);
        qemu_cond_signal(&qemu_pause_cond);
    }
}
void vm_stop(RunState state)
{
    if (!qemu_thread_is_self(&io_thread)) {
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return;
    }
    do_vm_stop(state);
}
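/* icount execution budget: before entering translated code we decide how
 * many instructions may run before the next timer deadline.  The low 16
 * bits live in icount_decr.u16.low (decremented by generated code); any
 * excess is parked in icount_extra.  Worked example: a deadline of
 * 0x12345 instructions yields icount_decr.u16.low = 0xffff and
 * icount_extra = 0x2346. */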
static int tcg_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round(qemu_next_icount_deadline());
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}
bool cpu_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock.  */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(env)) {
            if (kvm_enabled()) {
                r = kvm_cpu_exec(env);
                qemu_kvm_eat_signals(env);
            } else {
                r = tcg_cpu_exec(env);
            }
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (env->stop || env->stopped) {
            break;
        }
    }
    exit_request = 0;
    return !all_cpu_threads_idle();
}
void set_numa_modes(void)
{
    CPUState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (node_cpumask[i] & (1 << env->cpu_index)) {
                env->numa_node = i;
            }
        }
    }
}
void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}

void set_cpu_log_filename(const char *optarg)
{
    cpu_set_log_filename(optarg);
}
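/* With -icount N, icount_time_shift is N and every instruction accounts
 * for 2^N ns of virtual time, so e.g. N=3 models a 125 MHz CPU; the
 * conversion below is qemu_icount_bias + (instructions << N). */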
/* Return the virtual CPU time, based on the instruction counter.  */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
    cpu_list_id(f, cpu_fprintf, optarg);
#elif defined(cpu_list)
    cpu_list(f, cpu_fprintf); /* deprecated */
#endif
}