/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"

#include "cpus.h"
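
/* With TCG all vCPUs are driven round-robin by a single thread; cur_cpu
   is the vCPU currently executing, next_cpu the next one to schedule. */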
static CPUState *cur_cpu;
static CPUState *next_cpu;

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}

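/* Synchronize register state between QEMU and the accelerator (a no-op
   for TCG, a state transfer for KVM) for every vCPU in the list. */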
void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

static void do_vm_stop(int reason)
{
    if (vm_running) {
        cpu_disable_ticks();
        vm_running = 0;
        pause_all_vcpus();
        vm_state_notify(0, reason);
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

static int cpu_can_run(CPUState *env)
{
    if (env->stop)
        return 0;
    if (env->stopped)
        return 0;
    return 1;
}

static int cpu_has_work(CPUState *env)
{
    if (env->stop)
        return 1;
    if (env->stopped)
        return 0;
    if (!env->halted)
        return 1;
    if (qemu_cpu_has_work(env))
        return 1;
    return 0;
}

static int tcg_has_work(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu)
        if (cpu_has_work(env))
            return 1;
    return 0;
}

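/* Main-loop wakeup: the I/O path blocks in select(), so other threads
   signal it by writing to an eventfd (or a pipe fallback). */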
#ifndef _WIN32
static int io_thread_fd = -1;

static void qemu_event_increment(void)
{
    /* Write 8 bytes to be compatible with eventfd.  */
    static uint64_t val = 1;
    ssize_t ret;

    if (io_thread_fd == -1)
        return;

    do {
        ret = write(io_thread_fd, &val, sizeof(val));
    } while (ret < 0 && errno == EINTR);

    /* EAGAIN is fine, a read must be pending.  */
    if (ret < 0 && errno != EAGAIN) {
        fprintf(stderr, "qemu_event_increment: write() failed: %s\n",
                strerror(errno));
        exit(1);
    }
}

static void qemu_event_read(void *opaque)
{
    int fd = (unsigned long)opaque;
    ssize_t len;
    char buffer[512];

    /* Drain the notify pipe.  For eventfd, only 8 bytes will be read.  */
    do {
        len = read(fd, buffer, sizeof(buffer));
    } while ((len == -1 && errno == EINTR) || len == sizeof(buffer));
}

static int qemu_event_init(void)
{
    int err;
    int fds[2];

    err = qemu_eventfd(fds);
    if (err == -1)
        return -errno;

    err = fcntl_setfl(fds[0], O_NONBLOCK);
    if (err < 0)
        goto fail;

    err = fcntl_setfl(fds[1], O_NONBLOCK);
    if (err < 0)
        goto fail;

    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                         (void *)(unsigned long)fds[0]);

    io_thread_fd = fds[1];
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return err;
}

#else /* _WIN32 */

HANDLE qemu_event_handle;

static void dummy_event_handler(void *opaque)
{
}

static int qemu_event_init(void)
{
    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!qemu_event_handle) {
        fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
        return -1;
    }
    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
    return 0;
}

static void qemu_event_increment(void)
{
    if (!SetEvent(qemu_event_handle)) {
        fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
                GetLastError());
        exit(1);
    }
}
#endif /* _WIN32 */

#ifndef CONFIG_IOTHREAD
int qemu_init_main_loop(void)
{
    return qemu_event_init();
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled())
        kvm_init_vcpu(env);
    return;
}

int qemu_cpu_self(void *env)
{
    return 1;
}

void resume_all_vcpus(void)
{
}

void pause_all_vcpus(void)
{
}

void qemu_cpu_kick(void *env)
{
    return;
}

void qemu_notify_event(void)
{
    CPUState *env = cpu_single_env;

    qemu_event_increment();
    if (env) {
        cpu_exit(env);
    }
    if (next_cpu && env != next_cpu) {
        cpu_exit(next_cpu);
    }
}

void qemu_mutex_lock_iothread(void) {}
void qemu_mutex_unlock_iothread(void) {}

void vm_stop(int reason)
{
    do_vm_stop(reason);
}

#else /* CONFIG_IOTHREAD */

#include "qemu-thread.h"

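/* CONFIG_IOTHREAD: the main loop runs in a dedicated I/O thread, each
   KVM vCPU gets its own thread, and all TCG vCPUs share one. */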
QemuMutex qemu_global_mutex;
static QemuMutex qemu_fair_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

static int qemu_system_ready;

static QemuCond qemu_cpu_cond;
static QemuCond qemu_system_cond;
static QemuCond qemu_pause_cond;

static void tcg_block_io_signals(void);
static void kvm_block_io_signals(CPUState *env);
static void unblock_io_signals(void);

int qemu_init_main_loop(void)
{
    int ret;

    ret = qemu_event_init();
    if (ret)
        return ret;

    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_system_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    unblock_io_signals();
    qemu_thread_self(&io_thread);

    return 0;
}

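/* Called with qemu_global_mutex held: acknowledge a pending stop request
   and wake up anyone blocked in pause_all_vcpus(). */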
static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
}

static void qemu_wait_io_event(CPUState *env)
{
    while (!tcg_has_work())
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_wait_io_event_common(env);
}

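/* Wait up to 'timeout' ms for a SIG_IPI, dropping the global mutex while
   blocked so the I/O thread can make progress in the meantime. */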
static void qemu_kvm_eat_signal(CPUState *env, int timeout)
{
    struct timespec ts;
    int r, e;
    siginfo_t siginfo;
    sigset_t waitset;

    ts.tv_sec = timeout / 1000;
    ts.tv_nsec = (timeout % 1000) * 1000000;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    qemu_mutex_unlock(&qemu_global_mutex);
    r = sigtimedwait(&waitset, &siginfo, &ts);
    e = errno;
    qemu_mutex_lock(&qemu_global_mutex);

    if (r == -1 && !(e == EAGAIN || e == EINTR)) {
        fprintf(stderr, "sigtimedwait: %s\n", strerror(e));
        exit(1);
    }
}

static void qemu_kvm_wait_io_event(CPUState *env)
{
    while (!cpu_has_work(env))
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);

    qemu_kvm_eat_signal(env, 0);
    qemu_wait_io_event_common(env);
}

static int qemu_cpu_exec(CPUState *env);

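/* Thread function for KVM vCPUs: one host thread per vCPU. */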
static void *kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_thread_self(env->thread);
    if (kvm_enabled())
        kvm_init_vcpu(env);

    kvm_block_io_signals(env);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        if (cpu_can_run(env))
            qemu_cpu_exec(env);
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}

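/* Thread function shared by all TCG vCPUs: executes them round-robin
   through tcg_cpu_exec(). */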
static void *tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    tcg_block_io_signals();
    qemu_thread_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu)
        env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        tcg_cpu_exec();
        qemu_wait_io_event(cur_cpu);
    }

    return NULL;
}

void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;

    qemu_cond_broadcast(env->halt_cond);
    if (kvm_enabled())
        qemu_thread_signal(env->thread, SIG_IPI);
}

int qemu_cpu_self(void *_env)
{
    CPUState *env = _env;
    QemuThread this;

    qemu_thread_self(&this);

    return qemu_thread_equal(&this, env->thread);
}

static void cpu_signal(int sig)
{
    if (cpu_single_env)
        cpu_exit(cpu_single_env);
}

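/* Signal-mask setup: I/O-related signals stay blocked in the TCG thread
   and are handled by the I/O thread; only SIG_IPI gets through, where it
   interrupts cpu_exec(). */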
static void tcg_block_io_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGCHLD);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);
}

static void dummy_signal(int sig)
{
}

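/* For KVM the host signal handler is a dummy: SIG_IPI is consumed either
   by sigtimedwait() in qemu_kvm_eat_signal() or by the in-kernel mask
   installed via kvm_set_signal_mask(). */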
static void kvm_block_io_signals(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGCHLD);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(r));
        exit(1);
    }
}

static void unblock_io_signals(void)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_BLOCK, &set, NULL);
}

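/* Contend fairly with a busy TCG thread for the global mutex: hold
   qemu_fair_mutex and keep kicking the TCG thread with SIG_IPI until the
   global mutex can be acquired. */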
static void qemu_signal_lock(unsigned int msecs)
{
    qemu_mutex_lock(&qemu_fair_mutex);

    while (qemu_mutex_trylock(&qemu_global_mutex)) {
        qemu_thread_signal(tcg_cpu_thread, SIG_IPI);
        if (!qemu_mutex_timedlock(&qemu_global_mutex, msecs))
            break;
    }
    qemu_mutex_unlock(&qemu_fair_mutex);
}

void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_fair_mutex);
        qemu_mutex_lock(&qemu_global_mutex);
        qemu_mutex_unlock(&qemu_fair_mutex);
    } else
        qemu_signal_lock(100);
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped)
            return 0;
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}

void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 1;
        qemu_thread_signal(penv->thread, SIG_IPI);
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
        penv = first_cpu;
        while (penv) {
            qemu_thread_signal(penv->thread, SIG_IPI);
            penv = (CPUState *)penv->next_cpu;
        }
    }
}

void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_thread_signal(penv->thread, SIG_IPI);
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }
}

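/* vCPU start-up: all TCG vCPUs share one thread and halt condition,
   whereas each KVM vCPU gets its own. */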
static void tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = qemu_mallocz(sizeof(QemuThread));
        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        qemu_thread_create(env->thread, tcg_cpu_thread_fn, env);
        while (env->created == 0)
            qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
        tcg_cpu_thread = env->thread;
        tcg_halt_cond = env->halt_cond;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}

static void kvm_start_vcpu(CPUState *env)
{
    env->thread = qemu_mallocz(sizeof(QemuThread));
    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, kvm_cpu_thread_fn, env);
    while (env->created == 0)
        qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled())
        kvm_start_vcpu(env);
    else
        tcg_init_vcpu(env);
}

void qemu_notify_event(void)
{
    qemu_event_increment();
}

static void qemu_system_vmstop_request(int reason)
{
    vmstop_requested = reason;
    qemu_notify_event();
}

void vm_stop(int reason)
{
    QemuThread me;
    qemu_thread_self(&me);

    if (!qemu_thread_equal(&me, &io_thread)) {
        qemu_system_vmstop_request(reason);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        if (cpu_single_env) {
            cpu_exit(cpu_single_env);
            cpu_single_env->stop = 1;
        }
        return;
    }
    do_vm_stop(reason);
}

#endif /* CONFIG_IOTHREAD */

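/* Execute guest code on one vCPU, maintaining the instruction counters
   when -icount is enabled. */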
static int qemu_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round(qemu_next_deadline());
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

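/* Run all vCPUs round-robin for one iteration of the main loop; returns
   whether any vCPU still has work pending. */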
bool tcg_cpu_exec(void)
{
    int ret = 0;

    if (next_cpu == NULL)
        next_cpu = first_cpu;
    for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
        CPUState *env = cur_cpu = next_cpu;

        qemu_clock_enable(vm_clock,
                          (cur_cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (qemu_alarm_pending())
            break;
        if (cpu_can_run(env))
            ret = qemu_cpu_exec(env);
        else if (env->stop)
            break;

        if (ret == EXCP_DEBUG) {
            gdb_set_stop_cpu(env);
            debug_requested = EXCP_DEBUG;
            break;
        }
    }
    return tcg_has_work();
}

void set_numa_modes(void)
{
    CPUState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (node_cpumask[i] & (1 << env->cpu_index)) {
                env->numa_node = i;
            }
        }
    }
}

void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}

/* Return the virtual CPU time, based on the instruction counter.  */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}