/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/qemu-print.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "qemu/atomic.h"
#include "qemu/compiler.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/replay.h"
/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;
#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

static int64_t max_delay;
static int64_t max_advance;
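
/*
 * Reading the constants above with concrete numbers: VM_CLOCK_ADVANCE is
 * 3,000,000 ns (3 ms) and MAX_DELAY_PRINT_RATE is 2,000,000,000 ns (2 s).
 * So with -icount align enabled, a guest that runs more than 3 ms ahead of
 * the host real-time clock makes align_clocks() below put the vCPU thread
 * to sleep for the accumulated difference, and a "guest is late" warning
 * is printed at most every 2 s, up to MAX_NB_PRINTS (100) times.
 */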
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                        threshold_delay - 1,
                        threshold_delay);
        }
        nb_prints++;
        last_realtime_clock = sc->realtime_clock;
    }
}
static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG USER ONLY */
/* Execute a TB, and fix up the CPU state afterwards if necessary */

/*
 * Disable CFI checks.
 * TCG creates binary blobs at runtime, with the transformed code.
 * A TB is a blob of binary code, created at runtime and called with an
 * indirect function call. Since such a function did not exist at compile
 * time, the CFI runtime has no way to verify its signature and would fail.
 * TCG is not considered a security-sensitive part of QEMU, so this does not
 * affect the impact of CFI in environments with high security requirements.
 */
static inline TranslationBlock * QEMU_DISABLE_CFI
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    const void *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %d: %p ["
                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
                           cpu->cpu_index, itb->tc.ptr,
                           itb->cs_base, itb->pc, itb->flags,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        FILE *logfile = qemu_log_lock();
        int flags = 0;
        if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
            flags |= CPU_DUMP_FPU;
        }
#if defined(TARGET_I386)
        flags |= CPU_DUMP_CCOP;
#endif
        log_cpu_state(cpu, flags);
        qemu_log_unlock(logfile);
    }
#endif /* DEBUG_DISAS */

    qemu_thread_jit_execute();
    ret = tcg_qemu_tb_exec(env, tb_ptr);

    /*
     * TODO: Delay swapping back to the read-write region of the TB
     * until we actually need to modify the TB.  The read-only copy,
     * coming from the rx region, shares the same host TLB entry as
     * the code that executed the exit_tb opcode that arrived here.
     * If we insist on touching both the RX and the RW pages, we
     * double the host TLB pressure.
     */
    last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
    *tb_exit = ret & TB_EXIT_MASK;

    trace_exec_tb_exit(last_tb, *tb_exit);

    if (*tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->tcg_ops->synchronize_from_tb) {
            cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
        } else {
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return last_tb;
}
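
/*
 * Note on the value decoded above: the host pointer returned by
 * tcg_qemu_tb_exec() carries the exit reason in its low TB_EXIT_MASK bits.
 * Values up to TB_EXIT_IDX1 mean the TB left through one of its two goto_tb
 * slots and may be chained by tb_add_jump(); anything larger means the TB
 * did not run (e.g. the instruction counter hit zero first), so the guest
 * PC must be re-synchronized from last_tb, as the code above does.
 */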
static void cpu_exec_enter(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_enter) {
        cc->tcg_ops->cpu_exec_enter(cpu);
    }
}

static void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_exit) {
        cc->tcg_ops->cpu_exec_exit(cpu);
    }
}
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = (curr_cflags(cpu) & ~CF_PARALLEL) | 1;
    int tb_exit;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();
        g_assert(cpu == current_cpu);
        g_assert(!cpu->running);
        cpu->running = true;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);

        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb, &tb_exit);
    } else {
        /*
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
        qemu_plugin_disable_mem_helpers(cpu);
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    cpu->running = false;
    end_exclusive();
}
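
/*
 * A note on the cflags used above (a reading of the code, not new behaviour):
 * "(curr_cflags(cpu) & ~CF_PARALLEL) | 1" requests a TB that is translated
 * without the parallel-context assumption and that contains exactly one
 * guest instruction (the low CF_COUNT_MASK bits of cflags hold the
 * instruction count).  Combined with the exclusive region, this is how a
 * single instruction can be executed atomically with respect to all other
 * vCPUs.
 */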
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cflags;
    uint32_t trace_vcpu_dstate;
};
static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        tb_cflags(tb) == desc->cflags) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cflags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cflags = cflags;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
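
/*
 * A sketch of the lookup above (reading of the code, values illustrative):
 * the hash bucket is chosen from (phys_pc, pc, flags, cflags, trace_dstate),
 * and tb_lookup_cmp() then rejects any TB in that bucket whose pc, cs_base,
 * flags, cflags, trace_vcpu_dstate or physical pages do not match exactly.
 * Two TBs for the same guest pc but different cflags (for example different
 * icount budgets) therefore never alias each other.
 */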
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        uintptr_t jmp_rx = tc_ptr + offset;
        uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
        tb_target_set_jmp_target(tc_ptr, jmp_rx, jmp_rw, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}
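
/*
 * Sketch of the split-wx arithmetic above: generated code is mapped twice,
 * once read-execute (RX) and once read-write (RW), tcg_splitwx_diff bytes
 * apart.  The patch is written through the RW alias (jmp_rw) while the CPU
 * keeps fetching from the RX alias (jmp_rx); both name the same bytes.
 */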
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    qemu_thread_jit_write();
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                          (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}
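
/*
 * Why both the patch and the list: once tb_set_jmp_target() has patched the
 * native jump, execution flows from tb straight into tb_next without coming
 * back to the outer loop.  The jmp_list_head/jmp_list_next entries recorded
 * above give tb_next a way to find every TB that jumps into it, so the links
 * can be undone if tb_next is later invalidated (CF_INVALID).
 */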
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cflags)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        mmap_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
        mmap_unlock();
        /* We add the TB in the virtual pc hash table for the fast lookup */
        qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb) {
        tb_add_jump(last_tb, tb_exit, tb);
    }
    return tb;
}
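
/*
 * Reading of the lookup strategy above (tb_lookup() itself lives in
 * exec/tb-lookup.h): the per-vCPU tb_jmp_cache, indexed by guest pc, is
 * consulted first; on a miss the global qht hash table is searched, and
 * only if that also misses is a new TB translated with tb_gen_code() and
 * published in the jmp cache for the next iteration.
 */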
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    if (cc->tcg_ops->debug_excp_handler) {
        cc->tcg_ops->debug_excp_handler(cpu);
    }
}
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* Execute just one insn to trigger exception pending in the log */
            cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) | 1;
        }
#endif
        return false;
    }
    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->tcg_ops->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->tcg_ops->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;

            if (unlikely(cpu->singlestep_enabled)) {
                /*
                 * After processing the exception, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                *ret = EXCP_DEBUG;
                cpu_handle_debug_exception(cpu);
                return true;
            }
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}
/*
 * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
 * "real" interrupt event later. It does not need to be recorded for
 * replay purposes.
 */
static inline bool need_replay_interrupt(int interrupt_request)
{
#if defined(TARGET_I386)
    return !(interrupt_request & CPU_INTERRUPT_POLL);
#else
    return true;
#endif
}
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and, via longjmp, through cpu_loop_exit. */
        else {
            if (cc->tcg_ops->cpu_exec_interrupt &&
                cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
                if (need_replay_interrupt(interrupt_request)) {
                    replay_interrupt();
                }
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                cpu->exception_index =
                    (cpu->singlestep_enabled ? EXCP_DEBUG : -1);
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(qatomic_read(&cpu->exit_request))
        || (icount_enabled()
            && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        qatomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}
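
/*
 * How an "exit to main loop" request reaches a running TB (a reading of the
 * code above, not new behaviour): cpu_exit() sets icount_decr.u16.high, so
 * the 32-bit icount_decr.u32 value a TB reads at its start goes negative and
 * the TB returns immediately; cpu_handle_interrupt() then clears the high
 * half with qatomic_mb_set() before it inspects interrupt_request and
 * exit_request.
 */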
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    tb = cpu_tb_exec(cpu, tb, tb_exit);
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt. cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired. */
    assert(icount_enabled());
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    icount_update(cpu);
    /* Refill decrementer and continue execution. */
    insns_left = MIN(CF_COUNT_MASK, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;

    /*
     * If the next tb has more instructions than we have left to
     * execute we need to ensure we find/generate a TB with exactly
     * insns_left instructions in it.
     */
    if (!cpu->icount_extra && insns_left > 0 && insns_left < tb->icount) {
        cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left;
    }
#endif
}
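
/*
 * Worked example of the refill above (illustrative numbers, assuming the
 * budget fits within CF_COUNT_MASK): with cpu->icount_budget = 10000, the
 * 16-bit decrementer is loaded with 10000 and icount_extra stays 0.  Later,
 * if only 10 instructions remain (insns_left = 10) but the cached next TB
 * contains 30 (tb->icount = 30), cflags_next_tb asks tb_find() for a TB
 * holding exactly 10 instructions.
 */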
/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__)
        /*
         * Some compilers wrongly smash all local variables after
         * siglongjmp (the spec requires that only non-volatile locals
         * which are changed between the sigsetjmp and siglongjmp are
         * permitted to be trashed). There were bug reports for gcc
         * 4.5.0 and clang. The bug is fixed in all versions of gcc
         * that we support, but is still unfixed in clang:
         *   https://bugs.llvm.org/show_bug.cgi?id=21183
         *
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered),
         * so we only perform the workaround for clang.
         */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else
        /*
         * Non-buggy compilers preserve these locals; assert that
         * they have the correct value.
         */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif

#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        qemu_plugin_disable_mem_helpers(cpu);

        assert_no_pages_locked();
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /* When requested, use an exact setting for cflags for the next
               execution. This is used for icount, precise smc, and stop-
               after-access watchpoints. Since this request should never
               have CF_INVALID set, -1 is a convenient invalid value that
               does not require tcg headers for cpu_common_reset. */
            if (cflags == -1) {
                cflags = curr_cflags(cpu);
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cpu_exec_exit(cpu);

    return ret;
}
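
/*
 * How errors get back to the sigsetjmp() block in cpu_exec() above (a
 * reading of the surrounding code): helpers that cannot continue, such as
 * cpu_loop_exit(), siglongjmp() back to cpu->jmp_env.  The recovery block
 * therefore assumes nothing about the state at the jump source: it drops
 * the iothread lock if held, disables plugin memory helpers, and re-checks
 * page/mmap lock invariants before the outer loops resume.
 */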
void tcg_exec_realizefn(CPUState *cpu, Error **errp)
{
    static bool tcg_target_initialized;
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!tcg_target_initialized) {
        cc->tcg_ops->initialize();
        tcg_target_initialized = true;
    }

    qemu_plugin_vcpu_init_hook(cpu);

#ifndef CONFIG_USER_ONLY
    tcg_iommu_init_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */
}
/* undo the initializations in reverse order */
void tcg_exec_unrealizefn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    tcg_iommu_free_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */

    qemu_plugin_vcpu_exit_hook(cpu);
}
#ifndef CONFIG_USER_ONLY

void dump_drift_info(void)
{
    if (!icount_enabled()) {
        return;
    }

    qemu_printf("Host - Guest clock %"PRIi64" ms\n",
                (cpu_get_clock() - icount_get()) / SCALE_MS);
    if (icount_align_option) {
        qemu_printf("Max guest delay %"PRIi64" ms\n",
                    -max_delay / SCALE_MS);
        qemu_printf("Max guest advance %"PRIi64" ms\n",
                    max_advance / SCALE_MS);
    } else {
        qemu_printf("Max guest delay NA\n");
        qemu_printf("Max guest advance NA\n");
    }
}

#endif /* !CONFIG_USER_ONLY */