/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory-internal.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#if defined(_WIN64)
/* On w64, sigsetjmp is implemented by _setjmp which needs a second parameter.
 * If this parameter is NULL, longjmp does no stack unwinding.
 * That is what we need for QEMU. Passing the value of register rsp (default)
 * lets longjmp try a stack unwinding which will crash with generated code. */
#define sigsetjmp(env, savesigs) _setjmp(env, NULL)
#endif
/* -icount align implementation. */
typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;
#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
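
/* These thresholds are in nanoseconds: the guest may run at most 3 ms ahead
 * of real time before align_clocks() puts the vCPU to sleep, and "guest is
 * late" warnings are printed at most once every 2 s, up to 100 times. */

/* Slow the vCPU down whenever the guest clock (derived from the icount
 * counters) has run ahead of the host real-time clock. */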
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
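
/* Print a rate-limited warning when the guest runs behind the host clock;
 * the reported window widens by whole seconds as the delay grows and is
 * lowered again once the guest catches up by THRESHOLD_REDUCE seconds. */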
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}
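
/* Capture the initial guest/host clock difference for this cpu_exec() run
 * and fold it into the max_delay/max_advance counters. */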
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100) */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */
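
/* Unwind back to the sigsetjmp() in cpu_exec(), abandoning the TB that is
 * currently executing; callable from helpers running on behalf of a TB. */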
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
void cpu_reload_memory_map(CPUState *cpu)
{
    AddressSpaceDispatch *d;

    if (qemu_in_vcpu_thread()) {
        /* Do not let the guest prolong the critical section as much as it
         * desires.
         *
         * Currently, this is prevented by the I/O thread's periodic kicking
         * of the VCPU thread (iothread_requesting_mutex, qemu_cpu_kick_thread),
         * but this will go away once TCG's execution moves out of the global
         * mutex.
         *
         * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
         * only protects cpu->as->dispatch. Since we reload it below, we can
         * split the critical section.
         */
        rcu_read_unlock();
        rcu_read_lock();
    }

    /* The CPU and TLB are protected by the iothread lock. */
    d = atomic_rcu_read(&cpu->as->dispatch);
    cpu->memory_dispatch = d;
    tlb_flush(cpu, 1);
}
#endif
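
/* Note: the value returned by tcg_qemu_tb_exec() (and thus by cpu_tb_exec())
 * is the pointer to the last TB reached, with the exit reason encoded in
 * its low bits; use TB_EXIT_MASK to separate the two parts. */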
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb)
{
    TranslationBlock *tb;
    target_ulong pc = orig_tb->pc;
    target_ulong cs_base = orig_tb->cs_base;
    uint64_t flags = orig_tb->flags;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    /* tb_gen_code can flush our orig_tb, invalidate it now */
    tb_phys_invalidate(orig_tb, -1);
    tb = tb_gen_code(cpu, pc, cs_base, flags,
                     max_cycles | CF_NOCACHE);
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
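
/* Slow-path TB lookup: search the physical hash table, verifying both
 * guest-physical pages of a TB that spans a page boundary, and translate
 * a new TB on a miss. */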
static TranslationBlock *tb_find_slow(CPUState *cpu,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
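
/* Fast-path TB lookup: probe the per-CPU tb_jmp_cache, keyed by the guest
 * virtual PC, and fall back to tb_find_slow() on a miss. */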
static inline TranslationBlock *tb_find_fast(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(cpu, pc, cs_base, flags);
    }
    return tb;
}
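
/* Clear any latched watchpoint-hit flags unless a watchpoint is still
 * pending, then let the per-target hook handle the debug exception. */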
static void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}
/* main execution loop */

volatile sig_atomic_t exit_request;
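
/* exit_request is set by other threads to ask the running vCPU to leave
 * cpu_exec(); it is latched into cpu->exit_request below, once current_cpu
 * is visibly non-NULL. */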
int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUArchState *env = &x86_cpu->env;
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    rcu_read_lock();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(cpu);
                    }
                    cpu->exception_index = -1;
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    cpu->exception_index = -1;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
                    /* The target hook has 3 exit conditions:
                       false when the interrupt isn't processed,
                       true when it is, and we should restart on a new TB,
                       or a longjmp through cpu_loop_exit.  */
                    if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                        next_tb = 0;
                    }
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(cpu);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            insns_left = MIN(0xffff, cpu->icount_extra);
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                                cpu_exec_nocache(cpu, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* The longjmp is marked 'noreturn', so the compiler is allowed
             * to smash local variables across it; assert that the locals we
             * still rely on kept their values. */
            g_assert(cpu == current_cpu);
            g_assert(cc == CPU_GET_CLASS(cpu));
#ifdef TARGET_I386
            g_assert(x86_cpu == X86_CPU(cpu));
            g_assert(env == cpu->env_ptr);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}