/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/log.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/replay.h"
/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;
#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
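
/* For instance, with VM_CLOCK_ADVANCE at 3000000 ns the guest may run
 * up to 3 ms ahead of real time; once diff_clk exceeds that window,
 * align_clocks() below sleeps the host thread for the whole accumulated
 * advance (e.g. a 5000000 ns lead causes a ~5 ms sleep).
 */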
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
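
/* Note: cpu_icount above is the number of guest instructions still
 * pending (icount_extra plus the low half of the decrementer), so
 * last_cpu_icount - cpu_icount is how many instructions ran since the
 * previous call, and cpu_icount_to_ns() turns that into the guest time
 * credited to diff_clk.  On POSIX hosts an interrupted nanosleep()
 * leaves the un-slept remainder in diff_clk, so none of the guest's
 * advance is silently dropped.
 */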
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}
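
/* The static threshold_delay provides hysteresis for the warning: a new
 * message is printed only when the delay crosses above the previously
 * reported value, or falls back by more than THRESHOLD_REDUCE seconds,
 * so steady lateness does not flood the console.
 */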
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc_ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %p [" TARGET_FMT_lx "] %s\n",
                           itb->tc_ptr, itb->pc, lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc_ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    if (tb_exit == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return ret;
}
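
/* Note: tcg_qemu_tb_exec() returns the address of the last executed TB
 * with a 2-bit exit code packed into its low bits; TB_EXIT_MASK splits
 * the two apart, exactly as the caller in cpu_exec() does:
 *
 *     last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
 *     tb_exit = ret & TB_EXIT_MASK;
 */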
#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    bool old_tb_flushed;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    old_tb_flushed = cpu->tb_flushed;
    cpu->tb_flushed = false;
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles | CF_NOCACHE
                         | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
    tb->orig_tb = cpu->tb_flushed ? NULL : orig_tb;
    cpu->tb_flushed |= old_tb_flushed;
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
#endif
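
/* A CF_NOCACHE block is in effect a one-shot translation: it is executed
 * once and then invalidated and freed immediately above, so it can carry
 * an arbitrary cycle budget without polluting the TB cache.  This is what
 * lets icount run "just the next N instructions" when a cached TB would
 * be longer than the remaining budget.
 */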
static TranslationBlock *tb_find_physical(CPUState *cpu,
                                          target_ulong pc,
                                          target_ulong cs_base,
                                          uint32_t flags)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb, **tb_hash_head, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);

    /* Start at head of the hash entry */
    ptb1 = tb_hash_head = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb = *ptb1;

    while (tb) {
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {

            if (tb->page_addr[1] == -1) {
                /* done, we have a match */
                break;
            } else {
                /* check next page if needed */
                target_ulong virt_page2 = (pc & TARGET_PAGE_MASK) +
                                          TARGET_PAGE_SIZE;
                tb_page_addr_t phys_page2 = get_page_addr_code(env, virt_page2);

                if (tb->page_addr[1] == phys_page2) {
                    break;
                }
            }
        }

        ptb1 = &tb->phys_hash_next;
        tb = *ptb1;
    }

    if (tb) {
        /* Move the TB to the head of the list */
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = *tb_hash_head;
        *tb_hash_head = tb;
    }
    return tb;
}
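
/* The move-to-front step above is a small self-tuning optimisation:
 * frequently executed blocks bubble up to the head of their hash bucket,
 * so the next lookup of a hot TB usually succeeds after one comparison
 * even when the bucket's chain is long.
 */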
static TranslationBlock *tb_find_slow(CPUState *cpu,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint32_t flags)
{
    TranslationBlock *tb;

    tb = tb_find_physical(cpu, pc, cs_base, flags);
    if (tb) {
        goto found;
    }

#ifdef CONFIG_USER_ONLY
    /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
     * taken outside tb_lock.  Since we're momentarily dropping
     * tb_lock, there's a chance that our desired tb has been
     * translated.
     */
    tb_unlock();
    mmap_lock();
    tb_lock();
    tb = tb_find_physical(cpu, pc, cs_base, flags);
    if (tb) {
        mmap_unlock();
        goto found;
    }
#endif

    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

#ifdef CONFIG_USER_ONLY
    mmap_unlock();
#endif

found:
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
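
/* Note the recheck pattern in the user-mode path above: after dropping
 * tb_lock to take mmap_lock in the required order, another thread may
 * already have translated this pc, so tb_find_physical() runs a second
 * time before we commit to tb_gen_code().
 */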
static inline TranslationBlock *tb_find_fast(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(cpu, pc, cs_base, flags);
    }
    return tb;
}
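
/* TB lookup is therefore two-level: tb_jmp_cache is a direct-mapped
 * per-CPU cache indexed by a hash of the virtual pc, and only a miss
 * (or a stale entry, caught by the pc/cs_base/flags comparison) falls
 * back to tb_find_slow() and the physical-address hash table.
 */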
static void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}
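
/* If the exception was not raised by a watchpoint (watchpoint_hit is
 * NULL), any BP_WATCHPOINT_HIT flags left over from earlier hits are
 * cleared first, so the per-target handler cannot mistake a stale hit
 * for the cause of this debug exception.
 */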
/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUArchState *env = &x86_cpu->env;
#endif
    int ret, interrupt_request;
    TranslationBlock *tb, *last_tb;
    int tb_exit = 0;
    SyncClocks sc;

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
        }
#endif
        if (!cpu_has_work(cpu)) {
            current_cpu = NULL;
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    atomic_mb_set(&tcg_current_cpu, cpu);
    rcu_read_lock();

    if (unlikely(atomic_mb_read(&exit_request))) {
        cpu->exit_request = 1;
    }

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);
    /* prepare setjmp context for exception handling */
    for (;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(cpu);
                    }
                    cpu->exception_index = -1;
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    cpu->exception_index = -1;
                    break;
#else
                    if (replay_exception()) {
                        cc->do_interrupt(cpu);
                        cpu->exception_index = -1;
                    } else if (!replay_has_interrupt()) {
                        /* give a chance to the iothread in replay mode */
                        ret = EXCP_INTERRUPT;
                        break;
                    }
#endif
                }
#ifndef CONFIG_USER_ONLY
            } else if (replay_has_exception()
                       && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
                /* try to cause an exception pending in the log */
                cpu_exec_nocache(cpu, 1, tb_find_fast(cpu), true);
                ret = -1;
                break;
#endif
            }

            last_tb = NULL; /* forget the last executed TB after exception */
            cpu->tb_flushed = false; /* reset before first TB lookup */
            for (;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
                    if (replay_mode == REPLAY_MODE_PLAY
                        && !replay_has_interrupt()) {
                        /* Do nothing */
                    } else if (interrupt_request & CPU_INTERRUPT_HALT) {
                        replay_interrupt();
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_I386)
                    else if (interrupt_request & CPU_INTERRUPT_INIT) {
                        replay_interrupt();
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    else if (interrupt_request & CPU_INTERRUPT_RESET) {
                        replay_interrupt();
                        cpu_reset(cpu);
                        cpu_loop_exit(cpu);
                    }
#endif
                    /* The target hook has 3 exit conditions:
                       False when the interrupt isn't processed,
                       True when it is, and we should restart on a new TB,
                       and via longjmp via cpu_loop_exit.  */
                    else {
                        replay_interrupt();
                        if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                            last_tb = NULL;
                        }
                    }
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        last_tb = NULL;
                    }
                }
                if (unlikely(cpu->exit_request
                             || replay_has_interrupt())) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                tb = tb_find_fast(cpu);
                if (cpu->tb_flushed) {
                    /* Ensure that no TB jump will be modified as the
                     * translation buffer has been flushed.
                     */
                    last_tb = NULL;
                    cpu->tb_flushed = false;
                }
                /* See if we can patch the calling TB. */
                if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
                    tb_add_jump(last_tb, tb_exit, tb);
                }
                if (likely(!cpu->exit_request)) {
                    uintptr_t ret;
                    trace_exec_tb(tb, tb->pc);
                    /* execute the generated code */
                    cpu->current_tb = tb;
                    ret = cpu_tb_exec(cpu, tb);
                    cpu->current_tb = NULL;
                    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
                    tb_exit = ret & TB_EXIT_MASK;
                    switch (tb_exit) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop.  Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.  But we need to
                         * ensure the tcg_exit_req read in generated code
                         * comes before the next read of cpu->exit_request
                         * or cpu->interrupt_request.
                         */
                        smp_rmb();
                        last_tb = NULL;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
#ifdef CONFIG_USER_ONLY
                        abort();
#else
                        int insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            insns_left = MIN(0xffff, cpu->icount_extra);
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(cpu, insns_left,
                                                 last_tb, false);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            cpu_loop_exit(cpu);
                        }
                        break;
#endif
                    }
                    default:
                        break;
                    }
                }
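                /* Refill example for the TB_EXIT_ICOUNT_EXPIRED case above:
                 * if the decrementer expired with insns_left == 0 while
                 * icount_extra == 70000, then MIN(0xffff, 70000) = 65535
                 * instructions are loaded into the 16-bit decrementer and
                 * 70000 - 65535 = 4465 stay in icount_extra for the next
                 * refill.  A negative insns_left means the exit was
                 * requested externally rather than by the counter, so no
                 * refill happens and control reaches cpu_loop_exit().
                 */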
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
            /* Some compilers wrongly smash all local variables after
             * siglongjmp.  There were bug reports for gcc 4.5.0 and clang.
             * Reload essential local variables here for those compilers.
             * Newer versions of gcc would complain about this code (-Wclobbered). */
            cpu = current_cpu;
            cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
            env = &x86_cpu->env;
#endif
#else /* buggy compiler */
            /* Assert that the compiler does not smash local variables. */
            g_assert(cpu == current_cpu);
            g_assert(cc == CPU_GET_CLASS(cpu));
#ifdef TARGET_I386
            g_assert(x86_cpu == X86_CPU(cpu));
            g_assert(env == &x86_cpu->env);
#endif
#endif /* buggy compiler */
            cpu->can_do_io = 1;
            tb_lock_reset();
        }
    } /* for(;;) */
    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;

    /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
    atomic_set(&tcg_current_cpu, NULL);
    return ret;
}