/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/qemu-print.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/replay.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

static int64_t max_delay;
static int64_t max_advance;
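
/*
 * The alignment code below slows the host down (by sleeping) whenever the
 * guest's virtual clock has run ahead of real time by more than
 * VM_CLOCK_ADVANCE nanoseconds; max_delay and max_advance only record the
 * worst drift observed, for reporting by dump_drift_info().
 */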
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                        threshold_delay - 1,
                        threshold_delay);
        }
        nb_prints++;
        last_realtime_clock = sc->realtime_clock;
    }
}

static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG USER ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %d: %p ["
                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
                           cpu->cpu_index, itb->tc.ptr,
                           itb->cs_base, itb->pc, itb->flags,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        FILE *logfile = qemu_log_lock();
        int flags = 0;
        if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
            flags |= CPU_DUMP_FPU;
        }
#if defined(TARGET_I386)
        flags |= CPU_DUMP_CCOP;
#endif
        log_cpu_state(cpu, flags);
        qemu_log_unlock(logfile);
    }
#endif /* DEBUG_DISAS */
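
    /*
     * tcg_qemu_tb_exec() returns the address of the last TB that was
     * executed, with the exit reason encoded in the low TB_EXIT_MASK bits
     * (the TB_EXIT_* values).
     */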
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    mmap_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    mmap_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    mmap_lock();
    tb_phys_invalidate(tb, -1);
    mmap_unlock();
    tcg_tb_remove(tb);
}
#endif

static void cpu_exec_enter(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->cpu_exec_enter) {
        cc->cpu_exec_enter(cpu);
    }
}

static void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->cpu_exec_exit) {
        cc->cpu_exec_exit(cpu);
    }
}
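
/*
 * cpu_exec_step_atomic() runs a single TB with all other vCPUs stopped
 * (start_exclusive/end_exclusive), which is how an operation that cannot
 * be emulated safely in parallel gets serviced.
 */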
void cpu_exec_step_atomic(CPUState *cpu)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = 1;
    uint32_t cf_mask = cflags & CF_HASH_MASK;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();

        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        /* Since we got here, we know that parallel_cpus must be true.  */
        parallel_cpus = false;
        cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb);
        cpu_exec_exit(cpu);
    } else {
        /*
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
        qemu_plugin_disable_mem_helpers(cpu);
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    parallel_cpus = true;
    end_exclusive();
}
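
/*
 * tb_desc is the search key used by tb_htable_lookup() below: it carries
 * everything that identifies a translation in the global QHT hash table.
 */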
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};

static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
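
/*
 * Patch jump slot 'n' of 'tb' so that it branches to 'addr': either by
 * rewriting the jump instruction in the generated code (hosts with direct
 * jumps) or by updating the address that the indirect jump loads.
 */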
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                          (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}
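
/*
 * tb_find() first asks tb_lookup__cpu_state() for an existing translation
 * (per-CPU tb_jmp_cache, then the global hash table) and only calls the
 * translator on a miss; if we came from another TB it also tries to chain
 * that TB directly to the one returned.
 */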
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        mmap_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        mmap_unlock();
        /* We add the TB in the virtual pc hash table for the fast lookup */
        qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb) {
        tb_add_jump(last_tb, tb_exit, tb);
    }
    return tb;
}

static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    if (cc->debug_excp_handler) {
        cc->debug_excp_handler(cpu);
    }
}

static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* try to cause an exception pending in the log */
            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
        }
#endif
        if (cpu->exception_index < 0) {
            return false;
        }
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;

            if (unlikely(cpu->singlestep_enabled)) {
                /*
                 * After processing the exception, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                *ret = EXCP_DEBUG;
                cpu_handle_debug_exception(cpu);
                return true;
            }
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

/*
 * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
 * "real" interrupt event later. It does not need to be recorded for
 * replay purposes.
 */
static inline bool need_replay_interrupt(int interrupt_request)
{
#if defined(TARGET_I386)
    return !(interrupt_request & CPU_INTERRUPT_POLL);
#else
    return true;
#endif
}

static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and via longjmp via cpu_loop_exit.  */
        else {
            if (cc->cpu_exec_interrupt &&
                cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                if (need_replay_interrupt(interrupt_request)) {
                    replay_interrupt();
                }
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                cpu->exception_index =
                    (cpu->singlestep_enabled ? EXCP_DEBUG : -1);
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(qatomic_read(&cpu->exit_request))
        || (icount_enabled()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        qatomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}
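
/*
 * Run one TB and decide what to do next: keep chaining from the TB that
 * was reached, or, when the icount budget ran out mid-chain, refill the
 * decrementer and possibly finish the remaining instructions with an
 * uncached, exact-count TB.
 */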
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(icount_enabled());
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    icount_update(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra && insns_left < tb->icount) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        qemu_plugin_disable_mem_helpers(cpu);

        assert_no_pages_locked();
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /* When requested, use an exact setting for cflags for the next
               execution.  This is used for icount, precise smc, and stop-
               after-access watchpoints.  Since this request should never
               have CF_INVALID set, -1 is a convenient invalid value that
               does not require tcg headers for cpu_common_reset.  */
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}

#ifndef CONFIG_USER_ONLY

void dump_drift_info(void)
{
    if (!icount_enabled()) {
        return;
    }

    qemu_printf("Host - Guest clock  %"PRIi64" ms\n",
                (cpu_get_clock() - icount_get()) / SCALE_MS);
    if (icount_align_option) {
        qemu_printf("Max guest delay     %"PRIi64" ms\n",
                    -max_delay / SCALE_MS);
        qemu_printf("Max guest advance   %"PRIi64" ms\n",
                    max_advance / SCALE_MS);
    } else {
        qemu_printf("Max guest delay     NA\n");
        qemu_printf("Max guest advance   NA\n");
    }
}

#endif /* !CONFIG_USER_ONLY */