/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
22 #include "disas/disas.h"
23 #include "exec/exec-all.h"
25 #include "qemu/atomic.h"
26 #include "sysemu/qtest.h"
27 #include "qemu/timer.h"
28 #include "exec/address-spaces.h"
30 #include "exec/tb-hash.h"
31 #include "exec/tb-lookup.h"
33 #include "qemu/main-loop.h"
34 #if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
35 #include "hw/i386/apic.h"
37 #include "sysemu/cpus.h"
38 #include "sysemu/replay.h"
/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;
#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * be at most one quantum.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
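
/* align_clocks() sleeps the host whenever the guest's virtual clock has run
 * ahead of real time by more than VM_CLOCK_ADVANCE, while print_delay()
 * rate-limits a warning when the guest instead falls behind.
 */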
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG USER ONLY */
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %p [%d: " TARGET_FMT_lx "] %s\n",
                           itb->tc.ptr, cpu->cpu_index, itb->pc,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
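    /* The low TB_EXIT_MASK bits of the return value encode the exit reason;
     * the remaining bits point at the last TB that was executed.  Unpack
     * both below.
     */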
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}
#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    tb_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    tb_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    tb_lock();
    tb_phys_invalidate(tb, -1);
    tb_unlock();
}
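
/* cpu_exec_step_atomic() translates and runs a single TB inside an exclusive
 * region, with parallel_cpus temporarily cleared, so that one guest
 * instruction can be emulated atomically with respect to the other vCPUs.
 */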
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = 1;
    uint32_t cf_mask = cflags & CF_HASH_MASK;
    /* volatile because we modify it between setjmp and longjmp */
    volatile bool in_exclusive_region = false;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
        if (tb == NULL) {
            mmap_lock();
            tb_lock();
            tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
            if (likely(tb == NULL)) {
                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            }
            tb_unlock();
            mmap_unlock();
        }

        start_exclusive();

        /* Since we got here, we know that parallel_cpus must be true.  */
        parallel_cpus = false;
        in_exclusive_region = true;
        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);
    } else {
        /* We may have exited due to another problem here, so we need
         * to reset any tb_locks we may have taken but didn't release.
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        tb_lock_reset();
    }

    if (in_exclusive_region) {
        /* We might longjump out of either the codegen or the
         * execution, so must make sure we only end the exclusive
         * region if we started it.
         */
        parallel_cpus = true;
        end_exclusive();
    }
}
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};
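
/* tb_htable_lookup() fills a tb_desc from the current CPU state and uses
 * tb_cmp() as the comparison function for the qht lookup, also checking the
 * second physical page for TBs that cross a page boundary.
 */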
static bool tb_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup(&tb_ctx.htable, tb_cmp, &desc, h);
}
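
/* Direct-jump backends patch the generated code in place at tc_ptr + offset;
 * other backends instead store the destination in jmp_target_arg[], which
 * the translated code reads when it takes the jump.
 */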
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}
/* Called with tb_lock held.  */
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    if (tb->jmp_list_next[n]) {
        /* Another thread has already done this while we were
         * outside of the lock; nothing to do in this case */
        return;
    }
    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp circular list */
    tb->jmp_list_next[n] = tb_next->jmp_list_first;
    tb_next->jmp_list_first = (uintptr_t)tb | n;
}
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    bool acquired_tb_lock = false;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
         * taken outside tb_lock. As system emulation is currently
         * single threaded the locks are NOPs.
         */
        mmap_lock();
        tb_lock();
        acquired_tb_lock = true;

        /* There's a chance that our desired tb has been translated while
         * taking the locks so we check again inside the lock.
         */
        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
        if (likely(tb == NULL)) {
            /* if no translated code available, then translate it now */
            tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        }
        mmap_unlock();

        /* We add the TB in the virtual pc hash table for the fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        if (!acquired_tb_lock) {
            tb_lock();
            acquired_tb_lock = true;
        }
        if (!(tb->cflags & CF_INVALID)) {
            tb_add_jump(last_tb, tb_exit, tb);
        }
    }
    if (acquired_tb_lock) {
        tb_unlock();
    }
    return tb;
}
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index >= 0) {
        if (cpu->exception_index >= EXCP_INTERRUPT) {
            /* exit request from the cpu execution loop */
            *ret = cpu->exception_index;
            if (*ret == EXCP_DEBUG) {
                cpu_handle_debug_exception(cpu);
            }
            cpu->exception_index = -1;
            return true;
        } else {
#if defined(CONFIG_USER_ONLY)
            /* if user mode only, we simulate a fake exception
               which will be handled outside the cpu execution
               loop */
#if defined(TARGET_I386)
            CPUClass *cc = CPU_GET_CLASS(cpu);
            cc->do_interrupt(cpu);
#endif
            *ret = cpu->exception_index;
            cpu->exception_index = -1;
            return true;
#else
            if (replay_exception()) {
                CPUClass *cc = CPU_GET_CLASS(cpu);
                qemu_mutex_lock_iothread();
                cc->do_interrupt(cpu);
                qemu_mutex_unlock_iothread();
                cpu->exception_index = -1;
            } else if (!replay_has_interrupt()) {
                /* give a chance to iothread in replay mode */
                *ret = EXCP_INTERRUPT;
                return true;
            }
#endif
        }
#ifndef CONFIG_USER_ONLY
    } else if (replay_has_exception()
               && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
        /* try to cause an exception pending in the log */
        cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
#endif
    }

    return false;
}
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and via longjmp via cpu_loop_exit.  */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop.  */
    if (unlikely(atomic_read(&cpu->exit_request)
        || (use_icount && cpu->icount_decr.u16.low + cpu->icount_extra == 0))) {
        atomic_set(&cpu->exit_request, 0);
        cpu->exception_index = EXCP_INTERRUPT;
        return true;
    }

    return false;
}
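
/* cpu_loop_exec_tb() runs one TB; when icount is active and the 16-bit
 * decrementer has expired it refills it from the remaining icount_budget,
 * keeping the overflow in icount_extra so the budget is consumed in chunks
 * of at most 0xffff instructions.
 */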
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu->icount_decr.u32);
    atomic_set(&cpu->icount_decr.u16.high, 0);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which we will handle next time around
         * the loop.  But we need to ensure the zeroing of icount_decr
         * comes before the next read of cpu->exit_request
         * or cpu->interrupt_request.
         */
        smp_mb();
        return;
    }

    /* Instruction counter expired.  */
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    cpu_update_icount(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}
/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /* When requested, use an exact setting for cflags for the next
               execution.  This is used for icount, precise smc, and stop-
               after-access watchpoints.  Since this request should never
               have CF_INVALID set, -1 is a convenient invalid value that
               does not require tcg headers for cpu_common_reset.  */
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);

    return ret;
}