accel/tcg/cpu-exec.c (qemu/ar7.git)
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "sysemu/replay.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;
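
/*
 * diff_clk is the guest clock (QEMU_CLOCK_VIRTUAL) minus the host real-time
 * clock (QEMU_CLOCK_VIRTUAL_RT), in nanoseconds.  A positive value means the
 * guest runs ahead of the host and align_clocks() sleeps the difference off;
 * a negative value means the guest is late and print_delay() warns about it.
 * last_cpu_icount remembers the previous instruction-count snapshot so the
 * delta can be converted to nanoseconds on the next call.
 */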
#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to run at most 3 ms ahead of real time.
 * The difference between the two clocks can therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
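
/*
 * VM_CLOCK_ADVANCE and MAX_DELAY_PRINT_RATE are in nanoseconds (3 ms and 2 s
 * respectively); THRESHOLD_REDUCE is in seconds and MAX_NB_PRINTS is a plain
 * count, so "guest is late" warnings are limited to one every 2 seconds and
 * to 100 in total.
 */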

static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
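
/*
 * If nanosleep() is interrupted by a signal, the time that was not slept is
 * kept in diff_clk so the next align_clocks() call resumes the adjustment.
 * The Windows path only has millisecond resolution and simply discards
 * whatever Sleep() could not account for.
 */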

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* !CONFIG_USER_ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %d: %p ["
                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
                           cpu->cpu_index, itb->tc.ptr,
                           itb->cs_base, itb->pc, itb->flags,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        qemu_log_lock();
        int flags = 0;
        if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
            flags |= CPU_DUMP_FPU;
        }
#if defined(TARGET_I386)
        flags |= CPU_DUMP_CCOP;
#endif
        log_cpu_state(cpu, flags);
        qemu_log_unlock();
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}
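
/*
 * The value returned by cpu_tb_exec() packs two things: the address of the
 * last TranslationBlock that ran and, in the low TB_EXIT_MASK bits, why it
 * handed control back to the loop.  Callers decode it as
 *
 *     last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
 *     tb_exit = ret & TB_EXIT_MASK;
 *
 * Exit indexes above TB_EXIT_IDX1 mean the TB never started executing, which
 * is why the guest PC is restored to the start of the TB above.
 */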

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    mmap_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    mmap_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    mmap_lock();
    tb_phys_invalidate(tb, -1);
    mmap_unlock();
    tcg_tb_remove(tb);
}
#endif
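
/*
 * cpu_exec_nocache() is the path for executing a bounded number of guest
 * instructions: the limit is folded into cflags through CF_COUNT_MASK, the
 * throw-away TB (CF_NOCACHE) is run exactly once, then invalidated and
 * removed again.
 */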

void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = 1;
    uint32_t cf_mask = cflags & CF_HASH_MASK;
    /* volatile because we modify it between setjmp and longjmp */
    volatile bool in_exclusive_region = false;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        start_exclusive();

        /* Since we got here, we know that parallel_cpus must be true.  */
        parallel_cpus = false;
        in_exclusive_region = true;
        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);
    } else {
        /*
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
    }

    if (in_exclusive_region) {
        /* We might longjump out of either the codegen or the
         * execution, so must make sure we only end the exclusive
         * region if we started it.
         */
        parallel_cpus = true;
        end_exclusive();
    }
}
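
/*
 * cpu_exec_step_atomic() runs a single-instruction TB (cflags == 1 sets the
 * CF_COUNT_MASK budget to one insn) inside start_exclusive()/end_exclusive(),
 * i.e. while all other vCPUs are quiescent.  It is the fallback used for
 * guest atomic operations that cannot be mapped onto host atomics.
 */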

struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};

static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
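
/*
 * TB lookup is two-level: callers normally go through tb_lookup__cpu_state(),
 * which first probes the per-vCPU tb_jmp_cache hashed on the virtual PC and
 * only falls back to this qht lookup (keyed on physical PC, flags, cf_mask
 * and trace state) when the fast path misses.
 */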

void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = atomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}
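
/*
 * This is the TB-chaining machinery: once slot n of tb has been claimed, the
 * jump at the end of tb's generated code is patched to branch directly to
 * tb_next, bypassing the lookup in the main loop.  The link is also recorded
 * on tb_next's jmp_list_head (the slot index lives in the low bits of the
 * pointer) so it can be undone when tb_next is invalidated.
 */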

static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        mmap_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        mmap_unlock();
        /* We add the TB in the virtual pc hash table for the fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb) {
        tb_add_jump(last_tb, tb_exit, tb);
    }
    return tb;
}

static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* try to cause an exception pending in the log */
            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
        }
#endif
        if (cpu->exception_index < 0) {
            return false;
        }
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}
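
/*
 * cpu_handle_exception() returns true when cpu_exec() has to hand control
 * back to its caller with *ret set (EXCP_INTERRUPT, EXCP_DEBUG, or, for
 * user-mode emulation, the guest exception itself).  It returns false when
 * nothing is pending, or when the exception has already been injected into
 * the guest via cc->do_interrupt() and TB execution can simply continue.
 */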

static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    atomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has three exit conditions:
           false when the interrupt isn't processed,
           true when it is, and we should restart on a new TB,
           and a longjmp out via cpu_loop_exit().  */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop.  */
    if (unlikely(atomic_read(&cpu->exit_request))
        || (use_icount
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        atomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}
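
/*
 * Returning true here breaks out of the inner TB-execution loop and sends
 * control back through cpu_handle_exception(), typically with
 * cpu->exception_index set above (EXCP_DEBUG, EXCP_HLT, EXCP_HALTED,
 * EXCP_INTERRUPT); returning false lets the loop look up and run the next TB.
 */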

static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(use_icount);
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    cpu_update_icount(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}
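
/*
 * With icount, the translation-block instruction budget lives in two places:
 * the 16-bit decrementer icount_decr.u16.low, which the generated code
 * decrements directly, and icount_extra for whatever does not fit in 16 bits.
 * The refill above moves up to 0xffff instructions into the decrementer and
 * parks the rest in icount_extra until the next refill.
 */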

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
    }
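
    /*
     * Everything that lands back here arrives via cpu_loop_exit() and
     * friends, which siglongjmp() to cpu->jmp_env from inside generated code
     * or a helper.  Any locks taken on that path (mmap_lock, the iothread
     * mutex, page locks) are dropped in the block above before the loops
     * below resume normal execution.
     */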

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /* When requested, use an exact setting for cflags for the next
               execution.  This is used for icount, precise smc, and stop-
               after-access watchpoints.  Since this request should never
               have CF_INVALID set, -1 is a convenient invalid value that
               does not require tcg headers for cpu_common_reset.  */
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}