accel/tcg/cpu-exec.c

/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "sysemu/replay.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

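/* An informal gloss of the fields above, inferred from the code below:
 * diff_clk is how far the guest clock runs ahead of the host clock, in
 * nanoseconds; last_cpu_icount is the instruction-counter value seen at
 * the previous synchronisation point; realtime_clock caches a
 * QEMU_CLOCK_VIRTUAL_RT timestamp used to rate-limit the "guest is
 * late" warnings.
 */
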
#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

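/* align_clocks() below converts the instructions executed since the
 * last call into nanoseconds, accumulates them into the guest/host
 * clock difference, and sleeps the host thread when the guest has run
 * more than VM_CLOCK_ADVANCE (3 ms) ahead.  On POSIX hosts nanosleep()
 * may be interrupted by a signal, in which case the remaining delay is
 * carried over to the next call.
 */
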
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}

#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* !CONFIG_USER_ONLY */

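/* A note on the contract below, inferred from the masking logic:
 * tcg_qemu_tb_exec() returns the address of the last translated block
 * executed, with the low TB_EXIT_MASK bits encoding why execution
 * stopped (the chained-jump slot index for normal exits, larger values
 * when the TB was never started, e.g. on icount expiry).
 */
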
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc_ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %p [%d: " TARGET_FMT_lx "] %s\n",
                           itb->tc_ptr, cpu->cpu_index, itb->pc,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        qemu_log_lock();
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#else
        log_cpu_state(cpu, 0);
#endif
        qemu_log_unlock();
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc_ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles | CF_NOCACHE
                         | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
    tb->orig_tb = orig_tb;
    tb_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    tb_lock();
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
    tb_unlock();
}
#endif

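/* cpu_exec_step() translates exactly one guest instruction as an
 * uncached TB (count 1 | CF_NOCACHE | CF_IGNORE_ICOUNT), runs it, then
 * throws the translation away.  It is called from
 * cpu_exec_step_atomic() below while all other vCPUs are stopped, so
 * the instruction executes atomically with respect to them.
 */
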
static void cpu_exec_step(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        mmap_lock();
        tb_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags,
                         1 | CF_NOCACHE | CF_IGNORE_ICOUNT);
        tb->orig_tb = NULL;
        tb_unlock();
        mmap_unlock();

        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb_nocache(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);

        tb_lock();
        tb_phys_invalidate(tb, -1);
        tb_free(tb);
        tb_unlock();
    } else {
        /* We may have exited due to another problem here, so we need
         * to reset any tb_locks we may have taken but didn't release.
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        tb_lock_reset();
    }
}

void cpu_exec_step_atomic(CPUState *cpu)
{
    start_exclusive();

    /* Since we got here, we know that parallel_cpus must be true.  */
    parallel_cpus = false;
    cpu_exec_step(cpu);
    parallel_cpus = true;

    end_exclusive();
}

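/* TB lookup machinery: translated blocks live in a global QHT hash
 * table keyed on (physical page, pc, cs_base, flags, per-vCPU trace
 * state).  struct tb_desc carries the lookup key, and tb_cmp() is the
 * comparison callback handed to qht_lookup(); it also re-checks the
 * second physical page for TBs that span a page boundary.
 */
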
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t trace_vcpu_dstate;
};

static bool tb_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        !atomic_read(&tb->invalid)) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, *cpu->trace_dstate);
    return qht_lookup(&tcg_ctx.tb_ctx.htable, tb_cmp, &desc, h);
}

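/* Direct block chaining, as the code below implements it: when the TCG
 * backend supports direct jumps, jmp_target_arg[n] holds the offset
 * within the TB's code of the jump instruction to patch in place;
 * otherwise it holds the jump target address itself, which the
 * generated code loads indirectly.
 */
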
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc_ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

/* Called with tb_lock held.  */
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    if (tb->jmp_list_next[n]) {
        /* Another thread has already done this while we were
         * outside of the lock; nothing to do in this case */
        return;
    }
    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc_ptr, tb->pc, n,
                           tb_next->tc_ptr, tb_next->pc);

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

    /* add in TB jmp circular list */
    tb->jmp_list_next[n] = tb_next->jmp_list_first;
    tb_next->jmp_list_first = (uintptr_t)tb | n;
}

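/* tb_find: the fast path is the per-vCPU tb_jmp_cache, a direct-mapped
 * cache indexed by a hash of the guest pc.  On a miss it falls back to
 * the global hash table, and only then translates a new block (taking
 * mmap_lock and tb_lock).  If a valid block is found and chaining is
 * allowed, the previous TB's exit is patched to jump straight to it.
 */
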
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    bool have_tb_lock = false;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags ||
                 tb->trace_vcpu_dstate != *cpu->trace_dstate)) {
        tb = tb_htable_lookup(cpu, pc, cs_base, flags);
        if (!tb) {
            /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
             * taken outside tb_lock. As system emulation is currently
             * single threaded the locks are NOPs.
             */
            mmap_lock();
            tb_lock();
            have_tb_lock = true;

            /* There's a chance that our desired tb has been translated while
             * taking the locks so we check again inside the lock.
             */
            tb = tb_htable_lookup(cpu, pc, cs_base, flags);
            if (!tb) {
                /* if no translated code available, then translate it now */
                tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
            }

            mmap_unlock();
        }

        /* We add the TB in the virtual pc hash table for the fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        if (!have_tb_lock) {
            tb_lock();
            have_tb_lock = true;
        }
        if (!tb->invalid) {
            tb_add_jump(last_tb, tb_exit, tb);
        }
    }
    if (have_tb_lock) {
        tb_unlock();
    }
    return tb;
}

static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index >= 0) {
        if (cpu->exception_index >= EXCP_INTERRUPT) {
            /* exit request from the cpu execution loop */
            *ret = cpu->exception_index;
            if (*ret == EXCP_DEBUG) {
                cpu_handle_debug_exception(cpu);
            }
            cpu->exception_index = -1;
            return true;
        } else {
#if defined(CONFIG_USER_ONLY)
            /* if user mode only, we simulate a fake exception
               which will be handled outside the cpu execution
               loop */
#if defined(TARGET_I386)
            CPUClass *cc = CPU_GET_CLASS(cpu);
            cc->do_interrupt(cpu);
#endif
            *ret = cpu->exception_index;
            cpu->exception_index = -1;
            return true;
#else
            if (replay_exception()) {
                CPUClass *cc = CPU_GET_CLASS(cpu);
                qemu_mutex_lock_iothread();
                cc->do_interrupt(cpu);
                qemu_mutex_unlock_iothread();
                cpu->exception_index = -1;
            } else if (!replay_has_interrupt()) {
                /* give a chance to iothread in replay mode */
                *ret = EXCP_INTERRUPT;
                return true;
            }
#endif
        }
#ifndef CONFIG_USER_ONLY
    } else if (replay_has_exception()
               && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
        /* try to cause an exception pending in the log */
        cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0), true);
        *ret = -1;
        return true;
#endif
    }

    return false;
}

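/* cpu_handle_interrupt() services interrupts in a fixed priority order
 * with the iothread mutex held: debug requests first, then halt, then
 * INIT (x86) or reset (other targets), and finally the per-target
 * cc->cpu_exec_interrupt hook.  It returns true when the caller should
 * abandon the current TB chain and go back round the main loop.
 */
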
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has 3 exit conditions:
           false when the interrupt isn't processed,
           true when it is, and we should restart on a new TB,
           or it may longjmp out via cpu_loop_exit.  */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(atomic_read(&cpu->exit_request)
        || (use_icount && cpu->icount_decr.u16.low + cpu->icount_extra == 0))) {
        atomic_set(&cpu->exit_request, 0);
        cpu->exception_index = EXCP_INTERRUPT;
        return true;
    }

    return false;
}

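/* cpu_loop_exec_tb() runs one TB chain and decodes the exit status.  A
 * TB_EXIT_REQUESTED exit means either an asynchronous exit request
 * (icount_decr went negative) or, under -icount, that the instruction
 * budget for this slice ran out; in the latter case the 16-bit
 * decrementer is refilled from icount_budget and any remainder is
 * executed via an uncached TB.
 */
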
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu->icount_decr.u32);
    atomic_set(&cpu->icount_decr.u16.high, 0);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which we will handle next time around
         * the loop.  But we need to ensure the zeroing of icount_decr
         * comes before the next read of cpu->exit_request
         * or cpu->interrupt_request.
         */
        smp_mb();
        return;
    }

    /* Instruction counter expired.  */
    assert(use_icount);
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    cpu_update_icount(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

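/* Structure of cpu_exec() below: sigsetjmp() establishes the landing
 * point for cpu_loop_exit(); the outer loop drains pending exceptions,
 * while the inner loop services interrupts, then finds and runs one TB
 * chain per iteration, realigning the guest and host clocks afterwards
 * when -icount align is active.
 */
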
/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
        cpu->can_do_io = 1;
        tb_lock_reset();
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            TranslationBlock *tb = tb_find(cpu, last_tb, tb_exit);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}