/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
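
/* align_clocks() sleeps the host whenever the guest's virtual clock has run
 * more than VM_CLOCK_ADVANCE ahead of real time, so that with -icount align
 * the two clocks stay close together; when the guest is instead late,
 * print_delay() below only warns about it.
 */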
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */
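
    /* tcg_qemu_tb_exec() returns the address of the last executed TB with
     * the exit reason packed into its low TB_EXIT_MASK bits; the code below
     * decodes that value and repairs the guest PC when the TB did not
     * actually run.
     */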
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
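
/* TB lookup is two-level: tb_find_fast() probes the per-CPU virtual-PC jump
 * cache, and on a miss tb_find_slow() walks the physical-address hash chain,
 * translating a new block if nothing matches.
 */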
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;
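
/* cpu_exec() is structured as two nested loops: the outer loop re-arms the
 * sigsetjmp() context and services pending exceptions, while the inner loop
 * handles interrupt requests, finds (or chains) the next TB and runs it
 * until something forces an exit back through cpu_loop_exit().
 */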
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
#elif defined(TARGET_TRICORE)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for (;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
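                    /* Target-specific handling of the remaining interrupt
                     * bits. Whenever an interrupt changes the program flow,
                     * next_tb is reset to 0 so the TB about to run is not
                     * chained to the previously executed one.
                     */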
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || \
    defined(TARGET_UNICORE32) || defined(TARGET_TRICORE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_TRICORE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }

#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
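                /* tb_lock protects the translation buffer and the physical
                 * hash while we look up (and possibly generate) the next TB
                 * and patch the previous TB to jump straight to it.
                 */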
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
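                    /* The two icount counters work together: icount_decr is
                     * the 16-bit budget decremented by the translated code,
                     * and icount_extra holds the instructions still owed
                     * beyond that budget. When the decrementer expires we
                     * either refill it from icount_extra, or run the leftover
                     * instructions without caching and leave the loop.
                     */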
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_TRICORE)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}