target-m68k: Use cpu_exec_enter/exit qom hooks
[qemu/ar7.git] / cpu-exec.c
blob 9f84dda020f63f7e48bb7e87800bb53913cb622b
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;
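/* Note on the fields above (descriptive comment added for clarity):
   diff_clk tracks how far the guest clock has run ahead of the host clock
   in nanoseconds (negative when the guest is late), last_cpu_icount is the
   instruction-counter value seen at the previous alignment, and
   realtime_clock is a host realtime snapshot used by print_delay() for
   rate limiting. */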

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
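
/* Print a rate-limited warning (at most once every MAX_DELAY_PRINT_RATE ns
   and MAX_NB_PRINTS times in total) when the guest falls behind real time. */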
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG USER ONLY */

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif
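
/* Note: tcg_qemu_tb_exec() returns the address of the last executed TB
   combined with an exit condition in the low TB_EXIT_MASK bits: either the
   index of the chained-jump slot that was taken, TB_EXIT_REQUESTED, or
   TB_EXIT_ICOUNT_EXPIRED.  cpu_tb_exec() below interprets those bits. */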
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
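
/* TB lookup is two-level: tb_find_fast() first checks the per-CPU
   tb_jmp_cache hashed by virtual PC; on a miss, tb_find_slow() walks the
   physical-PC hash chain and, if no block matches, translates a new one
   with tb_gen_code(). */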
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
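
/* Called when the guest stops with EXCP_DEBUG: clear the per-watchpoint
   hit flags unless a watchpoint is still pending, then hand control to the
   target's debug exception handler hook. */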
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

/* main execution loop */
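
/* Global exit request: other threads set this (possibly while current_cpu
   is still NULL) to make the next cpu_exec() invocation bail out early;
   see the smp_mb() comment in cpu_exec() below. */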
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_PPC)
    env->reserve_addr = -1;
#endif
    cc->cpu_exec_enter(cpu);
    cpu->exception_index = -1;

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
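                /* Inner loop: service any pending interrupt or exit
                   request, look up (or translate) the next TB, chain it to
                   the previous one when possible and execute it;
                   exceptional conditions leave via cpu_loop_exit(), i.e.
                   siglongjmp() back to the outer loop. */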
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || \
    defined(TARGET_UNICORE32) || defined(TARGET_TRICORE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_TRICORE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->daif & PSTATE_I)
                        && (!IS_M(env) || env->regs[15] < 0xfffffff0)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
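                /* Take the TB lock around lookup and patching of the TB
                   chain; have_tb_lock is volatile so that the longjmp path
                   in the outer loop can tell whether it still needs to
                   release the lock. */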
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

    cc->cpu_exec_exit(cpu);

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}