cpu-exec: Print to console if the guest is late
[qemu/ar7.git] / cpu-exec.c
blob 3c1450232951ebe46b0efeb19a277ff485a82d58
/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;
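/* A rough gloss on the fields above, inferred from how align_clocks() and
 * init_delay_params() below use them:
 *   diff_clk        - approximate guest-minus-host clock difference in ns;
 *                     positive means the guest runs ahead of real time and
 *                     the host should sleep, negative means the guest lags.
 *   last_cpu_icount - icount budget (icount_extra + icount_decr.u16.low)
 *                     recorded at the previous alignment point, used to
 *                     derive how many instructions ran since then.
 *   realtime_clock  - QEMU_CLOCK_REALTIME value sampled when the delay
 *                     parameters were (re)initialised, also used to
 *                     rate-limit the "guest is late" warnings.
 */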

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

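/* In rough terms: VM_CLOCK_ADVANCE is 3000000 ns = 3 ms of allowed guest
 * advance before align_clocks() below puts the host thread to sleep;
 * MAX_DELAY_PRINT_RATE (2000000000 ns = 2 s) and MAX_NB_PRINTS (100) bound
 * how often and how many times print_delay() warns about a late guest;
 * THRESHOLD_REDUCE (1.5 s) is the hysteresis applied before a warning is
 * repeated.
 */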
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

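/* Note on print_delay() above: threshold_delay remembers the lateness (in
 * whole seconds) that was last reported, so a delay that stays within
 * [threshold_delay - THRESHOLD_REDUCE, threshold_delay] produces no new
 * output; only a delay that grows past the threshold, or shrinks well below
 * it, is reported again, subject to the 2 s rate limit and the 100-message
 * cap checked first.
 */
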
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100) */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG USER ONLY */

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

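/* Roughly: the value returned by tcg_qemu_tb_exec() is the address of the
 * last executed TranslationBlock with the exit reason OR'ed into its low
 * bits (TB_EXIT_MASK).  Values 0/1 name the goto_tb slot that was taken, so
 * the caller can chain the TBs; larger values mean the TB was abandoned
 * before its first instruction, either because an exit was requested or
 * because the icount budget ran out, and the guest PC must be fixed up. */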
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt.  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

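/* TB lookup is two-level: tb_find_fast() below first probes the per-CPU
 * tb_jmp_cache, indexed by a hash of the virtual PC; only on a miss does it
 * fall back to tb_find_slow() above, which walks the physical-PC hash chain
 * and, when nothing matches, translates a fresh block with tb_gen_code(). */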
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

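/* Overall shape of cpu_exec(): the outer for(;;) establishes a sigsetjmp()
 * target so that cpu_loop_exit() can unwind back here via siglongjmp();
 * inside it, an inner for(;;) services interrupt_request, looks up (or
 * translates) the next TB, executes it and tries to chain it to its
 * predecessor.  The loop only breaks out, with ret taken from
 * exception_index, when the pending exception cannot be handled inside the
 * loop (EXCP_INTERRUPT and above, or any exception in user mode). */
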
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

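    /* From here on align_clocks(&sc, cpu) is run on every pass through the
     * inner loop below (and after cpu_exec_nocache()), so with -icount align
     * the host thread is throttled at roughly TB granularity against the
     * baseline just computed. */
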
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
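                    /* Sketch of the icount bookkeeping handled by the next
                     * case: icount_decr.u16.low is a 16-bit down-counter
                     * decremented by the generated code, so any budget above
                     * 0xffff is parked in icount_extra and fed into the
                     * decrementer in chunks; once both are exhausted, any
                     * instructions still owed are run uncached and the loop
                     * exits with EXCP_INTERRUPT so pending timers can be
                     * serviced in the main loop. */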
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}