trace: teach lttng backend to use format strings
[qemu/cris-port.git] / cpu-exec.c
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;
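/* diff_clk is the signed difference between guest virtual time and host real
   time: a positive value means the guest has run ahead of the host.
   last_cpu_icount records the instruction budget (icount_extra plus the low
   16 bits of the decrementer) seen at the previous alignment point, so that
   align_clocks() can convert the instructions executed since then into
   nanoseconds.  realtime_clock is the host clock sampled in
   init_delay_params() and is only used to rate-limit print_delay(). */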
#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
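/* Note on the sleep above: once diff_clk exceeds VM_CLOCK_ADVANCE the guest
   is ahead of real time, so the thread sleeps the excess away.  On POSIX
   hosts an interrupted nanosleep() only subtracts the time actually slept
   (sleep_delay minus rem_delay); on Windows the Sleep() granularity is
   coarser, so diff_clk is simply reset to zero. */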
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }

    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
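/* max_delay and max_advance are not defined in this file; they presumably
   live with the rest of the icount code (e.g. in cpus.c) and record the
   worst guest delay/advance observed, so the initial offset measured here
   is folded into those statistics. */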
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */
void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt); we've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
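/* The value returned by tcg_qemu_tb_exec() is the host address of the last
   TranslationBlock that ran, with the exit reason encoded in its low bits
   (the TB_EXIT_* values selected by TB_EXIT_MASK).  Callers strip the mask
   to recover the TB pointer and switch on the exit reason, as cpu_exec()
   does below. */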
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
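/* cpu_exec_nocache() is used on the icount path below, when the remaining
   instruction budget is smaller than the TB that was about to run: a
   temporary TB capped at max_cycles instructions is generated, executed
   once, and immediately invalidated and freed so it never enters the
   translation cache. */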
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
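/* tb_find_slow() looks the block up by physical address, so a TB shared by
   several virtual mappings of the same code is found regardless of which
   mapping is executing.  The move-to-front on tb_phys_hash keeps recently
   used blocks near the head of each collision chain, and the final store
   primes the per-CPU virtual-PC cache consulted by tb_find_fast(). */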
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
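/* tb_jmp_cache is a small direct-mapped cache indexed by a hash of the guest
   PC; a hit must still match pc, cs_base and flags because unrelated blocks
   can collide on the same slot.  Only on a miss do we fall back to the
   physical hash table lookup (and possibly a fresh translation) in
   tb_find_slow(). */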
static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
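/* When the debug exception was not raised by a watchpoint (watchpoint_hit is
   NULL), any stale BP_WATCHPOINT_HIT flags are cleared here before the
   optional debug exception hook runs. */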
/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;
    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();
    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;
    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
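            /* next_tb carries the return value of the previous cpu_tb_exec()
               call: the address of the TB that just ran plus the TB_EXIT_*
               index in its low bits.  A non-zero value lets the loop below
               patch a direct jump from that TB to the one found next (via
               tb_add_jump), so clearing it here forces an unchained lookup
               for the first block. */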
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
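                        /* The instruction budget is kept in two parts: the
                           16-bit decrementer (icount_decr.u16.low) that the
                           generated code counts down, and icount_extra for
                           the remainder.  If icount_extra still has budget,
                           the decrementer is refilled and execution
                           continues; otherwise any leftover instructions are
                           run through cpu_exec_nocache() and we exit with
                           EXCP_INTERRUPT. */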
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}