/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

//#define CONFIG_DEBUG_EXEC
bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}
void cpu_loop_exit(CPUArchState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    tcg_target_ulong next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
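
/* Slow-path TB lookup: search the physical-address hash table
   (tcg_ctx.tb_ctx.tb_phys_hash) for a block matching pc, cs_base and flags,
   also checking the second physical page for blocks that cross a page
   boundary; if nothing matches, translate a new block with tb_gen_code(). */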
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
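
/* Fast-path TB lookup: consult the per-CPU virtual-PC jump cache
   (env->tb_jmp_cache) first and fall back to tb_find_slow() only on a
   miss or when the cached block no longer matches the CPU state. */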
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}
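
/* Called when cpu_exec() is about to return EXCP_DEBUG: if the exception
   was not raised by a watchpoint, clear any stale BP_WATCHPOINT_HIT flags,
   then give the registered debug exception handler a chance to run. */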
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
/* main execution loop */

volatile sig_atomic_t exit_request;
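
/* Run generated code for the given CPU until an exception or an exit
   request forces us back out; the return value is the exception index
   (e.g. EXCP_INTERRUPT, EXCP_HLT, EXCP_DEBUG). */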
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (env->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }
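
    /* Per-target entry work: targets with lazily evaluated condition codes
       (i386, m68k) convert the architectural flags into the temporary form
       the generated code expects; most other targets need nothing or only a
       minor fixup (e.g. ppc clears its load-reserve address). */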
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
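            /* next_tb holds the address of the previously executed TB with
               the index of the taken jump slot in its two low bits; it is
               used below to chain TBs directly together.  Zero means "do
               not chain". */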
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
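                    /* Target-specific hardware interrupt delivery: each
                       accepted interrupt updates exception_index and/or calls
                       do_interrupt(), and clears next_tb so the interrupted
                       TB is not chained to the next one. */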
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        env->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, CPU_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
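                /* tb_lock serializes TB lookup and direct-jump patching;
                   this mainly matters for user-mode emulation, where several
                   guest threads share the translation cache. */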
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
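                    /* The return value packs the address of the last executed
                       TB with the index of the taken jump slot in its two low
                       bits; an index of 2 means the TB stopped because the
                       icount decrementer ran out mid-block. */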
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */
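
    /* Convert the lazily maintained CPU state back to its architectural
       format before returning, mirroring the conversion done on entry. */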
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}