/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "disas/disas.h"
23 #include "qemu/atomic.h"
24 #include "sysemu/qtest.h"
26 //#define CONFIG_DEBUG_EXEC
28 bool qemu_cpu_has_work(CPUState
*cpu
)
30 return cpu_has_work(cpu
);
33 void cpu_loop_exit(CPUArchState
*env
)
35 CPUState
*cpu
= ENV_GET_CPU(env
);
37 cpu
->current_tb
= NULL
;
38 siglongjmp(env
->jmp_env
, 1);
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator */
#if defined(CONFIG_SOFTMMU)
/* Resume the main loop from a signal handler: clear any pending
 * exception and longjmp back to cpu_exec().  @puc is the host signal
 * context (unused in the softmmu variant). */
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif
54 /* Execute the code without caching the generated code. An interpreter
55 could be used if available. */
56 static void cpu_exec_nocache(CPUArchState
*env
, int max_cycles
,
57 TranslationBlock
*orig_tb
)
59 CPUState
*cpu
= ENV_GET_CPU(env
);
60 tcg_target_ulong next_tb
;
63 /* Should never happen.
64 We only end up here when an existing TB is too long. */
65 if (max_cycles
> CF_COUNT_MASK
)
66 max_cycles
= CF_COUNT_MASK
;
68 tb
= tb_gen_code(env
, orig_tb
->pc
, orig_tb
->cs_base
, orig_tb
->flags
,
71 /* execute the generated code */
72 next_tb
= tcg_qemu_tb_exec(env
, tb
->tc_ptr
);
73 cpu
->current_tb
= NULL
;
75 if ((next_tb
& 3) == 2) {
76 /* Restore PC. This may happen if async event occurs before
77 the TB starts executing. */
78 cpu_pc_from_tb(env
, tb
);
80 tb_phys_invalidate(tb
, -1);
84 static TranslationBlock
*tb_find_slow(CPUArchState
*env
,
89 TranslationBlock
*tb
, **ptb1
;
91 tb_page_addr_t phys_pc
, phys_page1
;
92 target_ulong virt_page2
;
94 tcg_ctx
.tb_ctx
.tb_invalidated_flag
= 0;
96 /* find translated block using physical mappings */
97 phys_pc
= get_page_addr_code(env
, pc
);
98 phys_page1
= phys_pc
& TARGET_PAGE_MASK
;
99 h
= tb_phys_hash_func(phys_pc
);
100 ptb1
= &tcg_ctx
.tb_ctx
.tb_phys_hash
[h
];
106 tb
->page_addr
[0] == phys_page1
&&
107 tb
->cs_base
== cs_base
&&
108 tb
->flags
== flags
) {
109 /* check next page if needed */
110 if (tb
->page_addr
[1] != -1) {
111 tb_page_addr_t phys_page2
;
113 virt_page2
= (pc
& TARGET_PAGE_MASK
) +
115 phys_page2
= get_page_addr_code(env
, virt_page2
);
116 if (tb
->page_addr
[1] == phys_page2
)
122 ptb1
= &tb
->phys_hash_next
;
125 /* if no translated code available, then translate it now */
126 tb
= tb_gen_code(env
, pc
, cs_base
, flags
, 0);
129 /* Move the last found TB to the head of the list */
131 *ptb1
= tb
->phys_hash_next
;
132 tb
->phys_hash_next
= tcg_ctx
.tb_ctx
.tb_phys_hash
[h
];
133 tcg_ctx
.tb_ctx
.tb_phys_hash
[h
] = tb
;
135 /* we add the TB in the virtual pc hash table */
136 env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)] = tb
;
140 static inline TranslationBlock
*tb_find_fast(CPUArchState
*env
)
142 TranslationBlock
*tb
;
143 target_ulong cs_base
, pc
;
146 /* we record a subset of the CPU state. It will
147 always be the same before a given translated block
149 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &flags
);
150 tb
= env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)];
151 if (unlikely(!tb
|| tb
->pc
!= pc
|| tb
->cs_base
!= cs_base
||
152 tb
->flags
!= flags
)) {
153 tb
= tb_find_slow(env
, pc
, cs_base
, flags
);
/* Optional callback invoked when a debug exception is handled; set via
 * cpu_set_debug_excp_handler() below. */
158 static CPUDebugExcpHandler
*debug_excp_handler
;
160 void cpu_set_debug_excp_handler(CPUDebugExcpHandler
*handler
)
162 debug_excp_handler
= handler
;
165 static void cpu_handle_debug_exception(CPUArchState
*env
)
169 if (!env
->watchpoint_hit
) {
170 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
171 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
174 if (debug_excp_handler
) {
175 debug_excp_handler(env
);
179 /* main execution loop */

/* Set asynchronously (e.g. from a signal handler) to ask the loop to
 * stop; sig_atomic_t + volatile so a handler can write it safely. */
181 volatile sig_atomic_t exit_request
;
/*
 * Main CPU execution loop: repeatedly finds/translates and runs TBs for
 * @env until an exception or exit request unwinds via the
 * sigsetjmp/siglongjmp pair below; returns the exception index
 * (e.g. EXCP_INTERRUPT, EXCP_DEBUG, EXCP_HLT).
 * NOTE(review): this region is heavily target-#ifdef'd; several lines
 * appear truncated by extraction — verify against upstream before use.
 */
183 int cpu_exec(CPUArchState
*env
)
185 CPUState
*cpu
= ENV_GET_CPU(env
);
186 int ret
, interrupt_request
;
187 TranslationBlock
*tb
;
189 tcg_target_ulong next_tb
;
/* Fast path: nothing to execute when the CPU has no pending work. */
192 if (!cpu_has_work(cpu
)) {
199 cpu_single_env
= env
;
201 if (unlikely(exit_request
)) {
202 cpu
->exit_request
= 1;
/* Per-target setup: load guest flag state into the layout TCG expects. */
205 #if defined(TARGET_I386)
206 /* put eflags in CPU temporary format */
207 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
208 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
209 CC_OP
= CC_OP_EFLAGS
;
210 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
211 #elif defined(TARGET_SPARC)
212 #elif defined(TARGET_M68K)
213 env
->cc_op
= CC_OP_FLAGS
;
214 env
->cc_dest
= env
->sr
& 0xf;
215 env
->cc_x
= (env
->sr
>> 4) & 1;
216 #elif defined(TARGET_ALPHA)
217 #elif defined(TARGET_ARM)
218 #elif defined(TARGET_UNICORE32)
219 #elif defined(TARGET_PPC)
220 env
->reserve_addr
= -1;
221 #elif defined(TARGET_LM32)
222 #elif defined(TARGET_MICROBLAZE)
223 #elif defined(TARGET_MIPS)
224 #elif defined(TARGET_OPENRISC)
225 #elif defined(TARGET_SH4)
226 #elif defined(TARGET_CRIS)
227 #elif defined(TARGET_S390X)
228 #elif defined(TARGET_XTENSA)
231 #error unsupported target CPU
233 env
->exception_index
= -1;
235 /* prepare setjmp context for exception handling */
237 if (sigsetjmp(env
->jmp_env
, 0) == 0) {
238 /* if an exception is pending, we execute it here */
239 if (env
->exception_index
>= 0) {
240 if (env
->exception_index
>= EXCP_INTERRUPT
) {
241 /* exit request from the cpu execution loop */
242 ret
= env
->exception_index
;
243 if (ret
== EXCP_DEBUG
) {
244 cpu_handle_debug_exception(env
);
248 #if defined(CONFIG_USER_ONLY)
249 /* if user mode only, we simulate a fake exception
250 which will be handled outside the cpu execution
252 #if defined(TARGET_I386)
255 ret
= env
->exception_index
;
259 env
->exception_index
= -1;
264 next_tb
= 0; /* force lookup of first TB */
/* Snapshot pending interrupts once; per-target dispatch follows. */
266 interrupt_request
= env
->interrupt_request
;
267 if (unlikely(interrupt_request
)) {
268 if (unlikely(env
->singlestep_enabled
& SSTEP_NOIRQ
)) {
269 /* Mask out external interrupts for this step. */
270 interrupt_request
&= ~CPU_INTERRUPT_SSTEP_MASK
;
272 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
273 env
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
274 env
->exception_index
= EXCP_DEBUG
;
277 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
278 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
279 defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
280 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
281 env
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
283 env
->exception_index
= EXCP_HLT
;
287 #if defined(TARGET_I386)
288 #if !defined(CONFIG_USER_ONLY)
289 if (interrupt_request
& CPU_INTERRUPT_POLL
) {
290 env
->interrupt_request
&= ~CPU_INTERRUPT_POLL
;
291 apic_poll_irq(env
->apic_state
);
294 if (interrupt_request
& CPU_INTERRUPT_INIT
) {
295 cpu_svm_check_intercept_param(env
, SVM_EXIT_INIT
,
297 do_cpu_init(x86_env_get_cpu(env
));
298 env
->exception_index
= EXCP_HALTED
;
300 } else if (interrupt_request
& CPU_INTERRUPT_SIPI
) {
301 do_cpu_sipi(x86_env_get_cpu(env
));
302 } else if (env
->hflags2
& HF2_GIF_MASK
) {
303 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
304 !(env
->hflags
& HF_SMM_MASK
)) {
305 cpu_svm_check_intercept_param(env
, SVM_EXIT_SMI
,
307 env
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
310 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
311 !(env
->hflags2
& HF2_NMI_MASK
)) {
312 env
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
313 env
->hflags2
|= HF2_NMI_MASK
;
314 do_interrupt_x86_hardirq(env
, EXCP02_NMI
, 1);
316 } else if (interrupt_request
& CPU_INTERRUPT_MCE
) {
317 env
->interrupt_request
&= ~CPU_INTERRUPT_MCE
;
318 do_interrupt_x86_hardirq(env
, EXCP12_MCHK
, 0);
320 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
321 (((env
->hflags2
& HF2_VINTR_MASK
) &&
322 (env
->hflags2
& HF2_HIF_MASK
)) ||
323 (!(env
->hflags2
& HF2_VINTR_MASK
) &&
324 (env
->eflags
& IF_MASK
&&
325 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
))))) {
327 cpu_svm_check_intercept_param(env
, SVM_EXIT_INTR
,
329 env
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VIRQ
);
330 intno
= cpu_get_pic_interrupt(env
);
331 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Servicing hardware INT=0x%02x\n", intno
);
332 do_interrupt_x86_hardirq(env
, intno
, 1);
333 /* ensure that no TB jump will be modified as
334 the program flow was changed */
336 #if !defined(CONFIG_USER_ONLY)
337 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
338 (env
->eflags
& IF_MASK
) &&
339 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
341 /* FIXME: this should respect TPR */
342 cpu_svm_check_intercept_param(env
, SVM_EXIT_VINTR
,
344 intno
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_vector
));
345 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Servicing virtual hardware INT=0x%02x\n", intno
);
346 do_interrupt_x86_hardirq(env
, intno
, 1);
347 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
352 #elif defined(TARGET_PPC)
353 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
356 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
357 ppc_hw_interrupt(env
);
358 if (env
->pending_interrupts
== 0)
359 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
362 #elif defined(TARGET_LM32)
363 if ((interrupt_request
& CPU_INTERRUPT_HARD
)
364 && (env
->ie
& IE_IE
)) {
365 env
->exception_index
= EXCP_IRQ
;
369 #elif defined(TARGET_MICROBLAZE)
370 if ((interrupt_request
& CPU_INTERRUPT_HARD
)
371 && (env
->sregs
[SR_MSR
] & MSR_IE
)
372 && !(env
->sregs
[SR_MSR
] & (MSR_EIP
| MSR_BIP
))
373 && !(env
->iflags
& (D_FLAG
| IMM_FLAG
))) {
374 env
->exception_index
= EXCP_IRQ
;
378 #elif defined(TARGET_MIPS)
379 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
380 cpu_mips_hw_interrupts_pending(env
)) {
382 env
->exception_index
= EXCP_EXT_INTERRUPT
;
387 #elif defined(TARGET_OPENRISC)
390 if ((interrupt_request
& CPU_INTERRUPT_HARD
)
391 && (env
->sr
& SR_IEE
)) {
394 if ((interrupt_request
& CPU_INTERRUPT_TIMER
)
395 && (env
->sr
& SR_TEE
)) {
399 env
->exception_index
= idx
;
404 #elif defined(TARGET_SPARC)
405 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
406 if (cpu_interrupts_enabled(env
) &&
407 env
->interrupt_index
> 0) {
408 int pil
= env
->interrupt_index
& 0xf;
409 int type
= env
->interrupt_index
& 0xf0;
411 if (((type
== TT_EXTINT
) &&
412 cpu_pil_allowed(env
, pil
)) ||
414 env
->exception_index
= env
->interrupt_index
;
420 #elif defined(TARGET_ARM)
421 if (interrupt_request
& CPU_INTERRUPT_FIQ
422 && !(env
->uncached_cpsr
& CPSR_F
)) {
423 env
->exception_index
= EXCP_FIQ
;
427 /* ARMv7-M interrupt return works by loading a magic value
428 into the PC. On real hardware the load causes the
429 return to occur. The qemu implementation performs the
430 jump normally, then does the exception return when the
431 CPU tries to execute code at the magic address.
432 This will cause the magic PC value to be pushed to
433 the stack if an interrupt occurred at the wrong time.
434 We avoid this by disabling interrupts when
435 pc contains a magic address. */
436 if (interrupt_request
& CPU_INTERRUPT_HARD
437 && ((IS_M(env
) && env
->regs
[15] < 0xfffffff0)
438 || !(env
->uncached_cpsr
& CPSR_I
))) {
439 env
->exception_index
= EXCP_IRQ
;
443 #elif defined(TARGET_UNICORE32)
444 if (interrupt_request
& CPU_INTERRUPT_HARD
445 && !(env
->uncached_asr
& ASR_I
)) {
446 env
->exception_index
= UC32_EXCP_INTR
;
450 #elif defined(TARGET_SH4)
451 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
455 #elif defined(TARGET_ALPHA)
458 /* ??? This hard-codes the OSF/1 interrupt levels. */
459 switch (env
->pal_mode
? 7 : env
->ps
& PS_INT_MASK
) {
461 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
462 idx
= EXCP_DEV_INTERRUPT
;
466 if (interrupt_request
& CPU_INTERRUPT_TIMER
) {
467 idx
= EXCP_CLK_INTERRUPT
;
471 if (interrupt_request
& CPU_INTERRUPT_SMP
) {
472 idx
= EXCP_SMP_INTERRUPT
;
476 if (interrupt_request
& CPU_INTERRUPT_MCHK
) {
481 env
->exception_index
= idx
;
487 #elif defined(TARGET_CRIS)
488 if (interrupt_request
& CPU_INTERRUPT_HARD
489 && (env
->pregs
[PR_CCS
] & I_FLAG
)
490 && !env
->locked_irq
) {
491 env
->exception_index
= EXCP_IRQ
;
495 if (interrupt_request
& CPU_INTERRUPT_NMI
) {
496 unsigned int m_flag_archval
;
497 if (env
->pregs
[PR_VR
] < 32) {
498 m_flag_archval
= M_FLAG_V10
;
500 m_flag_archval
= M_FLAG_V32
;
502 if ((env
->pregs
[PR_CCS
] & m_flag_archval
)) {
503 env
->exception_index
= EXCP_NMI
;
508 #elif defined(TARGET_M68K)
509 if (interrupt_request
& CPU_INTERRUPT_HARD
510 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
511 < env
->pending_level
) {
512 /* Real hardware gets the interrupt vector via an
513 IACK cycle at this point. Current emulated
514 hardware doesn't rely on this, so we
515 provide/save the vector when the interrupt is
517 env
->exception_index
= env
->pending_vector
;
518 do_interrupt_m68k_hardirq(env
);
521 #elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
522 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
523 (env
->psw
.mask
& PSW_MASK_EXT
)) {
527 #elif defined(TARGET_XTENSA)
528 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
529 env
->exception_index
= EXC_IRQ
;
534 /* Don't use the cached interrupt_request value,
535 do_interrupt may have updated the EXITTB flag. */
536 if (env
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
537 env
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
538 /* ensure that no TB jump will be modified as
539 the program flow was changed */
543 if (unlikely(cpu
->exit_request
)) {
544 cpu
->exit_request
= 0;
545 env
->exception_index
= EXCP_INTERRUPT
;
548 #if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
549 if (qemu_loglevel_mask(CPU_LOG_TB_CPU
)) {
550 /* restore flags in standard format */
551 #if defined(TARGET_I386)
552 env
->eflags
= env
->eflags
| cpu_cc_compute_all(env
, CC_OP
)
554 log_cpu_state(env
, CPU_DUMP_CCOP
);
555 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
556 #elif defined(TARGET_M68K)
557 cpu_m68k_flush_flags(env
, env
->cc_op
);
558 env
->cc_op
= CC_OP_FLAGS
;
559 env
->sr
= (env
->sr
& 0xffe0)
560 | env
->cc_dest
| (env
->cc_x
<< 4);
561 log_cpu_state(env
, 0);
563 log_cpu_state(env
, 0);
566 #endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
/* Find (or translate) the next TB; tb_lock guards the TB structures. */
567 spin_lock(&tcg_ctx
.tb_ctx
.tb_lock
);
568 tb
= tb_find_fast(env
);
569 /* Note: we do it here to avoid a gcc bug on Mac OS X when
570 doing it in tb_find_slow */
571 if (tcg_ctx
.tb_ctx
.tb_invalidated_flag
) {
572 /* as some TB could have been invalidated because
573 of memory exceptions while generating the code, we
574 must recompute the hash index here */
576 tcg_ctx
.tb_ctx
.tb_invalidated_flag
= 0;
578 #ifdef CONFIG_DEBUG_EXEC
579 qemu_log_mask(CPU_LOG_EXEC
, "Trace %p [" TARGET_FMT_lx
"] %s\n",
581 lookup_symbol(tb
->pc
));
583 /* see if we can patch the calling TB. When the TB
584 spans two pages, we cannot safely do a direct
586 if (next_tb
!= 0 && tb
->page_addr
[1] == -1) {
587 tb_add_jump((TranslationBlock
*)(next_tb
& ~3), next_tb
& 3, tb
);
589 spin_unlock(&tcg_ctx
.tb_ctx
.tb_lock
);
591 /* cpu_interrupt might be called while translating the
592 TB, but before it is linked into a potentially
593 infinite loop and becomes env->current_tb. Avoid
594 starting execution if there is a pending interrupt. */
595 cpu
->current_tb
= tb
;
/* Run the TB unless an exit was requested while we were linking it. */
597 if (likely(!cpu
->exit_request
)) {
599 /* execute the generated code */
600 next_tb
= tcg_qemu_tb_exec(env
, tc_ptr
);
601 if ((next_tb
& 3) == 2) {
602 /* Instruction counter expired. */
604 tb
= (TranslationBlock
*)(next_tb
& ~3);
606 cpu_pc_from_tb(env
, tb
);
607 insns_left
= env
->icount_decr
.u32
;
608 if (env
->icount_extra
&& insns_left
>= 0) {
609 /* Refill decrementer and continue execution. */
610 env
->icount_extra
+= insns_left
;
611 if (env
->icount_extra
> 0xffff) {
614 insns_left
= env
->icount_extra
;
616 env
->icount_extra
-= insns_left
;
617 env
->icount_decr
.u16
.low
= insns_left
;
619 if (insns_left
> 0) {
620 /* Execute remaining instructions. */
621 cpu_exec_nocache(env
, insns_left
, tb
);
623 env
->exception_index
= EXCP_INTERRUPT
;
629 cpu
->current_tb
= NULL
;
630 /* reset soft MMU for next block (it can currently
631 only be set by a memory fault) */
634 /* Reload env after longjmp - the compiler may have smashed all
635 * local variables as longjmp is marked 'noreturn'. */
636 env
= cpu_single_env
;
/* Restore guest flag state to its canonical format before returning. */
641 #if defined(TARGET_I386)
642 /* restore flags in standard format */
643 env
->eflags
= env
->eflags
| cpu_cc_compute_all(env
, CC_OP
)
645 #elif defined(TARGET_ARM)
646 /* XXX: Save/restore host fpu exception state?. */
647 #elif defined(TARGET_UNICORE32)
648 #elif defined(TARGET_SPARC)
649 #elif defined(TARGET_PPC)
650 #elif defined(TARGET_LM32)
651 #elif defined(TARGET_M68K)
652 cpu_m68k_flush_flags(env
, env
->cc_op
);
653 env
->cc_op
= CC_OP_FLAGS
;
654 env
->sr
= (env
->sr
& 0xffe0)
655 | env
->cc_dest
| (env
->cc_x
<< 4);
656 #elif defined(TARGET_MICROBLAZE)
657 #elif defined(TARGET_MIPS)
658 #elif defined(TARGET_OPENRISC)
659 #elif defined(TARGET_SH4)
660 #elif defined(TARGET_ALPHA)
661 #elif defined(TARGET_CRIS)
662 #elif defined(TARGET_S390X)
663 #elif defined(TARGET_XTENSA)
666 #error unsupported target CPU
669 /* fail safe : never use cpu_single_env outside cpu_exec() */
670 cpu_single_env
= NULL
;