/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

//#define CONFIG_DEBUG_EXEC
bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}
void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}
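
/* Note: this longjmps back to the sigsetjmp() at the top of cpu_exec(),
 * unwinding out of the generated code without returning through it;
 * current_tb is cleared first because the TB is being abandoned
 * mid-execution. */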
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        cpu_pc_from_tb(env, tb);
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
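
/* Note: tcg_qemu_tb_exec() packs its result into a single word: the low
 * TB_EXIT_MASK bits hold the reason the TB stopped (a goto_tb slot index,
 * or one of the TB_EXIT_* codes), and the remaining bits hold the address
 * of the last TranslationBlock executed, which is why callers mask with
 * ~TB_EXIT_MASK before using the value as a pointer. */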
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
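
/* cpu_exec_nocache() exists for the icount case in cpu_exec() below: when
 * fewer instructions remain in the budget than an existing TB consumes, we
 * retranslate with an explicit cycle limit (max_cycles) and throw the
 * one-shot TB away immediately afterwards. */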
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
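
/* TB lookup is two-level: tb_phys_hash above is keyed on the *physical* PC
 * (so self-modifying code and page remapping are handled by physical-page
 * invalidation), while tb_jmp_cache is a per-CPU direct-mapped cache keyed
 * on the virtual PC, used for the fast path in tb_find_fast() below. */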
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
/* main execution loop */

volatile sig_atomic_t exit_request;
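
/* exit_request is written from outside the execution loop (e.g. from a
 * signal handler or another thread asking the loop to stop), hence the
 * volatile sig_atomic_t type: that is the only object type C guarantees
 * can be safely written from an asynchronous signal handler. */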
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (env->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }
225 /* put eflags in CPU temporary format */
226 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
227 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
228 CC_OP
= CC_OP_EFLAGS
;
229 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
230 #elif defined(TARGET_SPARC)
231 #elif defined(TARGET_M68K)
232 env
->cc_op
= CC_OP_FLAGS
;
233 env
->cc_dest
= env
->sr
& 0xf;
234 env
->cc_x
= (env
->sr
>> 4) & 1;
235 #elif defined(TARGET_ALPHA)
236 #elif defined(TARGET_ARM)
237 #elif defined(TARGET_UNICORE32)
238 #elif defined(TARGET_PPC)
239 env
->reserve_addr
= -1;
240 #elif defined(TARGET_LM32)
241 #elif defined(TARGET_MICROBLAZE)
242 #elif defined(TARGET_MIPS)
243 #elif defined(TARGET_MOXIE)
244 #elif defined(TARGET_OPENRISC)
245 #elif defined(TARGET_SH4)
246 #elif defined(TARGET_CRIS)
247 #elif defined(TARGET_S390X)
248 #elif defined(TARGET_XTENSA)
251 #error unsupported target CPU
253 env
->exception_index
= -1;

    /* prepare setjmp context for exception handling */
    for (;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb +
                                             offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    log_cpu_state(env, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
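
                /* Block chaining: tb_add_jump() patches a goto_tb slot
                 * (next_tb & TB_EXIT_MASK selects slot 0 or 1) of the TB we
                 * just left so that it branches straight into the new TB's
                 * generated code, bypassing this lookup loop entirely on
                 * subsequent executions. */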
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
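
                /* The icount decrementer lives in the 16-bit
                 * icount_decr.u16.low field, so at most a 0xffff instruction
                 * budget can be armed per TB entry; any larger budget is
                 * banked in icount_extra and used to refill the decrementer
                 * above. */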
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}