/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

//#define CONFIG_DEBUG_EXEC
bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}
void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}
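
/* Note: cpu_loop_exit() never returns to its caller: the siglongjmp() above
   lands back in the sigsetjmp() call at the top of the main loop in
   cpu_exec(), which is why current_tb is cleared just before unwinding. */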

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        cpu_pc_from_tb(env, tb);
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt. We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
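
/* The value returned by tcg_qemu_tb_exec() (and thus by cpu_tb_exec()) is a
   tagged pointer: the bits covered by TB_EXIT_MASK encode why the generated
   code stopped (a jump-slot index up to TB_EXIT_IDX1, TB_EXIT_REQUESTED, or
   TB_EXIT_ICOUNT_EXPIRED), while the remaining bits hold the address of the
   TranslationBlock that was last executing, which is why the callers cast
   (next_tb & ~TB_EXIT_MASK) back to a TranslationBlock pointer. */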

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
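
/* cpu_exec_nocache() deliberately produces a throw-away TB: the cycle budget
   is capped at CF_COUNT_MASK, the block is executed exactly once, and it is
   then invalidated again straight away so it never lingers in the
   translation caches. */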

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
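
/* TB lookup is two-level: tb_find_fast() first checks the per-CPU
   tb_jmp_cache, indexed by a hash of the guest virtual PC; on a miss, or
   when the cached entry's pc/cs_base/flags no longer match the current CPU
   state, it falls back to tb_find_slow(), which searches the
   physical-address hash chain and, failing that, translates a fresh TB with
   tb_gen_code(). */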

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (env->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
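    /* The block above moves the arithmetic flags into the lazy
       condition-code state (CC_SRC/CC_OP) and the direction flag into DF;
       the matching restore via cpu_cc_compute_all() at the end of cpu_exec()
       folds them back into env->eflags before returning to the caller. */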
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
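    /* exception_index == -1 means "no exception pending"; the dispatch code
       below only acts on values >= 0, and treats anything >= EXCP_INTERRUPT
       as a request to leave the execution loop rather than as a guest
       exception to deliver. */
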
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, CPU_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
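                /* Chaining note: tb_add_jump() patches the exit jump of the
                   previously executed TB (the slot selected by
                   next_tb & TB_EXIT_MASK) so that it branches directly into
                   the freshly looked-up TB, letting later executions bypass
                   this lookup; as the comment above says, this is skipped
                   when the new TB spans two guest pages. */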
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
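                    /* next_tb now also tells us why the generated code
                       stopped.  For TB_EXIT_ICOUNT_EXPIRED below, the 16-bit
                       decrementer in icount_decr.u16.low has run out; any
                       budget left in icount_extra is used to refill it in
                       chunks of at most 0xffff instructions, and once both
                       are exhausted the remaining instructions are run
                       uncached before leaving the loop with EXCP_INTERRUPT. */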
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}