/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
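
/* Unwind the current TB and return to the sigsetjmp in cpu_exec().
   Callers raise an exception by setting env->exception_index first. */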
void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */
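
    /* tcg_qemu_tb_exec returns the address of the last TB executed, with
     * the exit reason encoded in the two low bits (TB_EXIT_MASK): values
     * 0 and 1 name the goto_tb slot through which the TB was left, while
     * larger values mean execution stopped before or inside the TB. */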
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
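    /* tb_gen_code entered the one-shot TB into the lookup structures, so
       invalidate and free it immediately after its single run. */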
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
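    /* Walk the collision chain of this bucket; a TB matches only if pc,
       cs_base, flags and the physical address of its first page all
       match (plus that of the second page for a TB crossing a page
       boundary). */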
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
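
/* tb_jmp_cache is a small direct-mapped cache indexed by a hash of the
   virtual PC; tb_find_fast probes it first and falls back to the
   physical-address hash table via tb_find_slow on a miss. */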
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
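
    /* The rest of cpu_exec() runs under two nested loops: the outer
       for(;;) re-enters through sigsetjmp after every cpu_loop_exit(),
       and the inner for(;;) keeps dispatching (and chaining) TBs until
       an exception or interrupt longjmps out. */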
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
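            /* next_tb carries the chaining state between iterations:
               the pointer to the last executed TB plus, in its low
               bits, the goto_tb slot it exited through, so that
               tb_add_jump below can patch that slot.  Zero disables
               chaining for the next TB. */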
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || \
    defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_cpu);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing hardware INT=0x%02x\n",
                                          intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing virtual hardware INT=0x%02x\n",
                                          intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
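                /* tb_lock serialises access to the TB caches and to the
                   jump patching done by tb_add_jump below. */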
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
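                /* If tb_add_jump patched the previous TB above, future
                   executions will flow directly from that TB into this
                   one without coming back through this loop. */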
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
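                        /* Translated code decrements the 16-bit budget in
                           icount_decr.u16.low; any instructions beyond a
                           0xffff budget are parked in icount_extra and
                           transferred in below, one slice at a time. */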
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}