/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}
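
/* Note: this function does not return.  The siglongjmp() unwinds the host
   stack back to the sigsetjmp() in cpu_exec() below, which then looks at
   cpu->exception_index to decide what to do next. */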
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif
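
/* Note: unlike cpu_loop_exit(), the unwind above clears exception_index
   first, so the sigsetjmp() handler in cpu_exec() resumes guest execution
   instead of delivering an exception. */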
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;
#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt.  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
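
/* Note on the value returned above: it is the address of the TB we exited
 * from, with the exit reason encoded in its two low bits (TB_EXIT_MASK):
 * either the goto_tb slot index that was taken, TB_EXIT_REQUESTED, or
 * TB_EXIT_ICOUNT_EXPIRED.  Callers mask the low bits off before using the
 * value as a TranslationBlock pointer. */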
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
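
/* Note: the only caller of cpu_exec_nocache() in this file is the icount
 * handling further down, when fewer instructions remain in the budget than
 * the cached TB would execute: a one-shot TB capped at max_cycles
 * instructions is generated, run once and immediately invalidated and
 * freed. */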
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
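
/* Note: the tb_phys_hash table walked above is keyed on the guest-physical
 * address of the code (via get_page_addr_code()); keeping TBs indexed by
 * physical page is what allows writes to a page to invalidate exactly the
 * translations built from it, independently of the virtual mapping. */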
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
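
/* Note: TB lookup is two-level.  tb_find_fast() probes the per-CPU
 * tb_jmp_cache, a direct-mapped cache indexed by a hash of the virtual PC,
 * and only falls back to tb_find_slow() (physical hash table, then
 * tb_gen_code()) on a miss. */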
static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
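
/* Note: when watchpoint_hit is not set, the debug exception presumably came
 * from a breakpoint or single-step, so any stale per-watchpoint hit flags
 * are cleared before invoking the registered handler. */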
/* main execution loop */

volatile sig_atomic_t exit_request;
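
/* Set by other threads to ask whichever CPU is currently in cpu_exec() to
 * leave the execution loop; it is sampled once on entry below and mirrored
 * into cpu->exit_request. */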
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;
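
    /* What follows is two nested loops: the outer loop re-enters at the
       sigsetjmp() after every cpu_loop_exit() and delivers any pending
       exception, while the inner loop looks up (or translates) the next TB,
       optionally chains it to the previous one, and runs it. */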

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
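                /* Raising EXCP_INTERRUPT here takes us back through
                   cpu_loop_exit() to the sigsetjmp() above; the outer loop
                   then returns it to the caller so whatever prompted the
                   exit request can be serviced. */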
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
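                /* tb_add_jump() above patched the goto_tb slot of the
                   previously executed TB (the slot index travels in the low
                   bits of next_tb), so future executions branch straight
                   into the new TB without repeating this lookup. */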

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
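                        /* The icount budget lives in two pieces: the 16-bit
                           icount_decr.u16.low counter that the generated
                           code decrements per instruction, and icount_extra
                           for whatever does not fit in 16 bits.  Either the
                           low counter is refilled from icount_extra below,
                           or the few remaining instructions are run with
                           cpu_exec_nocache() and we exit the loop. */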
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* inner for(;;) loop */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* outer for(;;) loop */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}