2 * emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "disas/disas.h"
23 #include "qemu/atomic.h"
24 #include "sysemu/qtest.h"
26 void cpu_loop_exit(CPUState
*cpu
)
28 cpu
->current_tb
= NULL
;
29 siglongjmp(cpu
->jmp_env
, 1);
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
/* Longjmp back to the main loop from a signal handler, discarding any
 * pending exception so execution resumes normally.  @puc is the host
 * signal context (currently unused here).  Never returns.
 */
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif
45 /* Execute a TB, and fix up the CPU state afterwards if necessary */
46 static inline tcg_target_ulong
cpu_tb_exec(CPUState
*cpu
, uint8_t *tb_ptr
)
48 CPUArchState
*env
= cpu
->env_ptr
;
51 #if defined(DEBUG_DISAS)
52 if (qemu_loglevel_mask(CPU_LOG_TB_CPU
)) {
53 #if defined(TARGET_I386)
54 log_cpu_state(cpu
, CPU_DUMP_CCOP
);
55 #elif defined(TARGET_M68K)
56 /* ??? Should not modify env state for dumping. */
57 cpu_m68k_flush_flags(env
, env
->cc_op
);
58 env
->cc_op
= CC_OP_FLAGS
;
59 env
->sr
= (env
->sr
& 0xffe0) | env
->cc_dest
| (env
->cc_x
<< 4);
60 log_cpu_state(cpu
, 0);
62 log_cpu_state(cpu
, 0);
65 #endif /* DEBUG_DISAS */
67 next_tb
= tcg_qemu_tb_exec(env
, tb_ptr
);
68 if ((next_tb
& TB_EXIT_MASK
) > TB_EXIT_IDX1
) {
69 /* We didn't start executing this TB (eg because the instruction
70 * counter hit zero); we must restore the guest PC to the address
71 * of the start of the TB.
73 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
74 TranslationBlock
*tb
= (TranslationBlock
*)(next_tb
& ~TB_EXIT_MASK
);
75 if (cc
->synchronize_from_tb
) {
76 cc
->synchronize_from_tb(cpu
, tb
);
79 cc
->set_pc(cpu
, tb
->pc
);
82 if ((next_tb
& TB_EXIT_MASK
) == TB_EXIT_REQUESTED
) {
83 /* We were asked to stop executing TBs (probably a pending
84 * interrupt. We've now stopped, so clear the flag.
86 cpu
->tcg_exit_req
= 0;
91 /* Execute the code without caching the generated code. An interpreter
92 could be used if available. */
93 static void cpu_exec_nocache(CPUArchState
*env
, int max_cycles
,
94 TranslationBlock
*orig_tb
)
96 CPUState
*cpu
= ENV_GET_CPU(env
);
99 /* Should never happen.
100 We only end up here when an existing TB is too long. */
101 if (max_cycles
> CF_COUNT_MASK
)
102 max_cycles
= CF_COUNT_MASK
;
104 tb
= tb_gen_code(cpu
, orig_tb
->pc
, orig_tb
->cs_base
, orig_tb
->flags
,
106 cpu
->current_tb
= tb
;
107 /* execute the generated code */
108 cpu_tb_exec(cpu
, tb
->tc_ptr
);
109 cpu
->current_tb
= NULL
;
110 tb_phys_invalidate(tb
, -1);
114 static TranslationBlock
*tb_find_slow(CPUArchState
*env
,
116 target_ulong cs_base
,
119 CPUState
*cpu
= ENV_GET_CPU(env
);
120 TranslationBlock
*tb
, **ptb1
;
122 tb_page_addr_t phys_pc
, phys_page1
;
123 target_ulong virt_page2
;
125 tcg_ctx
.tb_ctx
.tb_invalidated_flag
= 0;
127 /* find translated block using physical mappings */
128 phys_pc
= get_page_addr_code(env
, pc
);
129 phys_page1
= phys_pc
& TARGET_PAGE_MASK
;
130 h
= tb_phys_hash_func(phys_pc
);
131 ptb1
= &tcg_ctx
.tb_ctx
.tb_phys_hash
[h
];
137 tb
->page_addr
[0] == phys_page1
&&
138 tb
->cs_base
== cs_base
&&
139 tb
->flags
== flags
) {
140 /* check next page if needed */
141 if (tb
->page_addr
[1] != -1) {
142 tb_page_addr_t phys_page2
;
144 virt_page2
= (pc
& TARGET_PAGE_MASK
) +
146 phys_page2
= get_page_addr_code(env
, virt_page2
);
147 if (tb
->page_addr
[1] == phys_page2
)
153 ptb1
= &tb
->phys_hash_next
;
156 /* if no translated code available, then translate it now */
157 tb
= tb_gen_code(cpu
, pc
, cs_base
, flags
, 0);
160 /* Move the last found TB to the head of the list */
162 *ptb1
= tb
->phys_hash_next
;
163 tb
->phys_hash_next
= tcg_ctx
.tb_ctx
.tb_phys_hash
[h
];
164 tcg_ctx
.tb_ctx
.tb_phys_hash
[h
] = tb
;
166 /* we add the TB in the virtual pc hash table */
167 cpu
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)] = tb
;
171 static inline TranslationBlock
*tb_find_fast(CPUArchState
*env
)
173 CPUState
*cpu
= ENV_GET_CPU(env
);
174 TranslationBlock
*tb
;
175 target_ulong cs_base
, pc
;
178 /* we record a subset of the CPU state. It will
179 always be the same before a given translated block
181 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &flags
);
182 tb
= cpu
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)];
183 if (unlikely(!tb
|| tb
->pc
!= pc
|| tb
->cs_base
!= cs_base
||
184 tb
->flags
!= flags
)) {
185 tb
= tb_find_slow(env
, pc
, cs_base
, flags
);
190 static CPUDebugExcpHandler
*debug_excp_handler
;
192 void cpu_set_debug_excp_handler(CPUDebugExcpHandler
*handler
)
194 debug_excp_handler
= handler
;
197 static void cpu_handle_debug_exception(CPUArchState
*env
)
199 CPUState
*cpu
= ENV_GET_CPU(env
);
202 if (!cpu
->watchpoint_hit
) {
203 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
204 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
207 if (debug_excp_handler
) {
208 debug_excp_handler(env
);
/* main execution loop */

/* Global flag other threads set to ask the executing vCPU to leave
 * cpu_exec(); polled once per outer-loop iteration.  sig_atomic_t
 * because it may also be written from a signal handler. */
volatile sig_atomic_t exit_request;
216 int cpu_exec(CPUArchState
*env
)
218 CPUState
*cpu
= ENV_GET_CPU(env
);
219 #if !(defined(CONFIG_USER_ONLY) && \
220 (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
221 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
224 X86CPU
*x86_cpu
= X86_CPU(cpu
);
226 int ret
, interrupt_request
;
227 TranslationBlock
*tb
;
230 /* This must be volatile so it is not trashed by longjmp() */
231 volatile bool have_tb_lock
= false;
234 if (!cpu_has_work(cpu
)) {
243 /* As long as current_cpu is null, up to the assignment just above,
244 * requests by other threads to exit the execution loop are expected to
245 * be issued using the exit_request global. We must make sure that our
246 * evaluation of the global value is performed past the current_cpu
247 * value transition point, which requires a memory barrier as well as
248 * an instruction scheduling constraint on modern architectures. */
251 if (unlikely(exit_request
)) {
252 cpu
->exit_request
= 1;
255 #if defined(TARGET_I386)
256 /* put eflags in CPU temporary format */
257 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
258 env
->df
= 1 - (2 * ((env
->eflags
>> 10) & 1));
259 CC_OP
= CC_OP_EFLAGS
;
260 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
261 #elif defined(TARGET_SPARC)
262 #elif defined(TARGET_M68K)
263 env
->cc_op
= CC_OP_FLAGS
;
264 env
->cc_dest
= env
->sr
& 0xf;
265 env
->cc_x
= (env
->sr
>> 4) & 1;
266 #elif defined(TARGET_ALPHA)
267 #elif defined(TARGET_ARM)
268 #elif defined(TARGET_UNICORE32)
269 #elif defined(TARGET_PPC)
270 env
->reserve_addr
= -1;
271 #elif defined(TARGET_LM32)
272 #elif defined(TARGET_MICROBLAZE)
273 #elif defined(TARGET_MIPS)
274 #elif defined(TARGET_MOXIE)
275 #elif defined(TARGET_OPENRISC)
276 #elif defined(TARGET_SH4)
277 #elif defined(TARGET_CRIS)
278 #elif defined(TARGET_S390X)
279 #elif defined(TARGET_XTENSA)
282 #error unsupported target CPU
284 cpu
->exception_index
= -1;
286 /* prepare setjmp context for exception handling */
288 if (sigsetjmp(cpu
->jmp_env
, 0) == 0) {
289 /* if an exception is pending, we execute it here */
290 if (cpu
->exception_index
>= 0) {
291 if (cpu
->exception_index
>= EXCP_INTERRUPT
) {
292 /* exit request from the cpu execution loop */
293 ret
= cpu
->exception_index
;
294 if (ret
== EXCP_DEBUG
) {
295 cpu_handle_debug_exception(env
);
299 #if defined(CONFIG_USER_ONLY)
300 /* if user mode only, we simulate a fake exception
301 which will be handled outside the cpu execution
303 #if defined(TARGET_I386)
304 cc
->do_interrupt(cpu
);
306 ret
= cpu
->exception_index
;
309 cc
->do_interrupt(cpu
);
310 cpu
->exception_index
= -1;
315 next_tb
= 0; /* force lookup of first TB */
317 interrupt_request
= cpu
->interrupt_request
;
318 if (unlikely(interrupt_request
)) {
319 if (unlikely(cpu
->singlestep_enabled
& SSTEP_NOIRQ
)) {
320 /* Mask out external interrupts for this step. */
321 interrupt_request
&= ~CPU_INTERRUPT_SSTEP_MASK
;
323 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
324 cpu
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
325 cpu
->exception_index
= EXCP_DEBUG
;
328 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
329 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
330 defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
331 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
332 cpu
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
334 cpu
->exception_index
= EXCP_HLT
;
338 #if defined(TARGET_I386)
339 #if !defined(CONFIG_USER_ONLY)
340 if (interrupt_request
& CPU_INTERRUPT_POLL
) {
341 cpu
->interrupt_request
&= ~CPU_INTERRUPT_POLL
;
342 apic_poll_irq(x86_cpu
->apic_state
);
345 if (interrupt_request
& CPU_INTERRUPT_INIT
) {
346 cpu_svm_check_intercept_param(env
, SVM_EXIT_INIT
,
348 do_cpu_init(x86_cpu
);
349 cpu
->exception_index
= EXCP_HALTED
;
351 } else if (interrupt_request
& CPU_INTERRUPT_SIPI
) {
352 do_cpu_sipi(x86_cpu
);
353 } else if (env
->hflags2
& HF2_GIF_MASK
) {
354 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
355 !(env
->hflags
& HF_SMM_MASK
)) {
356 cpu_svm_check_intercept_param(env
, SVM_EXIT_SMI
,
358 cpu
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
359 do_smm_enter(x86_cpu
);
361 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
362 !(env
->hflags2
& HF2_NMI_MASK
)) {
363 cpu
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
364 env
->hflags2
|= HF2_NMI_MASK
;
365 do_interrupt_x86_hardirq(env
, EXCP02_NMI
, 1);
367 } else if (interrupt_request
& CPU_INTERRUPT_MCE
) {
368 cpu
->interrupt_request
&= ~CPU_INTERRUPT_MCE
;
369 do_interrupt_x86_hardirq(env
, EXCP12_MCHK
, 0);
371 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
372 (((env
->hflags2
& HF2_VINTR_MASK
) &&
373 (env
->hflags2
& HF2_HIF_MASK
)) ||
374 (!(env
->hflags2
& HF2_VINTR_MASK
) &&
375 (env
->eflags
& IF_MASK
&&
376 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
))))) {
378 cpu_svm_check_intercept_param(env
, SVM_EXIT_INTR
,
380 cpu
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
|
382 intno
= cpu_get_pic_interrupt(env
);
383 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Servicing hardware INT=0x%02x\n", intno
);
384 do_interrupt_x86_hardirq(env
, intno
, 1);
385 /* ensure that no TB jump will be modified as
386 the program flow was changed */
388 #if !defined(CONFIG_USER_ONLY)
389 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
390 (env
->eflags
& IF_MASK
) &&
391 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
393 /* FIXME: this should respect TPR */
394 cpu_svm_check_intercept_param(env
, SVM_EXIT_VINTR
,
396 intno
= ldl_phys(cpu
->as
,
398 + offsetof(struct vmcb
,
399 control
.int_vector
));
400 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Servicing virtual hardware INT=0x%02x\n", intno
);
401 do_interrupt_x86_hardirq(env
, intno
, 1);
402 cpu
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
407 #elif defined(TARGET_PPC)
408 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
411 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
412 ppc_hw_interrupt(env
);
413 if (env
->pending_interrupts
== 0) {
414 cpu
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
418 #elif defined(TARGET_LM32)
419 if ((interrupt_request
& CPU_INTERRUPT_HARD
)
420 && (env
->ie
& IE_IE
)) {
421 cpu
->exception_index
= EXCP_IRQ
;
422 cc
->do_interrupt(cpu
);
425 #elif defined(TARGET_MICROBLAZE)
426 if ((interrupt_request
& CPU_INTERRUPT_HARD
)
427 && (env
->sregs
[SR_MSR
] & MSR_IE
)
428 && !(env
->sregs
[SR_MSR
] & (MSR_EIP
| MSR_BIP
))
429 && !(env
->iflags
& (D_FLAG
| IMM_FLAG
))) {
430 cpu
->exception_index
= EXCP_IRQ
;
431 cc
->do_interrupt(cpu
);
434 #elif defined(TARGET_MIPS)
435 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
436 cpu_mips_hw_interrupts_pending(env
)) {
438 cpu
->exception_index
= EXCP_EXT_INTERRUPT
;
440 cc
->do_interrupt(cpu
);
443 #elif defined(TARGET_OPENRISC)
446 if ((interrupt_request
& CPU_INTERRUPT_HARD
)
447 && (env
->sr
& SR_IEE
)) {
450 if ((interrupt_request
& CPU_INTERRUPT_TIMER
)
451 && (env
->sr
& SR_TEE
)) {
455 cpu
->exception_index
= idx
;
456 cc
->do_interrupt(cpu
);
460 #elif defined(TARGET_SPARC)
461 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
462 if (cpu_interrupts_enabled(env
) &&
463 env
->interrupt_index
> 0) {
464 int pil
= env
->interrupt_index
& 0xf;
465 int type
= env
->interrupt_index
& 0xf0;
467 if (((type
== TT_EXTINT
) &&
468 cpu_pil_allowed(env
, pil
)) ||
470 cpu
->exception_index
= env
->interrupt_index
;
471 cc
->do_interrupt(cpu
);
476 #elif defined(TARGET_ARM)
477 if (interrupt_request
& CPU_INTERRUPT_FIQ
478 && !(env
->daif
& PSTATE_F
)) {
479 cpu
->exception_index
= EXCP_FIQ
;
480 cc
->do_interrupt(cpu
);
483 /* ARMv7-M interrupt return works by loading a magic value
484 into the PC. On real hardware the load causes the
485 return to occur. The qemu implementation performs the
486 jump normally, then does the exception return when the
487 CPU tries to execute code at the magic address.
488 This will cause the magic PC value to be pushed to
489 the stack if an interrupt occurred at the wrong time.
490 We avoid this by disabling interrupts when
491 pc contains a magic address. */
492 if (interrupt_request
& CPU_INTERRUPT_HARD
493 && ((IS_M(env
) && env
->regs
[15] < 0xfffffff0)
494 || !(env
->daif
& PSTATE_I
))) {
495 cpu
->exception_index
= EXCP_IRQ
;
496 cc
->do_interrupt(cpu
);
499 #elif defined(TARGET_UNICORE32)
500 if (interrupt_request
& CPU_INTERRUPT_HARD
501 && !(env
->uncached_asr
& ASR_I
)) {
502 cpu
->exception_index
= UC32_EXCP_INTR
;
503 cc
->do_interrupt(cpu
);
506 #elif defined(TARGET_SH4)
507 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
508 cc
->do_interrupt(cpu
);
511 #elif defined(TARGET_ALPHA)
514 /* ??? This hard-codes the OSF/1 interrupt levels. */
515 switch (env
->pal_mode
? 7 : env
->ps
& PS_INT_MASK
) {
517 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
518 idx
= EXCP_DEV_INTERRUPT
;
522 if (interrupt_request
& CPU_INTERRUPT_TIMER
) {
523 idx
= EXCP_CLK_INTERRUPT
;
527 if (interrupt_request
& CPU_INTERRUPT_SMP
) {
528 idx
= EXCP_SMP_INTERRUPT
;
532 if (interrupt_request
& CPU_INTERRUPT_MCHK
) {
537 cpu
->exception_index
= idx
;
539 cc
->do_interrupt(cpu
);
543 #elif defined(TARGET_CRIS)
544 if (interrupt_request
& CPU_INTERRUPT_HARD
545 && (env
->pregs
[PR_CCS
] & I_FLAG
)
546 && !env
->locked_irq
) {
547 cpu
->exception_index
= EXCP_IRQ
;
548 cc
->do_interrupt(cpu
);
551 if (interrupt_request
& CPU_INTERRUPT_NMI
) {
552 unsigned int m_flag_archval
;
553 if (env
->pregs
[PR_VR
] < 32) {
554 m_flag_archval
= M_FLAG_V10
;
556 m_flag_archval
= M_FLAG_V32
;
558 if ((env
->pregs
[PR_CCS
] & m_flag_archval
)) {
559 cpu
->exception_index
= EXCP_NMI
;
560 cc
->do_interrupt(cpu
);
564 #elif defined(TARGET_M68K)
565 if (interrupt_request
& CPU_INTERRUPT_HARD
566 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
567 < env
->pending_level
) {
568 /* Real hardware gets the interrupt vector via an
569 IACK cycle at this point. Current emulated
570 hardware doesn't rely on this, so we
571 provide/save the vector when the interrupt is
573 cpu
->exception_index
= env
->pending_vector
;
574 do_interrupt_m68k_hardirq(env
);
577 #elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
578 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
579 (env
->psw
.mask
& PSW_MASK_EXT
)) {
580 cc
->do_interrupt(cpu
);
583 #elif defined(TARGET_XTENSA)
584 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
585 cpu
->exception_index
= EXC_IRQ
;
586 cc
->do_interrupt(cpu
);
590 /* Don't use the cached interrupt_request value,
591 do_interrupt may have updated the EXITTB flag. */
592 if (cpu
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
593 cpu
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
594 /* ensure that no TB jump will be modified as
595 the program flow was changed */
599 if (unlikely(cpu
->exit_request
)) {
600 cpu
->exit_request
= 0;
601 cpu
->exception_index
= EXCP_INTERRUPT
;
604 spin_lock(&tcg_ctx
.tb_ctx
.tb_lock
);
606 tb
= tb_find_fast(env
);
607 /* Note: we do it here to avoid a gcc bug on Mac OS X when
608 doing it in tb_find_slow */
609 if (tcg_ctx
.tb_ctx
.tb_invalidated_flag
) {
610 /* as some TB could have been invalidated because
611 of memory exceptions while generating the code, we
612 must recompute the hash index here */
614 tcg_ctx
.tb_ctx
.tb_invalidated_flag
= 0;
616 if (qemu_loglevel_mask(CPU_LOG_EXEC
)) {
617 qemu_log("Trace %p [" TARGET_FMT_lx
"] %s\n",
618 tb
->tc_ptr
, tb
->pc
, lookup_symbol(tb
->pc
));
620 /* see if we can patch the calling TB. When the TB
621 spans two pages, we cannot safely do a direct
623 if (next_tb
!= 0 && tb
->page_addr
[1] == -1) {
624 tb_add_jump((TranslationBlock
*)(next_tb
& ~TB_EXIT_MASK
),
625 next_tb
& TB_EXIT_MASK
, tb
);
627 have_tb_lock
= false;
628 spin_unlock(&tcg_ctx
.tb_ctx
.tb_lock
);
630 /* cpu_interrupt might be called while translating the
631 TB, but before it is linked into a potentially
632 infinite loop and becomes env->current_tb. Avoid
633 starting execution if there is a pending interrupt. */
634 cpu
->current_tb
= tb
;
636 if (likely(!cpu
->exit_request
)) {
638 /* execute the generated code */
639 next_tb
= cpu_tb_exec(cpu
, tc_ptr
);
640 switch (next_tb
& TB_EXIT_MASK
) {
641 case TB_EXIT_REQUESTED
:
642 /* Something asked us to stop executing
643 * chained TBs; just continue round the main
644 * loop. Whatever requested the exit will also
645 * have set something else (eg exit_request or
646 * interrupt_request) which we will handle
647 * next time around the loop.
649 tb
= (TranslationBlock
*)(next_tb
& ~TB_EXIT_MASK
);
652 case TB_EXIT_ICOUNT_EXPIRED
:
654 /* Instruction counter expired. */
656 tb
= (TranslationBlock
*)(next_tb
& ~TB_EXIT_MASK
);
657 insns_left
= cpu
->icount_decr
.u32
;
658 if (cpu
->icount_extra
&& insns_left
>= 0) {
659 /* Refill decrementer and continue execution. */
660 cpu
->icount_extra
+= insns_left
;
661 if (cpu
->icount_extra
> 0xffff) {
664 insns_left
= cpu
->icount_extra
;
666 cpu
->icount_extra
-= insns_left
;
667 cpu
->icount_decr
.u16
.low
= insns_left
;
669 if (insns_left
> 0) {
670 /* Execute remaining instructions. */
671 cpu_exec_nocache(env
, insns_left
, tb
);
673 cpu
->exception_index
= EXCP_INTERRUPT
;
683 cpu
->current_tb
= NULL
;
684 /* reset soft MMU for next block (it can currently
685 only be set by a memory fault) */
688 /* Reload env after longjmp - the compiler may have smashed all
689 * local variables as longjmp is marked 'noreturn'. */
692 #if !(defined(CONFIG_USER_ONLY) && \
693 (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
694 cc
= CPU_GET_CLASS(cpu
);
697 x86_cpu
= X86_CPU(cpu
);
700 spin_unlock(&tcg_ctx
.tb_ctx
.tb_lock
);
701 have_tb_lock
= false;
707 #if defined(TARGET_I386)
708 /* restore flags in standard format */
709 env
->eflags
= env
->eflags
| cpu_cc_compute_all(env
, CC_OP
)
710 | (env
->df
& DF_MASK
);
711 #elif defined(TARGET_ARM)
712 /* XXX: Save/restore host fpu exception state?. */
713 #elif defined(TARGET_UNICORE32)
714 #elif defined(TARGET_SPARC)
715 #elif defined(TARGET_PPC)
716 #elif defined(TARGET_LM32)
717 #elif defined(TARGET_M68K)
718 cpu_m68k_flush_flags(env
, env
->cc_op
);
719 env
->cc_op
= CC_OP_FLAGS
;
720 env
->sr
= (env
->sr
& 0xffe0)
721 | env
->cc_dest
| (env
->cc_x
<< 4);
722 #elif defined(TARGET_MICROBLAZE)
723 #elif defined(TARGET_MIPS)
724 #elif defined(TARGET_MOXIE)
725 #elif defined(TARGET_OPENRISC)
726 #elif defined(TARGET_SH4)
727 #elif defined(TARGET_ALPHA)
728 #elif defined(TARGET_CRIS)
729 #elif defined(TARGET_S390X)
730 #elif defined(TARGET_XTENSA)
733 #error unsupported target CPU
736 /* fail safe : never use current_cpu outside cpu_exec() */