target-ppc: Turn "ppc32" and "ppc64" CPUs into aliases
[qemu/agraf.git] / cpu-exec.c
blob 9092145d0b4e6493dd329d1e598bb18d76297f59
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}
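
/* Note: the siglongjmp() above lands back at the sigsetjmp() in cpu_exec()'s
   outer loop below, which then inspects env->exception_index before resuming
   translation and execution. */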

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        cpu_pc_from_tb(env, tb);
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
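
/* A note on the value returned by tcg_qemu_tb_exec(): it is the pointer to
   the last TranslationBlock executed, with the exit condition encoded in the
   low bits covered by TB_EXIT_MASK.  Indexes 0/1 name the goto_tb slot the
   block exited through, TB_EXIT_REQUESTED means an external request (e.g.
   cpu_exit) stopped execution, and TB_EXIT_ICOUNT_EXPIRED means the icount
   decrementer ran out before the block was entered. */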

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
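
/* Note: cpu_exec_nocache() is only reached from the TB_EXIT_ICOUNT_EXPIRED
   path in cpu_exec() below, when fewer instructions remain in the icount
   budget than the cached TB would execute; the throw-away TB generated here
   is capped at max_cycles instructions and freed as soon as it has run. */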

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
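
/* Note: TB lookup is two-level.  tb_find_slow() hashes the physical PC into
   tb_phys_hash and walks the collision chain (so aliased mappings and page
   replacement are handled correctly), and the result is also cached in
   env->tb_jmp_cache keyed by the virtual PC, so that tb_find_fast() below
   can usually avoid this function altogether. */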

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (env->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        env->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, CPU_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
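                /* Chaining detail: the low bits of next_tb identify which
                   goto_tb slot the previously executed TB exited through;
                   tb_add_jump() patches that slot to branch directly into
                   the new TB, so chained blocks bypass this lookup loop. */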
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
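                        /* Decrementer bookkeeping for the refill above:
                           icount_decr.u16.low holds at most 0xffff pending
                           instructions, with any larger budget parked in
                           icount_extra; on expiry the leftover low count is
                           folded back into icount_extra and up to 0xffff
                           instructions are handed back to the decrementer. */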
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}