/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}
void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif
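
/*
 * Note: tcg_qemu_tb_exec() returns the address of the TB that was running
 * when control came back to us, with the exit reason encoded in the two
 * low bits (the TB_EXIT_* values selected by TB_EXIT_MASK).  cpu_tb_exec()
 * below decodes those bits so the main loop can tell a normal chained exit
 * apart from a block that was interrupted or never started.
 */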
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        cpu_pc_from_tb(env, tb);
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
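
/*
 * cpu_exec_nocache() below serves the icount machinery: when fewer
 * instructions remain in the budget than the next cached TB contains,
 * a throw-away TB limited to max_cycles instructions is generated,
 * executed once, then invalidated and freed (see the
 * TB_EXIT_ICOUNT_EXPIRED handling in cpu_exec()).
 */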
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
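
/*
 * TB lookup is two-level: tb_find_fast() first probes the per-CPU
 * virtual-PC jump cache (env->tb_jmp_cache); on a miss, tb_find_slow()
 * below walks the physical-address hash chain and, if no matching block
 * exists, translates a new one with tb_gen_code().
 */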
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
/* main execution loop */

volatile sig_atomic_t exit_request;
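
/*
 * exit_request is a process-global flag (hence volatile sig_atomic_t): it
 * can be raised asynchronously, e.g. from a signal handler or another
 * thread, and is sampled on entry to cpu_exec() to turn it into the
 * per-CPU cpu->exit_request.
 */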
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
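
    /*
     * The rest of the function is two nested loops guarded by sigsetjmp():
     * the outer loop dispatches any pending exception and is re-entered
     * after a cpu_loop_exit() longjmp; the inner loop services pending
     * interrupts and then looks up and runs translated blocks until
     * something forces an exit.
     */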
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    log_cpu_state(env, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
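
                /* Once patched, chained TBs branch directly to each other,
                   so control only returns to this loop on an unchained
                   exit, an exit request or an expired instruction count. */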
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
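                    /* With -icount, translated code decrements the 16-bit
                     * counter in icount_decr.u16.low; icount_extra holds
                     * the part of the instruction budget that does not fit
                     * in 16 bits.  When the counter runs out we get here
                     * and either refill it from icount_extra or, if the
                     * budget is exhausted, execute the last few
                     * instructions uncached and leave the loop.
                     */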
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}