/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
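/* Set during code generation when translated blocks may have been
   invalidated (e.g. the whole TB cache was flushed); the execution loop
   below then avoids chaining to a possibly stale next_tb. */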
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
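/* Unwind straight back to the setjmp() in cpu_exec(); callers are
   expected to have set env->exception_index beforehand. */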
void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
    env = env1;

    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
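/* Look up a TB via the global physical-address hash chain, translating
   the guest code on a miss.  Physical addresses are used so that a
   cached block stays valid when the guest's virtual mappings change. */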
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
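/* Fast path: probe the per-CPU virtual-PC hash (tb_jmp_cache) and fall
   back to tb_find_slow() on a miss or when the recorded CPU state
   (cs_base, flags) no longer matches. */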
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
/* main execution loop */

volatile sig_atomic_t exit_request;
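/* Run translated guest code for env1 until something forces a stop
   (pending exception, debug event, halt or an exit request), then
   return the exception code describing why execution stopped. */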
int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env1->halted) {
        if (!cpu_has_work(env1)) {
            return EXCP_HALTED;
        }

        env1->halted = 0;
    }
    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_LM32)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_UNICORE32)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_S390X)
                    do_interrupt(env);
#endif
                    env->exception_index = -1;
#endif
                }
            }
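            /* next_tb caches the address of the previously executed TB;
               its low two bits encode the jump slot taken (used below to
               patch a direct jump into the next block) or, when equal to
               2, an exit forced by the instruction counter. */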
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            svm_check_intercept(SVM_EXIT_INIT);
                            do_cpu_init(env);
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                   /* Don't use the cached interrupt_request value,
                      do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
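                /* tb_lock serialises TB lookup and jump patching; this
                   matters mainly for user-mode emulation, where several
                   guest threads may run this loop concurrently. */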
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
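                    /* With icount, translated code runs on a 16-bit
                       instruction budget (icount_decr.u16.low); a return
                       with low bits == 2 means the budget expired
                       mid-block, so it is either refilled from
                       icount_extra or the leftover instructions are
                       executed uncached below. */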
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */
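
    /* We only reach this point via the "break" above, i.e. once a final
       exception code (EXCP_INTERRUPT or higher) has been stored in ret. */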
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}