/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif
#include "qemu-barrier.h"

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
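/* Defining CONFIG_DEBUG_EXEC enables the per-TB trace output further down
   in cpu_exec() (the "Trace 0x... [...]" lines emitted with
   qemu_log_mask(CPU_LOG_EXEC, ...)) as well as the CPU state dumps guarded
   by it below. */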

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
    env = env1;

    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;
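
    /* Note: the value returned by tcg_qemu_tb_exec() carries extra
       information in its low two bits; a value of 2 indicates the block
       exited because the instruction counter expired, possibly before the
       first instruction was executed. */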
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
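
/* TB lookup is two-level: tb_find_fast() first probes the per-CPU
   tb_jmp_cache, indexed by virtual PC, and only falls back to
   tb_find_slow() above, which searches the physically indexed
   tb_phys_hash chain and translates a new block if nothing matches. */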
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;
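
/* exit_request is set asynchronously (its sig_atomic_t type allows a signal
   handler to write it) to request that cpu_exec() return at the next
   opportunity; cpu_exec() folds it into env->exit_request below. */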

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env1->halted) {
        if (!cpu_has_work(env1)) {
            return EXCP_HALTED;
        }

        env1->halted = 0;
    }

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }
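
    /* Targets that keep condition codes in a lazily evaluated internal form
       (notably i386 and m68k) convert the architectural flags into that form
       before entering translated code; the reverse conversion is done on the
       way out of cpu_exec(), further below. */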
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
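    /* cpu_loop_exit() longjmp()s back to this setjmp(); the outer for(;;)
       then re-arms the jump buffer and any exception recorded in
       env->exception_index is delivered at the top of the block. */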
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_LM32)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_UNICORE32)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#elif defined(TARGET_S390X)
                    do_interrupt(env);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
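                    /* The TARGET_I386 block below orders its checks roughly
                       by event priority: INIT and SIPI first, then (with GIF
                       set) SMI, NMI, machine check, and finally maskable
                       external and virtual interrupts. */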
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
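                /* next_tb carries the host pointer of the previously executed
                   TB with extra information in its low two bits; for a normal
                   chained exit these give the jump slot to patch, which
                   tb_add_jump() below uses to link the old TB directly to the
                   new one. */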
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
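                        /* icount bookkeeping: icount_decr.u16.low is the
                           16-bit budget decremented by the generated code,
                           while icount_extra holds instructions that did not
                           fit in that field.  When the budget runs out we
                           either refill it from icount_extra or, if nothing
                           is left, execute the remaining instructions
                           uncached and leave the loop so pending timer
                           events can be processed. */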
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}