[qemu/pbrook.git] / cpu-exec.c
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

//#define CONFIG_DEBUG_EXEC
bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    tcg_target_ulong next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    cpu->current_tb = NULL;
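
    /* The low two bits of next_tb report why tcg_qemu_tb_exec() returned;
       a value of 2 means the block was cut short (e.g. by an asynchronous
       event), so the guest PC has to be recovered from the TB state. */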
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
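    /* Walk the physical-PC hash chain for a TB matching pc, cs_base and
       flags; ptb1 tracks the link we followed so that a hit can be moved
       to the head of the chain below. */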
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;
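
    /* If the debug exception was not triggered by a watchpoint, clear any
       stale per-watchpoint hit flags before dispatching to the handler. */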
    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
/* main execution loop */
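
/* Set asynchronously (e.g. from a signal handler) to request that every
   vCPU leave cpu_exec(); each CPU latches it into its own exit_request
   flag at the top of cpu_exec() below. */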
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (env->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
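    /* DF is kept as +1 or -1 so string instructions can add it directly
       to their index registers. */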
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
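            /* next_tb remembers the previously executed TB (with the taken
               jump slot in its low bits) so that tb_add_jump() below can
               chain it to the next block; zero disables chaining. */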
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        env->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                          0);
                            do_cpu_init(x86_env_get_cpu(env));
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                   /* Don't use the cached interrupt_request value,
                      do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, CPU_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
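                /* Compiler barrier: the store to current_tb must not be
                   reordered after the exit_request test below. */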
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
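                        /* If more instructions were budgeted than fit in the
                           16-bit decrementer, refill it from icount_extra and
                           keep running; otherwise finish the few remaining
                           instructions without caching and exit the loop. */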
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}