[qemu/ar7.git] / cpu-exec.c
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif
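
/* Note on the value returned by tcg_qemu_tb_exec() and cpu_tb_exec(): the
 * low bits (TB_EXIT_MASK) encode why execution stopped, while the remaining
 * bits are a pointer to the last TranslationBlock that was (or was about to
 * be) executed, recovered below as
 * (TranslationBlock *)(next_tb & ~TB_EXIT_MASK). */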
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        cpu_pc_from_tb(env, tb);
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
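
/* TBs are looked up by the *physical* address of the guest PC, so a
 * translation is reused even when the same physical page is mapped at
 * several virtual addresses.  A TB that crosses a page boundary records its
 * second physical page in page_addr[1], which must match as well. */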
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
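
/* Fast path: tb_jmp_cache is a small direct-mapped cache indexed by a hash
 * of the current virtual PC.  If the cached TB does not match the current
 * pc/cs_base/flags, fall back to the physical-hash lookup above. */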
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;
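
/* cpu_exec() is structured as two nested loops: the outer for(;;) sets up a
 * sigsetjmp() target so that cpu_loop_exit() can unwind back here when an
 * exception or exit request is raised, and the inner for(;;) services
 * pending interrupts, finds (or translates) the next TB and executes it,
 * chaining TBs directly together whenever possible. */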
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    cpu_single_env = env;

    /* As long as cpu_single_env is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the cpu_single_env
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }
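
            /* next_tb holds the pointer (plus jump-slot index in its low
             * TB_EXIT_MASK bits) of the previously executed TB; it is used
             * further down to patch that TB so it jumps directly to the
             * next one.  Zero means "no chaining", forcing a fresh lookup. */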
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    log_cpu_state(env, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS */
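                /* Take tb_lock while looking up (or generating) the next TB
                 * and while patching direct jumps between TBs: lookups, code
                 * generation and jump patching all touch shared translator
                 * state. */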
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
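                        /* The 16-bit decrementer icount_decr.u16.low is
                         * counted down as translated code runs; any budget
                         * that does not fit in 16 bits is parked in
                         * icount_extra and transferred over in slices of at
                         * most 0xffff below. */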
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */
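
    /* We only reach this point when env->exception_index requested an exit
     * from the loop above (ret was set there).  Convert any target state
     * kept in a lazy/temporary format during translation back into its
     * canonical guest-visible form before returning. */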
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}