[qemu/cris-port.git] / cpu-exec.c
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

//#define CONFIG_DEBUG_EXEC
bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}
void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif
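
/* Translated code returns (via cpu_tb_exec() below) the address of the last
 * executed TranslationBlock, with the reason for exiting encoded in the low
 * TB_EXIT_MASK bits: index 0/1 for the two direct-jump slots,
 * TB_EXIT_REQUESTED for an asynchronous exit request, and
 * TB_EXIT_ICOUNT_EXPIRED when the instruction-count budget ran out.
 */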
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        cpu_pc_from_tb(env, tb);
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
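
/* TB lookup is two-level: tb_find_fast() first consults the per-CPU
 * tb_jmp_cache, indexed by the guest virtual PC, and only falls back to
 * tb_find_slow(), which hashes the physical PC, walks
 * tcg_ctx.tb_ctx.tb_phys_hash and generates a fresh TB when nothing
 * matches.
 */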
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
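
/* cpu_exec() below is the heart of the TCG execution loop: an outer for(;;)
 * whose body runs under sigsetjmp() so that cpu_loop_exit() can abort
 * execution from anywhere via siglongjmp(), and an inner for(;;) that
 * services pending interrupts, looks up (or generates) the next TB,
 * optionally chains it to the previous one and then executes it.
 */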
/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;
    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
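    /* While translated code runs, several targets keep part of the guest
     * state in a TCG-friendly working format (e.g. x86 condition codes are
     * held lazily in CC_SRC/CC_OP and DF as +/-1).  The #if block above
     * converts into that format on entry; the matching block near the end
     * of this function converts back before returning.
     */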
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }
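
            /* next_tb remembers the previously executed TB (plus exit-reason
             * bits in its low bits) so that tb_add_jump() can later patch a
             * direct jump from it to the TB found below; forcing it to zero
             * disables that chaining whenever control flow may have changed.
             */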
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
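                    /* The target-specific blocks below turn pending hardware
                     * interrupt flags into guest exceptions; each taken
                     * interrupt also clears next_tb so the next TB is not
                     * chained to code that ran before the interrupt.
                     */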
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, CPU_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
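                /* tb_lock protects the TB lookup/generation path and the
                 * direct-jump patching below (tb_add_jump) from concurrent
                 * modification of the translation structures. */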
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
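                    /* With -icount, icount_decr.u16.low holds the instruction
                     * budget for the current slice and icount_extra the
                     * instructions still owed beyond that 16-bit window; the
                     * TB_EXIT_ICOUNT_EXPIRED case below either refills the
                     * decrementer from icount_extra or, once both are spent,
                     * raises EXCP_INTERRUPT to leave the loop.
                     */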
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}