/* qemu/cris-port.git: cpu-exec.c */
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif
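/* Note on cpu_tb_exec() below: the value produced by the generated code is a
 * TranslationBlock pointer with the low TB_EXIT_MASK bits encoding why
 * execution stopped (TB_EXIT_REQUESTED for an asynchronous exit request, or
 * an exit index above TB_EXIT_IDX1 when the TB was not entered at all).
 * Callers mask those bits off before dereferencing the pointer.
 */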
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb = tcg_qemu_tb_exec(env, tb_ptr);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
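/* cpu_exec_nocache() is the slow path used by the icount machinery when the
 * remaining instruction budget is smaller than an already-translated block:
 * it translates a one-off TB limited to max_cycles instructions, runs it,
 * then immediately invalidates and frees it again.
 */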
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
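/* TB lookup is two-level: tb_find_fast() first probes the per-CPU
 * tb_jmp_cache, indexed by the guest virtual PC, and falls back to
 * tb_find_slow(), which walks the physical-address hash chain (checking the
 * second page for TBs that span a page boundary) and translates a new block
 * if nothing matches.
 */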
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
/* main execution loop */

volatile sig_atomic_t exit_request;
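/* exit_request is the global flag other threads use to ask the execution
 * loop to stop before current_cpu has been published; once cpu_exec() below
 * has set current_cpu, the request is mirrored into cpu->exit_request and
 * handled inside the loop (see the smp_mb() comment there).
 */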
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for (;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }
            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_cpu);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(cpu, 0);
#else
                    log_cpu_state(cpu, 0);
#endif
                }
#endif /* DEBUG_DISAS */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.
                               The 16-bit low half of icount_decr is the
                               per-run budget; icount_extra holds whatever
                               does not fit into it.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}