cpu: Move jmp_env field from CPU_COMMON to CPUState
[qemu/qmp-unstable.git] / cpu-exec.c
blob 3e17ff534d2c0b1f5dc4073f6794498016c6680f
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    CPUState *cpu = ENV_GET_CPU(env);

    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
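    /* The low bits of next_tb (TB_EXIT_MASK) encode why the generated code
       returned: TB_EXIT_IDX0/IDX1 mean the TB left through one of its two
       goto_tb jump slots (the remaining bits then point at that TB so it can
       be chained to its successor), while the larger values flag an early
       exit such as an expired instruction counter or a pending exit
       request. */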
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt. We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
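    /* tb_gen_code() sets tb_invalidated_flag when it had to flush the whole
       translation cache to make room; cpu_exec() checks it and drops its
       cached next_tb, since any previously found TB may have been freed. */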
    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
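    /* tb_jmp_cache is a small direct-mapped cache indexed by a hash of the
       virtual PC; a hit avoids walking the physical hash table in
       tb_find_slow(), but pc, cs_base and flags must still match exactly. */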
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
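            /* cpu_loop_exit() siglongjmps back to the sigsetjmp above with a
               non-zero value, so a raised exception resumes in the else
               branch at the bottom of this loop, which reloads the locals
               that the longjmp may have clobbered before iterating again. */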
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_cpu);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
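                /* The low bits of next_tb identify which of the previous
                   TB's two goto_tb slots we exited through; tb_add_jump()
                   patches that slot to branch directly to this TB, so later
                   executions skip the lookup above entirely. */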
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
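                        /* With -icount, the generated code decrements the
                           16-bit budget in icount_decr.u16.low and leaves
                           the TB once it is exhausted; instructions beyond
                           what fits in 16 bits wait in icount_extra and are
                           used below to top the decrementer back up. */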
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}