cpu: Move watchpoint fields from CPU_COMMON to CPUState
[qemu-kvm.git] / cpu-exec.c
blob d7c21d35e50e466f76a0b0be8961ab659ab32c33
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
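/* Editorial note: cpu_loop_exit() below abandons execution of the current TB
   and siglongjmp()s back to the sigsetjmp() point in cpu_exec(); it is the
   common way for exception-raising helpers to unwind out of generated code. */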
void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    CPUState *cpu = ENV_GET_CPU(env);

    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif
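/* Editorial note: the value returned by cpu_tb_exec() packs two things
   together.  The low TB_EXIT_MASK bits say why the generated code returned
   (chaining slot index, TB_EXIT_REQUESTED or TB_EXIT_ICOUNT_EXPIRED), and the
   remaining bits are the host pointer of the last executed TranslationBlock,
   recovered below as (next_tb & ~TB_EXIT_MASK). */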
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping. */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
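/* Editorial note: cpu_exec_nocache() below builds a throwaway TB capped at
   max_cycles instructions so that an icount deadline can be honoured exactly;
   the block is executed once, then invalidated and freed instead of being
   kept in the translation cache. */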
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
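/* Editorial note: slow-path TB lookup below hashes the physical PC and walks
   the tb_phys_hash chain comparing pc, cs_base and flags (plus the second
   page for TBs spanning a page boundary); it translates a new TB if nothing
   matches, then moves the result to the head of the chain and into the
   virtual-PC tb_jmp_cache. */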
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
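/* Editorial note: the fast path probes tb_jmp_cache with the current virtual
   PC and falls back to tb_find_slow() when the cached entry is missing or its
   pc/cs_base/flags do not match the current CPU state. */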
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
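/* Editorial note: debug exception handling.  When no watchpoint was actually
   hit, any stale BP_WATCHPOINT_HIT flags are cleared before the registered
   debug_excp_handler (if any) is invoked; the watchpoint list and
   watchpoint_hit pointer are fields of CPUState, per the commit subject
   above. */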
static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;
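    /* Editorial note: exceptions raised while executing translated code
       siglongjmp() back to the sigsetjmp() below.  exception_index values
       >= EXCP_INTERRUPT (EXCP_DEBUG, EXCP_HLT, EXCP_HALTED, ...) mean "leave
       cpu_exec() and hand the code back to the caller"; smaller values are
       guest exceptions handled in place, normally via cc->do_interrupt(). */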
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }
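            /* Editorial note: inner execution loop.  Service pending
               interrupt_request bits for the target, look up (or translate)
               the next TB, optionally chain it to the previous one, and
               execute it until something forces an exit. */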
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
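                    /* Editorial note: per-target hardware interrupt dispatch.
                       Each target checks its own enable conditions and either
                       calls cc->do_interrupt() or a target-specific helper;
                       next_tb is reset to 0 afterwards so the changed control
                       flow is not patched into a direct TB-to-TB jump. */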
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC. On real hardware the load causes the
                       return to occur. The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels. */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point. Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
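                    /* Editorial note: TB_EXIT_ICOUNT_EXPIRED means the 16-bit
                     * decrementer in icount_decr.u16.low reached zero.  If
                     * icount_extra still has budget, the decrementer is
                     * refilled (capped at 0xffff) and execution continues;
                     * otherwise the few remaining instructions run through
                     * cpu_exec_nocache() and the loop is left with
                     * EXCP_INTERRUPT. */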
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
        }
    } /* for(;;) */
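    /* Editorial note: on the way out, targets that keep condition flags in a
       TCG-friendly "temporary" format (i386 lazy CC_OP flags, m68k
       cc_dest/cc_x) fold them back into the architectural registers before
       returning. */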
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?. */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}