/* qemu.git / cpu-exec.c */
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}
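
/* (cpu_loop_exit() unwinds to the sigsetjmp() in cpu_exec() below;
 * current_tb is cleared first so that no TB still appears to be
 * running once the longjmp completes.) */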

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif
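
/* The value returned by tcg_qemu_tb_exec() (and passed through by
 * cpu_tb_exec() below) packs a pointer to the last TB executed together
 * with a TB_EXIT_* reason in its low bits; the halves are recovered
 * with (next_tb & ~TB_EXIT_MASK) and (next_tb & TB_EXIT_MASK). */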

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
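
/* (cpu_exec_nocache() is reached from the icount handling in cpu_exec()
 * below, when fewer instructions remain in the budget than the cached
 * TB would execute.) */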

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
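
/* tb_find_fast() consults the per-CPU tb_jmp_cache, a direct-mapped
 * table indexed by a hash of the guest virtual PC; only on a miss does
 * it fall back to the physically indexed hash chain that tb_find_slow()
 * walks above. */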
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;
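
/* Shape of cpu_exec(): sigsetjmp() establishes the landing point for
 * cpu_loop_exit(), the outer for(;;) re-enters after each such
 * exception, and the inner for(;;) services pending interrupts, finds
 * (or translates) the next TB, chains it to its predecessor where
 * possible and runs it. */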
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
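
                /* tb_add_jump() patches the exit branch of the previous
                 * TB so that it jumps straight into the new TB's code;
                 * chained TBs bypass this lookup loop on later passes. */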

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
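                    /* With -icount, the generated code for each TB
                     * decrements the 16-bit cpu->icount_decr.u16.low
                     * budget and takes this exit when it runs out;
                     * amounts above 0xffff wait in cpu->icount_extra
                     * and are refilled in 16-bit slices below. */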
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}