/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "disas/disas.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks should therefore
 * never exceed this amount.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
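
/* Rough meaning of these constants as they are used below: VM_CLOCK_ADVANCE
 * is the largest lead (in ns) the guest clock may take over the host clock
 * before align_clocks() puts the host thread to sleep; MAX_DELAY_PRINT_RATE
 * and MAX_NB_PRINTS rate-limit the "guest is late" warnings, and
 * THRESHOLD_REDUCE is the hysteresis applied to the lateness threshold
 * before a new warning is printed. */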
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print at most every 2s if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */
void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}
/* Exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif
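
/* Both helpers above unwind to the sigsetjmp() call in cpu_exec() below:
 * the non-zero siglongjmp() value makes that sigsetjmp() return 1, so the
 * main loop skips its exception-dispatch branch, reloads its local state
 * and starts a fresh iteration. */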
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping. */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
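
/* Note on the return value, as it is used above and in cpu_exec() below:
 * the low TB_EXIT_MASK bits of next_tb encode why the generated code
 * returned (normal chaining via exit slot 0/1, TB_EXIT_REQUESTED, or
 * TB_EXIT_ICOUNT_EXPIRED), while the remaining bits hold the address of
 * the TranslationBlock that was running when the exit was taken, which is
 * why callers mask with ~TB_EXIT_MASK before using it as a pointer. */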
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB to the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
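
/* TB lookup is thus two-level: tb_find_fast() probes the per-CPU
 * tb_jmp_cache, a direct-mapped cache indexed by a hash of the guest PC,
 * and only falls back to tb_find_slow(), which walks the physically
 * indexed tb_phys_hash chain (and, if necessary, translates a new TB),
 * when the cached entry does not match the current pc/cs_base/flags. */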
static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;
    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();
    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
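    /* In other words, the x86 front end keeps EFLAGS in a decomposed,
     * lazily evaluated form while translated code runs: the arithmetic
     * flags live in CC_SRC/CC_OP, the direction flag is cached as +1/-1 in
     * env->df, and the corresponding bits are cleared from env->eflags
     * until they are folded back in when the loop exits (see the
     * cpu_cc_compute_all() call near the end of this function). */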
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* Calculate the difference between the guest clock and the host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);
    /* prepare setjmp context for exception handling */
    for (;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || \
    defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing virtual hardware INT=0x%02x\n",
                                          intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
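                    /* The else-if chain above encodes the x86 interrupt
                     * acceptance order used here: SIPI first, then (with
                     * GIF set) SMI, NMI, machine check, external hardware
                     * interrupts, and finally virtual interrupts injected
                     * via the SVM VMCB; the interrupt-injection paths also
                     * reset next_tb to 0 so that no stale TB chaining
                     * survives the change in control flow. */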
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC. On real hardware the load causes the
                       return to occur. The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels. */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point. Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                have_tb_lock = true;
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
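                /* Chaining in a nutshell, as implemented just above: when a
                 * TB ends by jumping through exit slot 0 or 1, the slot
                 * index comes back in next_tb, and tb_add_jump() patches
                 * that slot of the previous TB to branch directly into the
                 * newly found TB, so future executions skip this lookup
                 * path entirely until the link is invalidated. */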
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
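                /* Background on the icount bookkeeping above: with -icount
                 * the translated code decrements the 16-bit counter in
                 * icount_decr.u16.low and exits when it underflows;
                 * icount_extra holds the instructions budgeted beyond what
                 * fits in 16 bits, so the handler either refills the
                 * decrementer from icount_extra or, when the budget is
                 * exhausted, runs the leftover instructions uncached and
                 * breaks out so timer and interrupt work can run. */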
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
#else
#error unsupported target CPU
#endif

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}