/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"

/* -icount align implementation. */
typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;
#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
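
/* align_clocks() keeps the guest icount clock from running ahead of real
 * time: diff_clk accumulates how far the virtual clock has advanced beyond
 * the host clock, and once it exceeds VM_CLOCK_ADVANCE the thread sleeps
 * the difference away.
 */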
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
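
/* print_delay() warns when the guest falls behind real time, at most once
 * every MAX_DELAY_PRINT_RATE ns and at most MAX_NB_PRINTS times in total;
 * threshold_delay and THRESHOLD_REDUCE add hysteresis so the message is not
 * repeated for small fluctuations around the same delay.
 */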
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}
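
/* Seed the clock bookkeeping at the start of cpu_exec(): record the current
 * real time, the initial virtual/real clock offset and the starting icount,
 * and fold the initial offset into the max_delay/max_advance statistics
 * (declared elsewhere).
 */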
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */
void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif
/* Execute a TB, and fix up the CPU state afterwards if necessary */
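/* tcg_qemu_tb_exec() returns the address of the last TB, with the low
 * TB_EXIT_MASK bits encoding why execution stopped; the fix-ups below
 * decode that value.
 */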
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
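/* The temporary TB built here is capped at max_cycles guest instructions
 * and is invalidated and freed as soon as it has run, so it never lingers
 * in the translation caches.
 */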
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
/* main execution loop */

volatile sig_atomic_t exit_request;
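
/* cpu_exec() is structured as two nested loops: the outer loop re-enters via
 * sigsetjmp() whenever cpu_loop_exit() longjmps out of execution (pending
 * exceptions, exit requests), while the inner loop services interrupts,
 * finds the next TB and runs it, chaining TBs together where possible.
 */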
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
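            /* Inner execution loop: service any pending interrupt or exit
             * request for the current target, then look up the next TB,
             * try to chain it to the previous one, and execute it.
             */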
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
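                    /* The icount decrementer (icount_decr.u16.low) holds at
                     * most 16 bits of the instruction budget; icount_extra
                     * keeps the remainder.  When the decrementer expires we
                     * either refill it from icount_extra or run the few
                     * remaining instructions in an uncached TB and leave
                     * the loop.
                     */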
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */
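
    /* On the way out, targets that keep lazily-computed condition codes in a
     * temporary form during execution (i386 eflags, m68k CC ops) fold them
     * back into the architectural registers before returning.
     */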
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}