2 * i386 emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #if !defined(TARGET_IA64)
26 #include "qemu-barrier.h"
28 #if !defined(CONFIG_SOFTMMU)
40 #include <sys/ucontext.h>
46 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
47 // Work around ugly bugs in glibc that mangle global register contents
49 #define env cpu_single_env
52 int tb_invalidated_flag
;
54 //#define CONFIG_DEBUG_EXEC
55 //#define DEBUG_SIGNAL
/* Return non-zero when the CPU described by ENV has pending work
   (thin public wrapper around the per-target cpu_has_work()). */
int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
/* Abandon execution of the current translation block and return to the
   setjmp point established in cpu_exec() — this is how generated code
   and helpers bail out of the execution loop. */
void cpu_loop_exit(void)
{
    /* No TB is considered "executing" once we unwind. */
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* Re-enable the signals that were blocked on handler entry,
           since we longjmp out instead of returning through sigreturn.
           XXX: use siglongjmp ? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    /* Generate a one-shot TB bounded to max_cycles instructions. */
    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    /* The TB was single-use: invalidate and free it immediately. */
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
/* Slow-path TB lookup: walk the physical-address hash chain for a TB
   matching (pc, cs_base, flags); translate a new one if none exists.
   Also refreshes the virtual-pc jump cache entry for pc. */
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
/* Fast-path TB lookup via the per-CPU virtual-pc jump cache; falls back
   to tb_find_slow() on a cache miss or state mismatch. */
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
/* Optional hook invoked on EXCP_DEBUG; NULL when unset. */
static CPUDebugExcpHandler *debug_excp_handler;

/* Install a new debug-exception handler and return the previous one
   so the caller can chain or restore it. */
CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
/* Handle EXCP_DEBUG: clear stale watchpoint-hit flags when no
   watchpoint actually fired, then invoke the registered handler. */
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
227 /* main execution loop */
229 volatile sig_atomic_t exit_request
;
231 int cpu_exec(CPUState
*env1
)
233 volatile host_reg_t saved_env_reg
;
234 int ret
, interrupt_request
;
235 TranslationBlock
*tb
;
237 unsigned long next_tb
;
240 if (!cpu_has_work(env1
)) {
247 cpu_single_env
= env1
;
249 /* the access to env below is actually saving the global register's
250 value, so that files not including target-xyz/exec.h are free to
252 QEMU_BUILD_BUG_ON (sizeof (saved_env_reg
) != sizeof (env
));
253 saved_env_reg
= (host_reg_t
) env
;
257 if (unlikely(exit_request
)) {
258 env
->exit_request
= 1;
261 #if defined(TARGET_I386)
262 /* put eflags in CPU temporary format */
263 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
264 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
265 CC_OP
= CC_OP_EFLAGS
;
266 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
267 #elif defined(TARGET_SPARC)
268 #elif defined(TARGET_M68K)
269 env
->cc_op
= CC_OP_FLAGS
;
270 env
->cc_dest
= env
->sr
& 0xf;
271 env
->cc_x
= (env
->sr
>> 4) & 1;
272 #elif defined(TARGET_ALPHA)
273 #elif defined(TARGET_ARM)
274 #elif defined(TARGET_UNICORE32)
275 #elif defined(TARGET_PPC)
276 #elif defined(TARGET_LM32)
277 #elif defined(TARGET_MICROBLAZE)
278 #elif defined(TARGET_MIPS)
279 #elif defined(TARGET_SH4)
280 #elif defined(TARGET_CRIS)
281 #elif defined(TARGET_S390X)
282 #elif defined(TARGET_IA64)
285 #error unsupported target CPU
287 env
->exception_index
= -1;
289 /* prepare setjmp context for exception handling */
291 if (setjmp(env
->jmp_env
) == 0) {
292 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
294 env
= cpu_single_env
;
295 #define env cpu_single_env
297 /* if an exception is pending, we execute it here */
298 if (env
->exception_index
>= 0) {
299 if (env
->exception_index
>= EXCP_INTERRUPT
) {
300 /* exit request from the cpu execution loop */
301 ret
= env
->exception_index
;
302 if (ret
== EXCP_DEBUG
) {
303 cpu_handle_debug_exception(env
);
307 #if defined(CONFIG_USER_ONLY)
308 /* if user mode only, we simulate a fake exception
309 which will be handled outside the cpu execution
311 #if defined(TARGET_I386)
312 do_interrupt_user(env
->exception_index
,
313 env
->exception_is_int
,
315 env
->exception_next_eip
);
316 /* successfully delivered */
317 env
->old_exception
= -1;
319 ret
= env
->exception_index
;
322 #if defined(TARGET_I386)
323 /* simulate a real cpu exception. On i386, it can
324 trigger new exceptions, but we do not handle
325 double or triple faults yet. */
326 do_interrupt(env
->exception_index
,
327 env
->exception_is_int
,
329 env
->exception_next_eip
, 0);
330 /* successfully delivered */
331 env
->old_exception
= -1;
332 #elif defined(TARGET_PPC)
334 #elif defined(TARGET_LM32)
336 #elif defined(TARGET_MICROBLAZE)
338 #elif defined(TARGET_MIPS)
340 #elif defined(TARGET_SPARC)
342 #elif defined(TARGET_ARM)
344 #elif defined(TARGET_UNICORE32)
346 #elif defined(TARGET_SH4)
348 #elif defined(TARGET_ALPHA)
350 #elif defined(TARGET_CRIS)
352 #elif defined(TARGET_M68K)
354 #elif defined(TARGET_IA64)
356 #elif defined(TARGET_S390X)
359 env
->exception_index
= -1;
364 next_tb
= 0; /* force lookup of first TB */
366 interrupt_request
= env
->interrupt_request
;
367 if (unlikely(interrupt_request
)) {
368 if (unlikely(env
->singlestep_enabled
& SSTEP_NOIRQ
)) {
369 /* Mask out external interrupts for this step. */
370 interrupt_request
&= ~CPU_INTERRUPT_SSTEP_MASK
;
372 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
373 env
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
374 env
->exception_index
= EXCP_DEBUG
;
377 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
378 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
379 defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
380 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
381 env
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
383 env
->exception_index
= EXCP_HLT
;
387 #if defined(TARGET_I386)
388 if (interrupt_request
& CPU_INTERRUPT_INIT
) {
389 svm_check_intercept(SVM_EXIT_INIT
);
391 env
->exception_index
= EXCP_HALTED
;
393 } else if (interrupt_request
& CPU_INTERRUPT_SIPI
) {
395 } else if (env
->hflags2
& HF2_GIF_MASK
) {
396 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
397 !(env
->hflags
& HF_SMM_MASK
)) {
398 svm_check_intercept(SVM_EXIT_SMI
);
399 env
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
402 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
403 !(env
->hflags2
& HF2_NMI_MASK
)) {
404 env
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
405 env
->hflags2
|= HF2_NMI_MASK
;
406 do_interrupt(EXCP02_NMI
, 0, 0, 0, 1);
408 } else if (interrupt_request
& CPU_INTERRUPT_MCE
) {
409 env
->interrupt_request
&= ~CPU_INTERRUPT_MCE
;
410 do_interrupt(EXCP12_MCHK
, 0, 0, 0, 0);
412 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
413 (((env
->hflags2
& HF2_VINTR_MASK
) &&
414 (env
->hflags2
& HF2_HIF_MASK
)) ||
415 (!(env
->hflags2
& HF2_VINTR_MASK
) &&
416 (env
->eflags
& IF_MASK
&&
417 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
))))) {
419 svm_check_intercept(SVM_EXIT_INTR
);
420 env
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VIRQ
);
421 intno
= cpu_get_pic_interrupt(env
);
422 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Servicing hardware INT=0x%02x\n", intno
);
423 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
425 env
= cpu_single_env
;
426 #define env cpu_single_env
428 do_interrupt(intno
, 0, 0, 0, 1);
429 /* ensure that no TB jump will be modified as
430 the program flow was changed */
432 #if !defined(CONFIG_USER_ONLY)
433 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
434 (env
->eflags
& IF_MASK
) &&
435 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
437 /* FIXME: this should respect TPR */
438 svm_check_intercept(SVM_EXIT_VINTR
);
439 intno
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_vector
));
440 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Servicing virtual hardware INT=0x%02x\n", intno
);
441 do_interrupt(intno
, 0, 0, 0, 1);
442 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
447 #elif defined(TARGET_PPC)
449 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
453 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
454 ppc_hw_interrupt(env
);
455 if (env
->pending_interrupts
== 0)
456 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
459 #elif defined(TARGET_LM32)
460 if ((interrupt_request
& CPU_INTERRUPT_HARD
)
461 && (env
->ie
& IE_IE
)) {
462 env
->exception_index
= EXCP_IRQ
;
466 #elif defined(TARGET_MICROBLAZE)
467 if ((interrupt_request
& CPU_INTERRUPT_HARD
)
468 && (env
->sregs
[SR_MSR
] & MSR_IE
)
469 && !(env
->sregs
[SR_MSR
] & (MSR_EIP
| MSR_BIP
))
470 && !(env
->iflags
& (D_FLAG
| IMM_FLAG
))) {
471 env
->exception_index
= EXCP_IRQ
;
475 #elif defined(TARGET_MIPS)
476 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
477 cpu_mips_hw_interrupts_pending(env
)) {
479 env
->exception_index
= EXCP_EXT_INTERRUPT
;
484 #elif defined(TARGET_SPARC)
485 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
486 if (cpu_interrupts_enabled(env
) &&
487 env
->interrupt_index
> 0) {
488 int pil
= env
->interrupt_index
& 0xf;
489 int type
= env
->interrupt_index
& 0xf0;
491 if (((type
== TT_EXTINT
) &&
492 cpu_pil_allowed(env
, pil
)) ||
494 env
->exception_index
= env
->interrupt_index
;
500 #elif defined(TARGET_ARM)
501 if (interrupt_request
& CPU_INTERRUPT_FIQ
502 && !(env
->uncached_cpsr
& CPSR_F
)) {
503 env
->exception_index
= EXCP_FIQ
;
507 /* ARMv7-M interrupt return works by loading a magic value
508 into the PC. On real hardware the load causes the
509 return to occur. The qemu implementation performs the
510 jump normally, then does the exception return when the
511 CPU tries to execute code at the magic address.
512 This will cause the magic PC value to be pushed to
513 the stack if an interrupt occurred at the wrong time.
514 We avoid this by disabling interrupts when
515 pc contains a magic address. */
516 if (interrupt_request
& CPU_INTERRUPT_HARD
517 && ((IS_M(env
) && env
->regs
[15] < 0xfffffff0)
518 || !(env
->uncached_cpsr
& CPSR_I
))) {
519 env
->exception_index
= EXCP_IRQ
;
523 #elif defined(TARGET_UNICORE32)
524 if (interrupt_request
& CPU_INTERRUPT_HARD
525 && !(env
->uncached_asr
& ASR_I
)) {
529 #elif defined(TARGET_SH4)
530 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
534 #elif defined(TARGET_ALPHA)
535 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
539 #elif defined(TARGET_CRIS)
540 if (interrupt_request
& CPU_INTERRUPT_HARD
541 && (env
->pregs
[PR_CCS
] & I_FLAG
)
542 && !env
->locked_irq
) {
543 env
->exception_index
= EXCP_IRQ
;
547 if (interrupt_request
& CPU_INTERRUPT_NMI
548 && (env
->pregs
[PR_CCS
] & M_FLAG
)) {
549 env
->exception_index
= EXCP_NMI
;
553 #elif defined(TARGET_M68K)
554 if (interrupt_request
& CPU_INTERRUPT_HARD
555 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
556 < env
->pending_level
) {
557 /* Real hardware gets the interrupt vector via an
558 IACK cycle at this point. Current emulated
559 hardware doesn't rely on this, so we
560 provide/save the vector when the interrupt is
562 env
->exception_index
= env
->pending_vector
;
566 #elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
567 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
568 (env
->psw
.mask
& PSW_MASK_EXT
)) {
573 /* Don't use the cached interrupt_request value,
574 do_interrupt may have updated the EXITTB flag. */
575 if (env
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
576 env
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
577 /* ensure that no TB jump will be modified as
578 the program flow was changed */
582 if (unlikely(env
->exit_request
)) {
583 env
->exit_request
= 0;
584 env
->exception_index
= EXCP_INTERRUPT
;
587 #if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
588 if (qemu_loglevel_mask(CPU_LOG_TB_CPU
)) {
589 /* restore flags in standard format */
590 #if defined(TARGET_I386)
591 env
->eflags
= env
->eflags
| helper_cc_compute_all(CC_OP
) | (DF
& DF_MASK
);
592 log_cpu_state(env
, X86_DUMP_CCOP
);
593 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
594 #elif defined(TARGET_M68K)
595 cpu_m68k_flush_flags(env
, env
->cc_op
);
596 env
->cc_op
= CC_OP_FLAGS
;
597 env
->sr
= (env
->sr
& 0xffe0)
598 | env
->cc_dest
| (env
->cc_x
<< 4);
599 log_cpu_state(env
, 0);
601 log_cpu_state(env
, 0);
604 #endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
607 /* Note: we do it here to avoid a gcc bug on Mac OS X when
608 doing it in tb_find_slow */
609 if (tb_invalidated_flag
) {
610 /* as some TB could have been invalidated because
611 of memory exceptions while generating the code, we
612 must recompute the hash index here */
614 tb_invalidated_flag
= 0;
616 #ifdef CONFIG_DEBUG_EXEC
617 qemu_log_mask(CPU_LOG_EXEC
, "Trace 0x%08lx [" TARGET_FMT_lx
"] %s\n",
618 (long)tb
->tc_ptr
, tb
->pc
,
619 lookup_symbol(tb
->pc
));
621 /* see if we can patch the calling TB. When the TB
622 spans two pages, we cannot safely do a direct
624 if (next_tb
!= 0 && tb
->page_addr
[1] == -1) {
625 tb_add_jump((TranslationBlock
*)(next_tb
& ~3), next_tb
& 3, tb
);
627 spin_unlock(&tb_lock
);
629 /* cpu_interrupt might be called while translating the
630 TB, but before it is linked into a potentially
631 infinite loop and becomes env->current_tb. Avoid
632 starting execution if there is a pending interrupt. */
633 env
->current_tb
= tb
;
635 if (likely(!env
->exit_request
)) {
637 /* execute the generated code */
638 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
640 env
= cpu_single_env
;
641 #define env cpu_single_env
643 next_tb
= tcg_qemu_tb_exec(tc_ptr
);
644 if ((next_tb
& 3) == 2) {
645 /* Instruction counter expired. */
647 tb
= (TranslationBlock
*)(long)(next_tb
& ~3);
649 cpu_pc_from_tb(env
, tb
);
650 insns_left
= env
->icount_decr
.u32
;
651 if (env
->icount_extra
&& insns_left
>= 0) {
652 /* Refill decrementer and continue execution. */
653 env
->icount_extra
+= insns_left
;
654 if (env
->icount_extra
> 0xffff) {
657 insns_left
= env
->icount_extra
;
659 env
->icount_extra
-= insns_left
;
660 env
->icount_decr
.u16
.low
= insns_left
;
662 if (insns_left
> 0) {
663 /* Execute remaining instructions. */
664 cpu_exec_nocache(insns_left
, tb
);
666 env
->exception_index
= EXCP_INTERRUPT
;
672 env
->current_tb
= NULL
;
673 /* reset soft MMU for next block (it can currently
674 only be set by a memory fault) */
680 #if defined(TARGET_I386)
681 /* restore flags in standard format */
682 env
->eflags
= env
->eflags
| helper_cc_compute_all(CC_OP
) | (DF
& DF_MASK
);
683 #elif defined(TARGET_ARM)
684 /* XXX: Save/restore host fpu exception state?. */
685 #elif defined(TARGET_UNICORE32)
686 #elif defined(TARGET_SPARC)
687 #elif defined(TARGET_PPC)
688 #elif defined(TARGET_LM32)
689 #elif defined(TARGET_M68K)
690 cpu_m68k_flush_flags(env
, env
->cc_op
);
691 env
->cc_op
= CC_OP_FLAGS
;
692 env
->sr
= (env
->sr
& 0xffe0)
693 | env
->cc_dest
| (env
->cc_x
<< 4);
694 #elif defined(TARGET_MICROBLAZE)
695 #elif defined(TARGET_MIPS)
696 #elif defined(TARGET_SH4)
697 #elif defined(TARGET_IA64)
698 #elif defined(TARGET_ALPHA)
699 #elif defined(TARGET_CRIS)
700 #elif defined(TARGET_S390X)
703 #error unsupported target CPU
706 /* restore global registers */
708 env
= (void *) saved_env_reg
;
710 /* fail safe : never use cpu_single_env outside cpu_exec() */
711 cpu_single_env
= NULL
;
/* must only be called from the generated code as
   an exception can be generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
728 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
/* Load a segment register for user-mode emulation.  In real/VM86 mode
   the descriptor cache is filled directly from the selector; in
   protected mode the full helper (which may fault) is used.  Saves and
   restores the global env around the call. */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        /* Real mode: base = selector << 4, limit = 0xffff. */
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
/* FSAVE for user-mode emulation: store FPU state to guest address PTR
   (data32 selects 32- vs 16-bit layout), swapping the global env. */
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}
/* FRSTOR for user-mode emulation: reload FPU state from guest address
   PTR (data32 selects 32- vs 16-bit layout), swapping the global env. */
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}
770 #endif /* TARGET_I386 */
772 #if !defined(CONFIG_SOFTMMU)
774 #if defined(TARGET_I386)
775 #define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
777 #define EXCEPTION_ACTION cpu_loop_exit()
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0'. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        /* Write to a page we protected for TB invalidation: handled. */
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
825 #if defined(__i386__)
827 #if defined(__APPLE__)
828 # include <sys/ucontext.h>
830 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
831 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
832 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
833 # define MASK_sig(context) ((context)->uc_sigmask)
834 #elif defined (__NetBSD__)
835 # include <ucontext.h>
837 # define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP])
838 # define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
839 # define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
840 # define MASK_sig(context) ((context)->uc_sigmask)
841 #elif defined (__FreeBSD__) || defined(__DragonFly__)
842 # include <ucontext.h>
844 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
845 # define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
846 # define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
847 # define MASK_sig(context) ((context)->uc_sigmask)
848 #elif defined(__OpenBSD__)
849 # define EIP_sig(context) ((context)->sc_eip)
850 # define TRAP_sig(context) ((context)->sc_trapno)
851 # define ERROR_sig(context) ((context)->sc_err)
852 # define MASK_sig(context) ((context)->sc_mask)
854 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
855 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
856 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
857 # define MASK_sig(context) ((context)->uc_sigmask)
/* Host SIGSEGV/SIGBUS handler for i386 hosts: extract the faulting PC
   and write flag (page-fault error code bit 1 when trapno == 0xe) from
   the signal context and forward to handle_cpu_signal(). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
888 #elif defined(__x86_64__)
891 #define PC_sig(context) _UC_MACHINE_PC(context)
892 #define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
893 #define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
894 #define MASK_sig(context) ((context)->uc_sigmask)
895 #elif defined(__OpenBSD__)
896 #define PC_sig(context) ((context)->sc_rip)
897 #define TRAP_sig(context) ((context)->sc_trapno)
898 #define ERROR_sig(context) ((context)->sc_err)
899 #define MASK_sig(context) ((context)->sc_mask)
900 #elif defined (__FreeBSD__) || defined(__DragonFly__)
901 #include <ucontext.h>
903 #define PC_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
904 #define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
905 #define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
906 #define MASK_sig(context) ((context)->uc_sigmask)
908 #define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP])
909 #define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
910 #define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
911 #define MASK_sig(context) ((context)->uc_sigmask)
/* Host SIGSEGV/SIGBUS handler for x86_64 hosts: same scheme as the
   i386 variant, using the 64-bit context accessors. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
934 #elif defined(_ARCH_PPC)
936 /***********************************************************************
937 * signal context platform-specific definitions
941 /* All Registers access - only for local access */
942 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
943 /* Gpr Registers access */
944 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
945 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
946 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
947 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
948 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
949 # define LR_sig(context) REG_sig(link, context) /* Link register */
950 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
951 /* Float Registers access */
952 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
953 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
954 /* Exception Registers access */
955 # define DAR_sig(context) REG_sig(dar, context)
956 # define DSISR_sig(context) REG_sig(dsisr, context)
957 # define TRAP_sig(context) REG_sig(trap, context)
960 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
961 #include <ucontext.h>
962 # define IAR_sig(context) ((context)->uc_mcontext.mc_srr0)
963 # define MSR_sig(context) ((context)->uc_mcontext.mc_srr1)
964 # define CTR_sig(context) ((context)->uc_mcontext.mc_ctr)
965 # define XER_sig(context) ((context)->uc_mcontext.mc_xer)
966 # define LR_sig(context) ((context)->uc_mcontext.mc_lr)
967 # define CR_sig(context) ((context)->uc_mcontext.mc_cr)
968 /* Exception Registers access */
969 # define DAR_sig(context) ((context)->uc_mcontext.mc_dar)
970 # define DSISR_sig(context) ((context)->uc_mcontext.mc_dsisr)
971 # define TRAP_sig(context) ((context)->uc_mcontext.mc_exc)
972 #endif /* __FreeBSD__|| __FreeBSD_kernel__ */
975 # include <sys/ucontext.h>
976 typedef struct ucontext SIGCONTEXT
;
977 /* All Registers access - only for local access */
978 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
979 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
980 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
981 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
982 /* Gpr Registers access */
983 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
984 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
985 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
986 # define CTR_sig(context) REG_sig(ctr, context)
987 # define XER_sig(context) REG_sig(xer, context) /* Link register */
988 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
989 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
990 /* Float Registers access */
991 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
992 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
993 /* Exception Registers access */
994 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
995 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
996 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
997 #endif /* __APPLE__ */
999 int cpu_signal_handler(int host_signum
, void *pinfo
,
1002 siginfo_t
*info
= pinfo
;
1003 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
1004 ucontext_t
*uc
= puc
;
1006 struct ucontext
*uc
= puc
;
1015 if (DSISR_sig(uc
) & 0x00800000)
1018 if (TRAP_sig(uc
) != 0x400 && (DSISR_sig(uc
) & 0x02000000))
1021 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1022 is_write
, &uc
->uc_sigmask
, puc
);
1025 #elif defined(__alpha__)
1027 int cpu_signal_handler(int host_signum
, void *pinfo
,
1030 siginfo_t
*info
= pinfo
;
1031 struct ucontext
*uc
= puc
;
1032 uint32_t *pc
= uc
->uc_mcontext
.sc_pc
;
1033 uint32_t insn
= *pc
;
1036 /* XXX: need kernel patch to get write flag faster */
1037 switch (insn
>> 26) {
1052 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1053 is_write
, &uc
->uc_sigmask
, puc
);
1055 #elif defined(__sparc__)
1057 int cpu_signal_handler(int host_signum
, void *pinfo
,
1060 siginfo_t
*info
= pinfo
;
1063 #if !defined(__arch64__) || defined(CONFIG_SOLARIS)
1064 uint32_t *regs
= (uint32_t *)(info
+ 1);
1065 void *sigmask
= (regs
+ 20);
1066 /* XXX: is there a standard glibc define ? */
1067 unsigned long pc
= regs
[1];
1070 struct sigcontext
*sc
= puc
;
1071 unsigned long pc
= sc
->sigc_regs
.tpc
;
1072 void *sigmask
= (void *)sc
->sigc_mask
;
1073 #elif defined(__OpenBSD__)
1074 struct sigcontext
*uc
= puc
;
1075 unsigned long pc
= uc
->sc_pc
;
1076 void *sigmask
= (void *)(long)uc
->sc_mask
;
1080 /* XXX: need kernel patch to get write flag faster */
1082 insn
= *(uint32_t *)pc
;
1083 if ((insn
>> 30) == 3) {
1084 switch((insn
>> 19) & 0x3f) {
1108 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1109 is_write
, sigmask
, NULL
);
1112 #elif defined(__arm__)
/* Host fault handler for ARM hosts.  The write flag is not derivable
   from the context here, so 0 is passed. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1134 #elif defined(__mc68000)
/* Host fault handler for m68k hosts; PC is gregs[16] in the mcontext.
   Write detection is not implemented (is_write = 0). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1152 #elif defined(__ia64)
1155 /* This ought to be in <bits/siginfo.h>... */
1156 # define __ISR_VALID 1
1159 int cpu_signal_handler(int host_signum
, void *pinfo
, void *puc
)
1161 siginfo_t
*info
= pinfo
;
1162 struct ucontext
*uc
= puc
;
1166 ip
= uc
->uc_mcontext
.sc_ip
;
1167 switch (host_signum
) {
1173 if (info
->si_code
&& (info
->si_segvflags
& __ISR_VALID
))
1174 /* ISR.W (write-access) is bit 33: */
1175 is_write
= (info
->si_isr
>> 33) & 1;
1181 return handle_cpu_signal(ip
, (unsigned long)info
->si_addr
,
1183 (sigset_t
*)&uc
->uc_sigmask
, puc
);
1186 #elif defined(__s390__)
/* Host fault handler for s390 hosts.  The write flag is recovered by
   decoding the faulting instruction's opcode (store forms only). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were.  */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions.  */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1239 #elif defined(__mips__)
/* Host fault handler for MIPS hosts.  Write detection is not
   implemented (is_write = 0). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1255 #elif defined(__hppa__)
/* Host fault handler for HPPA hosts.  The write flag is recovered by
   decoding the major (and sub-) opcode of the faulting instruction. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster.  */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1299 #error host CPU specific signal handler needed
1303 #endif /* !defined(CONFIG_SOFTMMU) */