/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#include <signal.h>
#include <sys/ucontext.h>
#endif
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL
int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
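/* Note: cpu_loop_exit() never returns; control reappears at the
   setjmp(env->jmp_env) call in cpu_exec() below.  Every exception,
   interrupt and exit request unwinds out of generated code this way. */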
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;
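    /* The low two bits of the value returned by tcg_qemu_tb_exec() act
       as a tag: 0 means the block simply ran to completion, while 2
       means the instruction-count decrementer expired, possibly before
       the block executed its first instruction. */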
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
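    /* The hash is keyed on the *physical* PC so a block stays findable
       across changes of the virtual mapping; pc, cs_base and flags are
       then re-checked below before a block may be reused. */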
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
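    /* tb_jmp_cache is a small direct-mapped cache indexed by a hash of
       the virtual PC; a stale entry is harmless because every relevant
       field is re-validated here before the block is used. */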
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
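    /* Lazy condition codes: rather than computing EFLAGS after every
       instruction, translated code stores the operands (CC_SRC) and the
       operation kind (CC_OP), and helper_cc_compute_all() rebuilds the
       real flags only when they are actually read.  E.g. after a 'sub',
       CC_OP records the subtract and ZF is derived from the result. */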
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
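            /* A zero return means we arrived here normally; a non-zero
               return means a longjmp (cpu_loop_exit(), a host signal
               handler, ...) unwound out of generated code, and the
               pending env->exception_index is re-examined below. */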
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
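                /* Block chaining: tb_add_jump() patches jump slot
                   (next_tb & 3) of the previously executed TB
                   (next_tb & ~3) to branch directly to this one, so hot
                   paths loop inside generated code without returning to
                   this loop at all. */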
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (!unlikely (env->exit_request)) {
                    env->current_tb = tb;
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
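                            /* Example: with icount_extra = 70000 and
                               insns_left = 3, icount_extra first becomes
                               70003, the 16-bit decrementer is reloaded
                               with min(70003, 0xffff) = 65535, and
                               icount_extra keeps the remaining 4468. */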
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
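/* In real or VM86 mode a segment register holds a paragraph number:
   base = selector << 4, limit = 0xffff, no access-rights checks, which
   is exactly what cpu_x86_load_seg_cache() is handed above.  Protected
   mode instead goes through helper_load_seg() and the descriptor
   tables. */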
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif
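/* On i386 the guest fault is re-raised together with its error code so
   the guest sees a precise exception; other targets only need to unwind
   back to the setjmp point in cpu_exec(). */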
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
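    /* cpu_handle_mmu_fault() returns <0 when the address is not guest
       memory at all, 0 when it fixed the mapping itself, and >0 when
       the access must surface as a guest CPU fault, handled below. */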
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long *)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)     (*((unsigned long *)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
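/* Trap 0xe is the x86 page fault; bit 1 of the page-fault error code is
   the W flag, so (ERROR_sig(uc) >> 1) & 1 recovers whether the faulting
   access was a write. */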
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined(__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)       (*((unsigned long *)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)              REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context)  /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num])
# define FPSCR_sig(context)            (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)
# define XER_sig(context)                 REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                  REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                  REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context)   /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of PowerPC exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
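    /* DSISR is the PowerPC data storage interrupt status register; on a
       data fault (trap != 0x400, the instruction storage interrupt) bit
       0x02000000 indicates a store access. */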
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
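        /* Bits 31:30 == 3 select the SPARC load/store instruction
           format and bits 24:19 hold the opcode proper; the cases below
           cover the store and atomic opcodes, which fault as writes. */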
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */