2 * i386 emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
25 #if !defined(CONFIG_SOFTMMU)
37 #include <sys/ucontext.h>
41 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
42 // Work around ugly bugs in glibc that mangle global register contents
44 #define env cpu_single_env
/* Global flag: cleared in tb_find_slow() and tested in cpu_exec() after a
   TB lookup — when set, the TB cache may have been flushed while code was
   being generated, so cached TB pointers must be re-resolved.
   NOTE(review): this extraction is mangled — statements are split across
   lines and stray original line numbers ("47", "49", ...) are fused into
   the text; compare against pristine QEMU cpu-exec.c before editing. */
47 int tb_invalidated_flag
;
49 //#define CONFIG_DEBUG_EXEC
50 //#define DEBUG_SIGNAL
52 int qemu_cpu_has_work(CPUState
*env
)
54 return cpu_has_work(env
);
57 void cpu_loop_exit(void)
59 env
->current_tb
= NULL
;
60 longjmp(env
->jmp_env
, 1);
63 /* exit the current TB from a signal handler. The host registers are
64 restored in a state compatible with the CPU emulator
/* Re-enter cpu_exec()'s setjmp point from a host signal handler.
   env1: the CPU whose loop we re-enter; puc: the host signal context
   (ucontext, or sigcontext on OpenBSD) used to restore the signal mask.
   NOTE(review): the extraction dropped several original lines here
   (braces, #ifdef lines, and presumably the `env = env1;` assignment —
   confirm against pristine cpu-exec.c). */
66 void cpu_resume_from_signal(CPUState
*env1
, void *puc
)
68 #if !defined(CONFIG_SOFTMMU)
70 struct ucontext
*uc
= puc
;
71 #elif defined(__OpenBSD__)
72 struct sigcontext
*uc
= puc
;
78 /* XXX: restore cpu registers saved in host registers */
80 #if !defined(CONFIG_SOFTMMU)
82 /* XXX: use siglongjmp ? */
/* Restore the pre-fault signal mask ourselves, since longjmp bypasses
   the normal sigreturn path that would otherwise do it. */
84 sigprocmask(SIG_SETMASK
, &uc
->uc_sigmask
, NULL
);
85 #elif defined(__OpenBSD__)
86 sigprocmask(SIG_SETMASK
, &uc
->sc_mask
, NULL
);
/* -1 == no exception pending; jump back into the cpu_exec() main loop. */
90 env
->exception_index
= -1;
91 longjmp(env
->jmp_env
, 1);
94 /* Execute the code without caching the generated code. An interpreter
95 could be used if available. */
/* Translate and run at most max_cycles guest instructions starting at
   orig_tb's state, then invalidate the throwaway TB. Used when an
   instruction-count deadline lands inside an existing (too-long) TB.
   NOTE(review): extraction dropped lines here — at least the local
   `tb` declaration and the trailing tb_gen_code() argument(s); the
   upstream file also frees the TB afterwards — confirm before editing. */
96 static void cpu_exec_nocache(int max_cycles
, TranslationBlock
*orig_tb
)
98 unsigned long next_tb
;
101 /* Should never happen.
102 We only end up here when an existing TB is too long. */
/* Clamp the cycle budget to what the TB cflags field can encode. */
103 if (max_cycles
> CF_COUNT_MASK
)
104 max_cycles
= CF_COUNT_MASK
;
106 tb
= tb_gen_code(env
, orig_tb
->pc
, orig_tb
->cs_base
, orig_tb
->flags
,
108 env
->current_tb
= tb
;
109 /* execute the generated code */
110 next_tb
= tcg_qemu_tb_exec(tb
->tc_ptr
);
111 env
->current_tb
= NULL
;
/* Low two bits of next_tb encode an exit reason; 2 == TB exited before
   executing, so the guest PC must be rolled back from the TB. */
113 if ((next_tb
& 3) == 2) {
114 /* Restore PC. This may happen if async event occurs before
115 the TB starts executing. */
116 cpu_pc_from_tb(env
, tb
);
118 tb_phys_invalidate(tb
, -1);
/* Slow-path TB lookup: walk the physical-address hash chain for a TB
   matching (pc, cs_base, flags); translate a fresh TB if none matches,
   and record the result in the per-CPU virtual-pc jump cache.
   NOTE(review): extraction dropped the search-loop skeleton (the for(;;),
   the pc comparison, goto labels, the `h` and `flags` declarations,
   return statement) — do not edit without the pristine source. */
122 static TranslationBlock
*tb_find_slow(target_ulong pc
,
123 target_ulong cs_base
,
126 TranslationBlock
*tb
, **ptb1
;
128 target_ulong phys_pc
, phys_page1
, phys_page2
, virt_page2
;
130 tb_invalidated_flag
= 0;
132 /* find translated block using physical mappings */
133 phys_pc
= get_phys_addr_code(env
, pc
);
134 phys_page1
= phys_pc
& TARGET_PAGE_MASK
;
136 h
= tb_phys_hash_func(phys_pc
);
137 ptb1
= &tb_phys_hash
[h
];
/* Chain-walk match test: same first physical page, cs_base and flags. */
143 tb
->page_addr
[0] == phys_page1
&&
144 tb
->cs_base
== cs_base
&&
145 tb
->flags
== flags
) {
146 /* check next page if needed */
/* TBs spanning two pages also require the second physical page to match. */
147 if (tb
->page_addr
[1] != -1) {
148 virt_page2
= (pc
& TARGET_PAGE_MASK
) +
150 phys_page2
= get_phys_addr_code(env
, virt_page2
);
151 if (tb
->page_addr
[1] == phys_page2
)
157 ptb1
= &tb
->phys_hash_next
;
160 /* if no translated code available, then translate it now */
161 tb
= tb_gen_code(env
, pc
, cs_base
, flags
, 0);
164 /* we add the TB in the virtual pc hash table */
165 env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)] = tb
;
/* Fast-path TB lookup: probe the per-CPU virtual-pc jump cache with the
   current (pc, cs_base, flags) CPU state; fall back to tb_find_slow()
   on a miss or a stale entry.
   NOTE(review): extraction dropped the `flags` declaration, braces and
   the trailing return — presumably `return tb;`; confirm upstream. */
169 static inline TranslationBlock
*tb_find_fast(void)
171 TranslationBlock
*tb
;
172 target_ulong cs_base
, pc
;
175 /* we record a subset of the CPU state. It will
176 always be the same before a given translated block
178 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &flags
);
179 tb
= env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)];
/* The cache is only keyed by a pc hash, so all three fields must be
   re-verified before the cached TB may be used. */
180 if (unlikely(!tb
|| tb
->pc
!= pc
|| tb
->cs_base
!= cs_base
||
181 tb
->flags
!= flags
)) {
182 tb
= tb_find_slow(pc
, cs_base
, flags
);
/* Currently installed debug-exception hook; set via
   cpu_set_debug_excp_handler() and invoked from
   cpu_handle_debug_exception(). NULL means no hook installed. */
187 static CPUDebugExcpHandler
*debug_excp_handler
;
189 CPUDebugExcpHandler
*cpu_set_debug_excp_handler(CPUDebugExcpHandler
*handler
)
191 CPUDebugExcpHandler
*old_handler
= debug_excp_handler
;
193 debug_excp_handler
= handler
;
197 static void cpu_handle_debug_exception(CPUState
*env
)
201 if (!env
->watchpoint_hit
)
202 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
)
203 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
205 if (debug_excp_handler
)
206 debug_excp_handler(env
);
209 /* main execution loop */
/* Run guest code on env1 until an exception/interrupt forces an exit;
   returns the exception index (e.g. EXCP_INTERRUPT, EXCP_DEBUG).
   Structure (per the visible fragments): save the global `env` register,
   massage target flags into TCG form, setjmp for exception unwinding,
   then loop: deliver pending exceptions, service interrupt requests per
   target, find the next TB (tb_find_fast) and execute it, finally undo
   the flag massaging and restore globals.
   NOTE(review): this extraction is heavily mangled — many original lines
   (braces, loop headers, declarations such as `tc_ptr`/`intno`/
   `insns_left`, whole #if branches) were dropped. Do not modify logic
   here without diffing against pristine QEMU cpu-exec.c. */
211 int cpu_exec(CPUState
*env1
)
213 volatile host_reg_t saved_env_reg
;
214 int ret
, interrupt_request
;
215 TranslationBlock
*tb
;
217 unsigned long next_tb
;
/* A halted CPU with no pending work has nothing to run. */
219 if (cpu_halted(env1
) == EXCP_HALTED
)
222 cpu_single_env
= env1
;
224 /* the access to env below is actually saving the global register's
225 value, so that files not including target-xyz/exec.h are free to
227 QEMU_BUILD_BUG_ON (sizeof (saved_env_reg
) != sizeof (env
));
228 saved_env_reg
= (host_reg_t
) env
;
/* Per-target entry fixups: convert architectural flag state into the
   lazy/temporary representation the translated code expects. */
232 #if defined(TARGET_I386)
233 if (!kvm_enabled()) {
234 /* put eflags in CPU temporary format */
235 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
236 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
237 CC_OP
= CC_OP_EFLAGS
;
238 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
240 #elif defined(TARGET_SPARC)
241 #elif defined(TARGET_M68K)
242 env
->cc_op
= CC_OP_FLAGS
;
243 env
->cc_dest
= env
->sr
& 0xf;
244 env
->cc_x
= (env
->sr
>> 4) & 1;
245 #elif defined(TARGET_ALPHA)
246 #elif defined(TARGET_ARM)
247 #elif defined(TARGET_PPC)
248 #elif defined(TARGET_MICROBLAZE)
249 #elif defined(TARGET_MIPS)
250 #elif defined(TARGET_SH4)
251 #elif defined(TARGET_CRIS)
252 #elif defined(TARGET_S390X)
255 #error unsupported target CPU
257 env
->exception_index
= -1;
259 /* prepare setjmp context for exception handling */
/* cpu_loop_exit()/longjmp land here with a nonzero return. */
261 if (setjmp(env
->jmp_env
) == 0) {
262 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
264 env
= cpu_single_env
;
265 #define env cpu_single_env
267 /* if an exception is pending, we execute it here */
268 if (env
->exception_index
>= 0) {
269 if (env
->exception_index
>= EXCP_INTERRUPT
) {
270 /* exit request from the cpu execution loop */
271 ret
= env
->exception_index
;
272 if (ret
== EXCP_DEBUG
)
273 cpu_handle_debug_exception(env
);
276 #if defined(CONFIG_USER_ONLY)
277 /* if user mode only, we simulate a fake exception
278 which will be handled outside the cpu execution
280 #if defined(TARGET_I386)
281 do_interrupt_user(env
->exception_index
,
282 env
->exception_is_int
,
284 env
->exception_next_eip
);
285 /* successfully delivered */
286 env
->old_exception
= -1;
288 ret
= env
->exception_index
;
291 #if defined(TARGET_I386)
292 /* simulate a real cpu exception. On i386, it can
293 trigger new exceptions, but we do not handle
294 double or triple faults yet. */
295 do_interrupt(env
->exception_index
,
296 env
->exception_is_int
,
298 env
->exception_next_eip
, 0);
299 /* successfully delivered */
300 env
->old_exception
= -1;
301 #elif defined(TARGET_PPC)
303 #elif defined(TARGET_MICROBLAZE)
305 #elif defined(TARGET_MIPS)
307 #elif defined(TARGET_SPARC)
309 #elif defined(TARGET_ARM)
311 #elif defined(TARGET_SH4)
313 #elif defined(TARGET_ALPHA)
315 #elif defined(TARGET_CRIS)
317 #elif defined(TARGET_M68K)
320 env
->exception_index
= -1;
327 longjmp(env
->jmp_env
, 1);
330 next_tb
= 0; /* force lookup of first TB */
/* Interrupt servicing: snapshot interrupt_request and dispatch the
   per-target interrupt sources in priority order. */
332 interrupt_request
= env
->interrupt_request
;
333 if (unlikely(interrupt_request
)) {
334 if (unlikely(env
->singlestep_enabled
& SSTEP_NOIRQ
)) {
335 /* Mask out external interrupts for this step. */
336 interrupt_request
&= ~(CPU_INTERRUPT_HARD
|
341 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
342 env
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
343 env
->exception_index
= EXCP_DEBUG
;
346 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
347 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
348 defined(TARGET_MICROBLAZE)
349 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
350 env
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
352 env
->exception_index
= EXCP_HLT
;
356 #if defined(TARGET_I386)
357 if (interrupt_request
& CPU_INTERRUPT_INIT
) {
358 svm_check_intercept(SVM_EXIT_INIT
);
360 env
->exception_index
= EXCP_HALTED
;
362 } else if (interrupt_request
& CPU_INTERRUPT_SIPI
) {
364 } else if (env
->hflags2
& HF2_GIF_MASK
) {
365 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
366 !(env
->hflags
& HF_SMM_MASK
)) {
367 svm_check_intercept(SVM_EXIT_SMI
);
368 env
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
371 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
372 !(env
->hflags2
& HF2_NMI_MASK
)) {
373 env
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
374 env
->hflags2
|= HF2_NMI_MASK
;
375 do_interrupt(EXCP02_NMI
, 0, 0, 0, 1);
377 } else if (interrupt_request
& CPU_INTERRUPT_MCE
) {
378 env
->interrupt_request
&= ~CPU_INTERRUPT_MCE
;
379 do_interrupt(EXCP12_MCHK
, 0, 0, 0, 0);
381 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
382 (((env
->hflags2
& HF2_VINTR_MASK
) &&
383 (env
->hflags2
& HF2_HIF_MASK
)) ||
384 (!(env
->hflags2
& HF2_VINTR_MASK
) &&
385 (env
->eflags
& IF_MASK
&&
386 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
))))) {
388 svm_check_intercept(SVM_EXIT_INTR
);
389 env
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VIRQ
);
390 intno
= cpu_get_pic_interrupt(env
);
391 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Servicing hardware INT=0x%02x\n", intno
);
392 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
394 env
= cpu_single_env
;
395 #define env cpu_single_env
397 do_interrupt(intno
, 0, 0, 0, 1);
398 /* ensure that no TB jump will be modified as
399 the program flow was changed */
401 #if !defined(CONFIG_USER_ONLY)
402 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
403 (env
->eflags
& IF_MASK
) &&
404 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
406 /* FIXME: this should respect TPR */
407 svm_check_intercept(SVM_EXIT_VINTR
);
408 intno
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_vector
));
409 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Servicing virtual hardware INT=0x%02x\n", intno
);
410 do_interrupt(intno
, 0, 0, 0, 1);
411 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
416 #elif defined(TARGET_PPC)
418 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
422 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
423 ppc_hw_interrupt(env
);
424 if (env
->pending_interrupts
== 0)
425 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
428 #elif defined(TARGET_MICROBLAZE)
429 if ((interrupt_request
& CPU_INTERRUPT_HARD
)
430 && (env
->sregs
[SR_MSR
] & MSR_IE
)
431 && !(env
->sregs
[SR_MSR
] & (MSR_EIP
| MSR_BIP
))
432 && !(env
->iflags
& (D_FLAG
| IMM_FLAG
))) {
433 env
->exception_index
= EXCP_IRQ
;
437 #elif defined(TARGET_MIPS)
438 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
439 (env
->CP0_Status
& env
->CP0_Cause
& CP0Ca_IP_mask
) &&
440 (env
->CP0_Status
& (1 << CP0St_IE
)) &&
441 !(env
->CP0_Status
& (1 << CP0St_EXL
)) &&
442 !(env
->CP0_Status
& (1 << CP0St_ERL
)) &&
443 !(env
->hflags
& MIPS_HFLAG_DM
)) {
445 env
->exception_index
= EXCP_EXT_INTERRUPT
;
450 #elif defined(TARGET_SPARC)
451 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
452 if (cpu_interrupts_enabled(env
) &&
453 env
->interrupt_index
> 0) {
454 int pil
= env
->interrupt_index
& 0xf;
455 int type
= env
->interrupt_index
& 0xf0;
457 if (((type
== TT_EXTINT
) &&
458 cpu_pil_allowed(env
, pil
)) ||
460 env
->exception_index
= env
->interrupt_index
;
465 } else if (interrupt_request
& CPU_INTERRUPT_TIMER
) {
466 //do_interrupt(0, 0, 0, 0, 0);
467 env
->interrupt_request
&= ~CPU_INTERRUPT_TIMER
;
469 #elif defined(TARGET_ARM)
470 if (interrupt_request
& CPU_INTERRUPT_FIQ
471 && !(env
->uncached_cpsr
& CPSR_F
)) {
472 env
->exception_index
= EXCP_FIQ
;
476 /* ARMv7-M interrupt return works by loading a magic value
477 into the PC. On real hardware the load causes the
478 return to occur. The qemu implementation performs the
479 jump normally, then does the exception return when the
480 CPU tries to execute code at the magic address.
481 This will cause the magic PC value to be pushed to
482 the stack if an interrupt occured at the wrong time.
483 We avoid this by disabling interrupts when
484 pc contains a magic address. */
485 if (interrupt_request
& CPU_INTERRUPT_HARD
486 && ((IS_M(env
) && env
->regs
[15] < 0xfffffff0)
487 || !(env
->uncached_cpsr
& CPSR_I
))) {
488 env
->exception_index
= EXCP_IRQ
;
492 #elif defined(TARGET_SH4)
493 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
497 #elif defined(TARGET_ALPHA)
498 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
502 #elif defined(TARGET_CRIS)
503 if (interrupt_request
& CPU_INTERRUPT_HARD
504 && (env
->pregs
[PR_CCS
] & I_FLAG
)
505 && !env
->locked_irq
) {
506 env
->exception_index
= EXCP_IRQ
;
510 if (interrupt_request
& CPU_INTERRUPT_NMI
511 && (env
->pregs
[PR_CCS
] & M_FLAG
)) {
512 env
->exception_index
= EXCP_NMI
;
516 #elif defined(TARGET_M68K)
517 if (interrupt_request
& CPU_INTERRUPT_HARD
518 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
519 < env
->pending_level
) {
520 /* Real hardware gets the interrupt vector via an
521 IACK cycle at this point. Current emulated
522 hardware doesn't rely on this, so we
523 provide/save the vector when the interrupt is
525 env
->exception_index
= env
->pending_vector
;
530 /* Don't use the cached interupt_request value,
531 do_interrupt may have updated the EXITTB flag. */
532 if (env
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
533 env
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
534 /* ensure that no TB jump will be modified as
535 the program flow was changed */
539 if (unlikely(env
->exit_request
)) {
540 env
->exit_request
= 0;
541 env
->exception_index
= EXCP_INTERRUPT
;
544 #ifdef CONFIG_DEBUG_EXEC
545 if (qemu_loglevel_mask(CPU_LOG_TB_CPU
)) {
546 /* restore flags in standard format */
547 #if defined(TARGET_I386)
548 env
->eflags
= env
->eflags
| helper_cc_compute_all(CC_OP
) | (DF
& DF_MASK
);
549 log_cpu_state(env
, X86_DUMP_CCOP
);
550 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
551 #elif defined(TARGET_ARM)
552 log_cpu_state(env
, 0);
553 #elif defined(TARGET_SPARC)
554 log_cpu_state(env
, 0);
555 #elif defined(TARGET_PPC)
556 log_cpu_state(env
, 0);
557 #elif defined(TARGET_M68K)
558 cpu_m68k_flush_flags(env
, env
->cc_op
);
559 env
->cc_op
= CC_OP_FLAGS
;
560 env
->sr
= (env
->sr
& 0xffe0)
561 | env
->cc_dest
| (env
->cc_x
<< 4);
562 log_cpu_state(env
, 0);
563 #elif defined(TARGET_MICROBLAZE)
564 log_cpu_state(env
, 0);
565 #elif defined(TARGET_MIPS)
566 log_cpu_state(env
, 0);
567 #elif defined(TARGET_SH4)
568 log_cpu_state(env
, 0);
569 #elif defined(TARGET_ALPHA)
570 log_cpu_state(env
, 0);
571 #elif defined(TARGET_CRIS)
572 log_cpu_state(env
, 0);
574 #error unsupported target CPU
/* TB selection and chaining (under tb_lock in the pristine source). */
580 /* Note: we do it here to avoid a gcc bug on Mac OS X when
581 doing it in tb_find_slow */
582 if (tb_invalidated_flag
) {
583 /* as some TB could have been invalidated because
584 of memory exceptions while generating the code, we
585 must recompute the hash index here */
587 tb_invalidated_flag
= 0;
589 #ifdef CONFIG_DEBUG_EXEC
590 qemu_log_mask(CPU_LOG_EXEC
, "Trace 0x%08lx [" TARGET_FMT_lx
"] %s\n",
591 (long)tb
->tc_ptr
, tb
->pc
,
592 lookup_symbol(tb
->pc
));
594 /* see if we can patch the calling TB. When the TB
595 spans two pages, we cannot safely do a direct
597 if (next_tb
!= 0 && tb
->page_addr
[1] == -1) {
598 tb_add_jump((TranslationBlock
*)(next_tb
& ~3), next_tb
& 3, tb
);
600 spin_unlock(&tb_lock
);
602 /* cpu_interrupt might be called while translating the
603 TB, but before it is linked into a potentially
604 infinite loop and becomes env->current_tb. Avoid
605 starting execution if there is a pending interrupt. */
606 if (!unlikely (env
->exit_request
)) {
607 env
->current_tb
= tb
;
609 /* execute the generated code */
610 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
612 env
= cpu_single_env
;
613 #define env cpu_single_env
615 next_tb
= tcg_qemu_tb_exec(tc_ptr
);
616 env
->current_tb
= NULL
;
/* Exit code 2 in next_tb's low bits: icount decrementer expired inside
   the TB; roll back the PC and rerun the remainder uncached. */
617 if ((next_tb
& 3) == 2) {
618 /* Instruction counter expired. */
620 tb
= (TranslationBlock
*)(long)(next_tb
& ~3);
622 cpu_pc_from_tb(env
, tb
);
623 insns_left
= env
->icount_decr
.u32
;
624 if (env
->icount_extra
&& insns_left
>= 0) {
625 /* Refill decrementer and continue execution. */
626 env
->icount_extra
+= insns_left
;
627 if (env
->icount_extra
> 0xffff) {
630 insns_left
= env
->icount_extra
;
632 env
->icount_extra
-= insns_left
;
633 env
->icount_decr
.u16
.low
= insns_left
;
635 if (insns_left
> 0) {
636 /* Execute remaining instructions. */
637 cpu_exec_nocache(insns_left
, tb
);
639 env
->exception_index
= EXCP_INTERRUPT
;
645 /* reset soft MMU for next block (it can currently
646 only be set by a memory fault) */
/* Per-target exit fixups: fold lazy flag state back into the
   architectural registers before returning to the caller. */
652 #if defined(TARGET_I386)
653 /* restore flags in standard format */
654 env
->eflags
= env
->eflags
| helper_cc_compute_all(CC_OP
) | (DF
& DF_MASK
);
655 #elif defined(TARGET_ARM)
656 /* XXX: Save/restore host fpu exception state?. */
657 #elif defined(TARGET_SPARC)
658 #elif defined(TARGET_PPC)
659 #elif defined(TARGET_M68K)
660 cpu_m68k_flush_flags(env
, env
->cc_op
);
661 env
->cc_op
= CC_OP_FLAGS
;
662 env
->sr
= (env
->sr
& 0xffe0)
663 | env
->cc_dest
| (env
->cc_x
<< 4);
664 #elif defined(TARGET_MICROBLAZE)
665 #elif defined(TARGET_MIPS)
666 #elif defined(TARGET_SH4)
667 #elif defined(TARGET_ALPHA)
668 #elif defined(TARGET_CRIS)
669 #elif defined(TARGET_S390X)
672 #error unsupported target CPU
675 /* restore global registers */
677 env
= (void *) saved_env_reg
;
679 /* fail safe : never use cpu_single_env outside cpu_exec() */
680 cpu_single_env
= NULL
;
684 /* must only be called from the generated code as an exception can be
/* Invalidate all TBs overlapping the guest-virtual range [start, end)
   by translating start to a code physical address and invalidating the
   corresponding physical range.
   NOTE(review): in the pristine source the body is compiled out (see the
   XXX about PowerPC NIP below); braces/#if lines were dropped here. */
686 void tb_invalidate_page_range(target_ulong start
, target_ulong end
)
688 /* XXX: cannot enable it yet because it yields to MMU exception
689 where NIP != read address on PowerPC */
691 target_ulong phys_addr
;
692 phys_addr
= get_phys_addr_code(env
, start
);
693 tb_invalidate_phys_page_range(phys_addr
, phys_addr
+ end
- start
, 0);
697 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
/* User-mode helper: load segment register seg_reg with selector on CPU s.
   Real/VM86 mode just fills the cache from selector<<4; protected mode
   goes through helper_load_seg().
   NOTE(review): the `saved_env = env; env = s; ... env = saved_env;`
   swap lines appear to have been dropped by the extraction. */
699 void cpu_x86_load_seg(CPUX86State
*s
, int seg_reg
, int selector
)
701 CPUX86State
*saved_env
;
705 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
707 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
708 (selector
<< 4), 0xffff, 0);
710 helper_load_seg(seg_reg
, selector
);
/* User-mode helper: FSAVE the FPU state of CPU s to guest address ptr
   (data32 selects 32- vs 16-bit operand layout).
   NOTE(review): the env save/swap/restore lines were dropped here. */
715 void cpu_x86_fsave(CPUX86State
*s
, target_ulong ptr
, int data32
)
717 CPUX86State
*saved_env
;
722 helper_fsave(ptr
, data32
);
/* User-mode helper: FRSTOR the FPU state of CPU s from guest address ptr
   (data32 selects 32- vs 16-bit operand layout).
   NOTE(review): the env save/swap/restore lines were dropped here. */
727 void cpu_x86_frstor(CPUX86State
*s
, target_ulong ptr
, int data32
)
729 CPUX86State
*saved_env
;
734 helper_frstor(ptr
, data32
);
739 #endif /* TARGET_I386 */
741 #if !defined(CONFIG_SOFTMMU)
743 #if defined(TARGET_I386)
744 #define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
746 #define EXCEPTION_ACTION cpu_loop_exit()
749 /* 'pc' is the host PC at which the exception was raised. 'address' is
750 the effective address of the memory exception. 'is_write' is 1 if a
751 write caused the exception and otherwise 0'. 'old_set' is the
752 signal set which should be restored */
/* Common SIGSEGV/SIGBUS back end for all host cpu_signal_handler()s:
   try page_unprotect() for self-modifying-code faults, then the guest
   MMU fault path, and finally raise a guest exception (EXCEPTION_ACTION)
   after restoring the signal mask.
   NOTE(review): several lines (braces, the `ret` declaration, the tb
   lookup around cpu_restore_state, the puc parameter line) were dropped
   by the extraction. */
753 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
754 int is_write
, sigset_t
*old_set
,
757 TranslationBlock
*tb
;
761 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
762 #if defined(DEBUG_SIGNAL)
763 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
764 pc
, address
, is_write
, *(unsigned long *)old_set
);
766 /* XXX: locking issue */
767 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
771 /* see if it is an MMU fault */
772 ret
= cpu_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
774 return 0; /* not an MMU fault */
776 return 1; /* the MMU fault was handled without causing real CPU fault */
777 /* now we have a real cpu fault */
780 /* the PC is inside the translated code. It means that we have
781 a virtual CPU fault */
782 cpu_restore_state(tb
, env
, pc
, puc
);
785 /* we restore the process signal mask as the sigreturn should
786 do it (XXX: use sigsetjmp) */
787 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
790 /* never comes here */
/* Host i386 signal-context accessors (EIP/TRAP/ERROR/MASK per host OS)
   and the host-side cpu_signal_handler() that extracts the fault pc and
   write flag and forwards to handle_cpu_signal().
   NOTE(review): extraction dropped lines (braces, pc extraction,
   the puc parameter, some #else lines) throughout this section. */
794 #if defined(__i386__)
796 #if defined(__APPLE__)
797 # include <sys/ucontext.h>
799 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
800 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
801 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
802 # define MASK_sig(context) ((context)->uc_sigmask)
803 #elif defined (__NetBSD__)
804 # include <ucontext.h>
806 # define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP])
807 # define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
808 # define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
809 # define MASK_sig(context) ((context)->uc_sigmask)
810 #elif defined (__FreeBSD__) || defined(__DragonFly__)
811 # include <ucontext.h>
813 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
814 # define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
815 # define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
816 # define MASK_sig(context) ((context)->uc_sigmask)
817 #elif defined(__OpenBSD__)
818 # define EIP_sig(context) ((context)->sc_eip)
819 # define TRAP_sig(context) ((context)->sc_trapno)
820 # define ERROR_sig(context) ((context)->sc_err)
821 # define MASK_sig(context) ((context)->sc_mask)
823 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
824 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
825 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
826 # define MASK_sig(context) ((context)->uc_sigmask)
829 int cpu_signal_handler(int host_signum
, void *pinfo
,
832 siginfo_t
*info
= pinfo
;
833 #if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
834 ucontext_t
*uc
= puc
;
835 #elif defined(__OpenBSD__)
836 struct sigcontext
*uc
= puc
;
838 struct ucontext
*uc
= puc
;
847 #define REG_TRAPNO TRAPNO
850 trapno
= TRAP_sig(uc
);
851 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
/* Page-fault error code bit 1 == write access (x86 #PF convention). */
853 (ERROR_sig(uc
) >> 1) & 1 : 0,
/* Host x86_64 variant: RIP/TRAP/ERROR/MASK accessors per host OS plus
   the cpu_signal_handler() forwarding to handle_cpu_signal(); the write
   flag is taken from the #PF error code when trapno == 0xe (page fault).
   NOTE(review): extraction dropped lines (pc extraction, braces, puc
   parameter, trailing MASK argument) in this section. */
857 #elif defined(__x86_64__)
860 #define PC_sig(context) _UC_MACHINE_PC(context)
861 #define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
862 #define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
863 #define MASK_sig(context) ((context)->uc_sigmask)
864 #elif defined(__OpenBSD__)
865 #define PC_sig(context) ((context)->sc_rip)
866 #define TRAP_sig(context) ((context)->sc_trapno)
867 #define ERROR_sig(context) ((context)->sc_err)
868 #define MASK_sig(context) ((context)->sc_mask)
869 #elif defined (__FreeBSD__) || defined(__DragonFly__)
870 #include <ucontext.h>
872 #define PC_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
873 #define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
874 #define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
875 #define MASK_sig(context) ((context)->uc_sigmask)
877 #define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP])
878 #define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
879 #define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
880 #define MASK_sig(context) ((context)->uc_sigmask)
883 int cpu_signal_handler(int host_signum
, void *pinfo
,
886 siginfo_t
*info
= pinfo
;
888 #if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
889 ucontext_t
*uc
= puc
;
890 #elif defined(__OpenBSD__)
891 struct sigcontext
*uc
= puc
;
893 struct ucontext
*uc
= puc
;
897 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
898 TRAP_sig(uc
) == 0xe ?
899 (ERROR_sig(uc
) >> 1) & 1 : 0,
/* Host PowerPC variant: signal-context register accessors for Linux,
   FreeBSD and Apple hosts, then cpu_signal_handler() which derives the
   write flag from DSISR bits and forwards to handle_cpu_signal(). */
903 #elif defined(_ARCH_PPC)
905 /***********************************************************************
906 * signal context platform-specific definitions
910 /* All Registers access - only for local access */
911 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
912 /* Gpr Registers access */
913 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
914 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
915 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
916 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
917 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
918 # define LR_sig(context) REG_sig(link, context) /* Link register */
919 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
920 /* Float Registers access */
921 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
922 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
923 /* Exception Registers access */
924 # define DAR_sig(context) REG_sig(dar, context)
925 # define DSISR_sig(context) REG_sig(dsisr, context)
926 # define TRAP_sig(context) REG_sig(trap, context)
929 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
930 #include <ucontext.h>
931 # define IAR_sig(context) ((context)->uc_mcontext.mc_srr0)
932 # define MSR_sig(context) ((context)->uc_mcontext.mc_srr1)
933 # define CTR_sig(context) ((context)->uc_mcontext.mc_ctr)
934 # define XER_sig(context) ((context)->uc_mcontext.mc_xer)
935 # define LR_sig(context) ((context)->uc_mcontext.mc_lr)
936 # define CR_sig(context) ((context)->uc_mcontext.mc_cr)
937 /* Exception Registers access */
938 # define DAR_sig(context) ((context)->uc_mcontext.mc_dar)
939 # define DSISR_sig(context) ((context)->uc_mcontext.mc_dsisr)
940 # define TRAP_sig(context) ((context)->uc_mcontext.mc_exc)
941 #endif /* __FreeBSD__|| __FreeBSD_kernel__ */
944 # include <sys/ucontext.h>
945 typedef struct ucontext SIGCONTEXT
;
946 /* All Registers access - only for local access */
947 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
948 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
949 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
950 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
951 /* Gpr Registers access */
952 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
953 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
954 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
955 # define CTR_sig(context) REG_sig(ctr, context)
956 # define XER_sig(context) REG_sig(xer, context) /* Link register */
957 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
958 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
959 /* Float Registers access */
960 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
961 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
962 /* Exception Registers access */
963 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
964 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
965 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
966 #endif /* __APPLE__ */
968 int cpu_signal_handler(int host_signum
, void *pinfo
,
971 siginfo_t
*info
= pinfo
;
972 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
973 ucontext_t
*uc
= puc
;
975 struct ucontext
*uc
= puc
;
/* DSISR bit 0x02000000 / 0x00800000 indicate a store access on a data
   fault (not on an instruction fetch, trap 0x400). */
984 if (DSISR_sig(uc
) & 0x00800000)
987 if (TRAP_sig(uc
) != 0x400 && (DSISR_sig(uc
) & 0x02000000))
990 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
991 is_write
, &uc
->uc_sigmask
, puc
);
/* Host Alpha variant: decode the faulting instruction's opcode (top six
   bits) to classify the access as a write, then forward to
   handle_cpu_signal().
   NOTE(review): the switch's case arms were dropped by the extraction. */
994 #elif defined(__alpha__)
996 int cpu_signal_handler(int host_signum
, void *pinfo
,
999 siginfo_t
*info
= pinfo
;
1000 struct ucontext
*uc
= puc
;
1001 uint32_t *pc
= uc
->uc_mcontext
.sc_pc
;
1002 uint32_t insn
= *pc
;
1005 /* XXX: need kernel patch to get write flag faster */
1006 switch (insn
>> 26) {
1021 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1022 is_write
, &uc
->uc_sigmask
, puc
);
/* Host SPARC variant: recover pc and signal mask from the host-specific
   context layout (raw register block on 32-bit/Solaris, sigcontext on
   sparc64 Linux and OpenBSD), classify the faulting instruction (format
   3, op3 field) as load vs store, and forward to handle_cpu_signal().
   NOTE(review): the store-opcode case arms of the switch were dropped. */
1024 #elif defined(__sparc__)
1026 int cpu_signal_handler(int host_signum
, void *pinfo
,
1029 siginfo_t
*info
= pinfo
;
1032 #if !defined(__arch64__) || defined(CONFIG_SOLARIS)
1033 uint32_t *regs
= (uint32_t *)(info
+ 1);
1034 void *sigmask
= (regs
+ 20);
1035 /* XXX: is there a standard glibc define ? */
1036 unsigned long pc
= regs
[1];
1039 struct sigcontext
*sc
= puc
;
1040 unsigned long pc
= sc
->sigc_regs
.tpc
;
1041 void *sigmask
= (void *)sc
->sigc_mask
;
1042 #elif defined(__OpenBSD__)
1043 struct sigcontext
*uc
= puc
;
1044 unsigned long pc
= uc
->sc_pc
;
1045 void *sigmask
= (void *)(long)uc
->sc_mask
;
1049 /* XXX: need kernel patch to get write flag faster */
1051 insn
= *(uint32_t *)pc
;
1052 if ((insn
>> 30) == 3) {
1053 switch((insn
>> 19) & 0x3f) {
1077 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1078 is_write
, sigmask
, NULL
);
/* Host ARM variant: fetch the fault pc from the ucontext (field name
   differs across glibc versions) and forward to handle_cpu_signal();
   the write flag is not computed (see XXX).
   NOTE(review): the is_write argument line was dropped here. */
1081 #elif defined(__arm__)
1083 int cpu_signal_handler(int host_signum
, void *pinfo
,
1086 siginfo_t
*info
= pinfo
;
1087 struct ucontext
*uc
= puc
;
1091 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1092 pc
= uc
->uc_mcontext
.gregs
[R15
];
1094 pc
= uc
->uc_mcontext
.arm_pc
;
1096 /* XXX: compute is_write */
1098 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1100 &uc
->uc_sigmask
, puc
);
/* Host m68k variant: pc from gregs[16]; write flag not computed.
   NOTE(review): the is_write argument line was dropped here. */
1103 #elif defined(__mc68000)
1105 int cpu_signal_handler(int host_signum
, void *pinfo
,
1108 siginfo_t
*info
= pinfo
;
1109 struct ucontext
*uc
= puc
;
1113 pc
= uc
->uc_mcontext
.gregs
[16];
1114 /* XXX: compute is_write */
1116 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1118 &uc
->uc_sigmask
, puc
);
/* Host IA-64 variant: pc from sc_ip; for SIGSEGV the write flag comes
   from ISR bit 33 when the siginfo carries a valid ISR word.
   NOTE(review): the switch's case labels and surrounding braces were
   dropped by the extraction. */
1121 #elif defined(__ia64)
1124 /* This ought to be in <bits/siginfo.h>... */
1125 # define __ISR_VALID 1
1128 int cpu_signal_handler(int host_signum
, void *pinfo
, void *puc
)
1130 siginfo_t
*info
= pinfo
;
1131 struct ucontext
*uc
= puc
;
1135 ip
= uc
->uc_mcontext
.sc_ip
;
1136 switch (host_signum
) {
1142 if (info
->si_code
&& (info
->si_segvflags
& __ISR_VALID
))
1143 /* ISR.W (write-access) is bit 33: */
1144 is_write
= (info
->si_isr
>> 33) & 1;
1150 return handle_cpu_signal(ip
, (unsigned long)info
->si_addr
,
1152 &uc
->uc_sigmask
, puc
);
/* Host s390 variant: pc from the PSW address; write flag not computed. */
1155 #elif defined(__s390__)
1157 int cpu_signal_handler(int host_signum
, void *pinfo
,
1160 siginfo_t
*info
= pinfo
;
1161 struct ucontext
*uc
= puc
;
1165 pc
= uc
->uc_mcontext
.psw
.addr
;
1166 /* XXX: compute is_write */
1168 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1169 is_write
, &uc
->uc_sigmask
, puc
);
/* Host MIPS variant: pc from uc_mcontext.pc; write flag not computed. */
1172 #elif defined(__mips__)
1174 int cpu_signal_handler(int host_signum
, void *pinfo
,
1177 siginfo_t
*info
= pinfo
;
1178 struct ucontext
*uc
= puc
;
1179 greg_t pc
= uc
->uc_mcontext
.pc
;
1182 /* XXX: compute is_write */
1184 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1185 is_write
, &uc
->uc_sigmask
, puc
);
/* Host HPPA variant: pc from the front of the instruction-address queue
   (sc_iaoq[0]); write flag not computed.
   NOTE(review): the is_write argument line was dropped here. */
1188 #elif defined(__hppa__)
1190 int cpu_signal_handler(int host_signum
, void *pinfo
,
1193 struct siginfo
*info
= pinfo
;
1194 struct ucontext
*uc
= puc
;
1198 pc
= uc
->uc_mcontext
.sc_iaoq
[0];
1199 /* FIXME: compute is_write */
1201 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1203 &uc
->uc_sigmask
, puc
);
1208 #error host CPU specific signal handler needed
1212 #endif /* !defined(CONFIG_SOFTMMU) */