/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
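
/* tb_find_slow() below looks the block up in the global hash table keyed by
   the physical address of the guest PC, so that blocks can be shared between
   virtual mappings; a block that crosses a page boundary is only reused when
   the second physical page matches as well. */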
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
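
/* tb_find_fast() first consults the per-CPU tb_jmp_cache, indexed by a hash
   of the virtual PC; only on a miss (or when cs_base/flags differ) does it
   fall back to the physical-address lookup in tb_find_slow(). */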
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
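
/* cpu_exec() below is organised as: save the host registers, set up a
   setjmp() target for cpu_loop_exit(), deliver any pending exception, then
   loop servicing interrupt requests and executing translated blocks until an
   exception or an exit request leaves the loop. */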
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
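
    /* Note: while translated code runs, the i386 front end keeps the
       condition codes in lazy form (CC_OP selects how helper_cc_compute_all()
       recomputes them from the saved operands) and keeps the direction flag
       in DF; the bits stripped from env->eflags here are folded back in
       before cpu_exec() returns. */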

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
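
            /* next_tb carries the return value of the last executed block:
               the host address of that TranslationBlock with the index of the
               taken jump slot in its low two bits, or 0 when the next block
               must not be chained to the previous one. */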
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
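                /* Chaining: tb_add_jump() patches jump slot (next_tb & 3) of
                   the previously executed block so that it branches straight
                   to the new block's generated code; later executions then
                   skip the lookup above entirely. */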
                spin_unlock(&tb_lock);
                env->current_tb = tb;
                barrier();
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;
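
                /* The loop below runs the generated code.  A return value
                   with the low two bits equal to 2 means the block stopped
                   because the instruction-count decrementer expired
                   mid-block; the handler either refills the decrementer or
                   finishes the remaining instructions with
                   cpu_exec_nocache(). */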
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                } /* while (env->current_tb) */
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif
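
/* EXCEPTION_ACTION is what handle_cpu_signal() does once a host SIGSEGV or
   SIGBUS has been converted into a guest MMU fault: on i386 it raises the
   guest exception directly, on other targets it just longjmps back into
   cpu_exec(). */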

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
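
/* Each host-architecture cpu_signal_handler() below digs the faulting host
   PC, a best-effort write flag and the saved signal mask out of the
   host-specific sigcontext/ucontext layout and forwards them to
   handle_cpu_signal(). */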

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)       (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 */

#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context) /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)              (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: /* stw */
    case 0x0e: /* stb */
    case 0x0f: /* stq_u */
    case 0x24: /* stf */
    case 0x25: /* stg */
    case 0x26: /* sts */
    case 0x27: /* stt */
    case 0x2c: /* stl */
    case 0x2d: /* stq */
    case 0x2e: /* stl_c */
    case 0x2f: /* stq_c */
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: /* stb */
        case 0x15: /* stba */
        case 0x06: /* sth */
        case 0x16: /* stha */
        case 0x04: /* st */
        case 0x14: /* sta */
        case 0x07: /* std */
        case 0x17: /* stda */
        case 0x0e: /* stx */
        case 0x1e: /* stxa */
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */