2 * i386 emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #if !defined(TARGET_IA64)
27 #if !defined(CONFIG_SOFTMMU)
39 #include <sys/ucontext.h>
45 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
46 // Work around ugly bugs in glibc that mangle global register contents
48 #define env cpu_single_env
/* Set when translation may have invalidated existing TBs; cpu_exec()
   checks it to know the tb_jmp_cache hash entry must be recomputed,
   and tb_find_slow() clears it before each lookup. */
int tb_invalidated_flag;
53 //#define CONFIG_DEBUG_EXEC
54 //#define DEBUG_SIGNAL
56 int qemu_cpu_has_work(CPUState
*env
)
58 return cpu_has_work(env
);
61 void cpu_loop_exit(void)
63 /* NOTE: the register at this point must be saved by hand because
64 longjmp restore them */
66 longjmp(env
->jmp_env
, 1);
69 /* exit the current TB from a signal handler. The host registers are
70 restored in a state compatible with the CPU emulator
72 void cpu_resume_from_signal(CPUState
*env1
, void *puc
)
74 #if !defined(CONFIG_SOFTMMU)
76 struct ucontext
*uc
= puc
;
77 #elif defined(__OpenBSD__)
78 struct sigcontext
*uc
= puc
;
84 /* XXX: restore cpu registers saved in host registers */
86 #if !defined(CONFIG_SOFTMMU)
88 /* XXX: use siglongjmp ? */
90 sigprocmask(SIG_SETMASK
, &uc
->uc_sigmask
, NULL
);
91 #elif defined(__OpenBSD__)
92 sigprocmask(SIG_SETMASK
, &uc
->sc_mask
, NULL
);
96 env
->exception_index
= -1;
97 longjmp(env
->jmp_env
, 1);
100 /* Execute the code without caching the generated code. An interpreter
101 could be used if available. */
102 static void cpu_exec_nocache(int max_cycles
, TranslationBlock
*orig_tb
)
104 unsigned long next_tb
;
105 TranslationBlock
*tb
;
107 /* Should never happen.
108 We only end up here when an existing TB is too long. */
109 if (max_cycles
> CF_COUNT_MASK
)
110 max_cycles
= CF_COUNT_MASK
;
112 tb
= tb_gen_code(env
, orig_tb
->pc
, orig_tb
->cs_base
, orig_tb
->flags
,
114 env
->current_tb
= tb
;
115 /* execute the generated code */
116 next_tb
= tcg_qemu_tb_exec(tb
->tc_ptr
);
118 if ((next_tb
& 3) == 2) {
119 /* Restore PC. This may happen if async event occurs before
120 the TB starts executing. */
121 cpu_pc_from_tb(env
, tb
);
123 tb_phys_invalidate(tb
, -1);
127 static TranslationBlock
*tb_find_slow(target_ulong pc
,
128 target_ulong cs_base
,
131 TranslationBlock
*tb
, **ptb1
;
133 target_ulong phys_pc
, phys_page1
, phys_page2
, virt_page2
;
135 tb_invalidated_flag
= 0;
137 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
139 /* find translated block using physical mappings */
140 phys_pc
= get_phys_addr_code(env
, pc
);
141 phys_page1
= phys_pc
& TARGET_PAGE_MASK
;
143 h
= tb_phys_hash_func(phys_pc
);
144 ptb1
= &tb_phys_hash
[h
];
150 tb
->page_addr
[0] == phys_page1
&&
151 tb
->cs_base
== cs_base
&&
152 tb
->flags
== flags
) {
153 /* check next page if needed */
154 if (tb
->page_addr
[1] != -1) {
155 virt_page2
= (pc
& TARGET_PAGE_MASK
) +
157 phys_page2
= get_phys_addr_code(env
, virt_page2
);
158 if (tb
->page_addr
[1] == phys_page2
)
164 ptb1
= &tb
->phys_hash_next
;
167 /* if no translated code available, then translate it now */
168 tb
= tb_gen_code(env
, pc
, cs_base
, flags
, 0);
171 /* we add the TB in the virtual pc hash table */
172 env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)] = tb
;
176 static inline TranslationBlock
*tb_find_fast(void)
178 TranslationBlock
*tb
;
179 target_ulong cs_base
, pc
;
182 /* we record a subset of the CPU state. It will
183 always be the same before a given translated block
185 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &flags
);
186 tb
= env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)];
187 if (unlikely(!tb
|| tb
->pc
!= pc
|| tb
->cs_base
!= cs_base
||
188 tb
->flags
!= flags
)) {
189 tb
= tb_find_slow(pc
, cs_base
, flags
);
194 static CPUDebugExcpHandler
*debug_excp_handler
;
196 CPUDebugExcpHandler
*cpu_set_debug_excp_handler(CPUDebugExcpHandler
*handler
)
198 CPUDebugExcpHandler
*old_handler
= debug_excp_handler
;
200 debug_excp_handler
= handler
;
204 static void cpu_handle_debug_exception(CPUState
*env
)
208 if (!env
->watchpoint_hit
)
209 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
)
210 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
212 if (debug_excp_handler
)
213 debug_excp_handler(env
);
216 /* main execution loop */
218 int cpu_exec(CPUState
*env1
)
220 #define DECLARE_HOST_REGS 1
221 #include "hostregs_helper.h"
222 int ret
, interrupt_request
;
223 TranslationBlock
*tb
;
225 unsigned long next_tb
;
227 if (cpu_halted(env1
) == EXCP_HALTED
)
230 cpu_single_env
= env1
;
232 /* first we save global registers */
233 #define SAVE_HOST_REGS 1
234 #include "hostregs_helper.h"
238 #if defined(TARGET_I386)
239 /* put eflags in CPU temporary format */
240 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
241 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
242 CC_OP
= CC_OP_EFLAGS
;
243 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
244 #elif defined(TARGET_SPARC)
245 #elif defined(TARGET_M68K)
246 env
->cc_op
= CC_OP_FLAGS
;
247 env
->cc_dest
= env
->sr
& 0xf;
248 env
->cc_x
= (env
->sr
>> 4) & 1;
249 #elif defined(TARGET_ALPHA)
250 #elif defined(TARGET_ARM)
251 #elif defined(TARGET_PPC)
252 #elif defined(TARGET_MICROBLAZE)
253 #elif defined(TARGET_MIPS)
254 #elif defined(TARGET_SH4)
255 #elif defined(TARGET_CRIS)
256 #elif defined(TARGET_S390X)
257 #elif defined(TARGET_IA64)
260 #error unsupported target CPU
262 env
->exception_index
= -1;
264 /* prepare setjmp context for exception handling */
266 if (setjmp(env
->jmp_env
) == 0) {
267 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
269 env
= cpu_single_env
;
270 #define env cpu_single_env
272 env
->current_tb
= NULL
;
273 /* if an exception is pending, we execute it here */
274 if (env
->exception_index
>= 0) {
275 if (env
->exception_index
>= EXCP_INTERRUPT
) {
276 /* exit request from the cpu execution loop */
277 ret
= env
->exception_index
;
278 if (ret
== EXCP_DEBUG
)
279 cpu_handle_debug_exception(env
);
282 #if defined(CONFIG_USER_ONLY)
283 /* if user mode only, we simulate a fake exception
284 which will be handled outside the cpu execution
286 #if defined(TARGET_I386)
287 do_interrupt_user(env
->exception_index
,
288 env
->exception_is_int
,
290 env
->exception_next_eip
);
291 /* successfully delivered */
292 env
->old_exception
= -1;
294 ret
= env
->exception_index
;
297 #if defined(TARGET_I386)
298 /* simulate a real cpu exception. On i386, it can
299 trigger new exceptions, but we do not handle
300 double or triple faults yet. */
301 do_interrupt(env
->exception_index
,
302 env
->exception_is_int
,
304 env
->exception_next_eip
, 0);
305 /* successfully delivered */
306 env
->old_exception
= -1;
307 #elif defined(TARGET_PPC)
309 #elif defined(TARGET_MICROBLAZE)
311 #elif defined(TARGET_MIPS)
313 #elif defined(TARGET_SPARC)
315 #elif defined(TARGET_ARM)
317 #elif defined(TARGET_SH4)
319 #elif defined(TARGET_ALPHA)
321 #elif defined(TARGET_CRIS)
323 #elif defined(TARGET_M68K)
325 #elif defined(TARGET_IA64)
330 env
->exception_index
= -1;
335 longjmp(env
->jmp_env
, 1);
338 next_tb
= 0; /* force lookup of first TB */
340 interrupt_request
= env
->interrupt_request
;
341 if (unlikely(interrupt_request
)) {
342 if (unlikely(env
->singlestep_enabled
& SSTEP_NOIRQ
)) {
343 /* Mask out external interrupts for this step. */
344 interrupt_request
&= ~(CPU_INTERRUPT_HARD
|
349 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
350 env
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
351 env
->exception_index
= EXCP_DEBUG
;
354 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
355 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
356 defined(TARGET_MICROBLAZE)
357 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
358 env
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
360 env
->exception_index
= EXCP_HLT
;
364 #if defined(TARGET_I386)
365 if (interrupt_request
& CPU_INTERRUPT_INIT
) {
366 svm_check_intercept(SVM_EXIT_INIT
);
368 env
->exception_index
= EXCP_HALTED
;
370 } else if (interrupt_request
& CPU_INTERRUPT_SIPI
) {
372 } else if (env
->hflags2
& HF2_GIF_MASK
) {
373 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
374 !(env
->hflags
& HF_SMM_MASK
)) {
375 svm_check_intercept(SVM_EXIT_SMI
);
376 env
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
379 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
380 !(env
->hflags2
& HF2_NMI_MASK
)) {
381 env
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
382 env
->hflags2
|= HF2_NMI_MASK
;
383 do_interrupt(EXCP02_NMI
, 0, 0, 0, 1);
385 } else if (interrupt_request
& CPU_INTERRUPT_MCE
) {
386 env
->interrupt_request
&= ~CPU_INTERRUPT_MCE
;
387 do_interrupt(EXCP12_MCHK
, 0, 0, 0, 0);
389 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
390 (((env
->hflags2
& HF2_VINTR_MASK
) &&
391 (env
->hflags2
& HF2_HIF_MASK
)) ||
392 (!(env
->hflags2
& HF2_VINTR_MASK
) &&
393 (env
->eflags
& IF_MASK
&&
394 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
))))) {
396 svm_check_intercept(SVM_EXIT_INTR
);
397 env
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VIRQ
);
398 intno
= cpu_get_pic_interrupt(env
);
399 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Servicing hardware INT=0x%02x\n", intno
);
400 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
402 env
= cpu_single_env
;
403 #define env cpu_single_env
405 do_interrupt(intno
, 0, 0, 0, 1);
406 /* ensure that no TB jump will be modified as
407 the program flow was changed */
409 #if !defined(CONFIG_USER_ONLY)
410 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
411 (env
->eflags
& IF_MASK
) &&
412 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
414 /* FIXME: this should respect TPR */
415 svm_check_intercept(SVM_EXIT_VINTR
);
416 intno
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_vector
));
417 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Servicing virtual hardware INT=0x%02x\n", intno
);
418 do_interrupt(intno
, 0, 0, 0, 1);
419 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
424 #elif defined(TARGET_PPC)
426 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
430 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
431 ppc_hw_interrupt(env
);
432 if (env
->pending_interrupts
== 0)
433 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
436 #elif defined(TARGET_MICROBLAZE)
437 if ((interrupt_request
& CPU_INTERRUPT_HARD
)
438 && (env
->sregs
[SR_MSR
] & MSR_IE
)
439 && !(env
->sregs
[SR_MSR
] & (MSR_EIP
| MSR_BIP
))
440 && !(env
->iflags
& (D_FLAG
| IMM_FLAG
))) {
441 env
->exception_index
= EXCP_IRQ
;
445 #elif defined(TARGET_MIPS)
446 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
447 (env
->CP0_Status
& env
->CP0_Cause
& CP0Ca_IP_mask
) &&
448 (env
->CP0_Status
& (1 << CP0St_IE
)) &&
449 !(env
->CP0_Status
& (1 << CP0St_EXL
)) &&
450 !(env
->CP0_Status
& (1 << CP0St_ERL
)) &&
451 !(env
->hflags
& MIPS_HFLAG_DM
)) {
453 env
->exception_index
= EXCP_EXT_INTERRUPT
;
458 #elif defined(TARGET_SPARC)
459 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
460 cpu_interrupts_enabled(env
)) {
461 int pil
= env
->interrupt_index
& 15;
462 int type
= env
->interrupt_index
& 0xf0;
464 if (((type
== TT_EXTINT
) &&
465 (pil
== 15 || pil
> env
->psrpil
)) ||
467 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
468 env
->exception_index
= env
->interrupt_index
;
470 env
->interrupt_index
= 0;
473 } else if (interrupt_request
& CPU_INTERRUPT_TIMER
) {
474 //do_interrupt(0, 0, 0, 0, 0);
475 env
->interrupt_request
&= ~CPU_INTERRUPT_TIMER
;
477 #elif defined(TARGET_ARM)
478 if (interrupt_request
& CPU_INTERRUPT_FIQ
479 && !(env
->uncached_cpsr
& CPSR_F
)) {
480 env
->exception_index
= EXCP_FIQ
;
484 /* ARMv7-M interrupt return works by loading a magic value
485 into the PC. On real hardware the load causes the
486 return to occur. The qemu implementation performs the
487 jump normally, then does the exception return when the
488 CPU tries to execute code at the magic address.
489 This will cause the magic PC value to be pushed to
490 the stack if an interrupt occured at the wrong time.
491 We avoid this by disabling interrupts when
492 pc contains a magic address. */
493 if (interrupt_request
& CPU_INTERRUPT_HARD
494 && ((IS_M(env
) && env
->regs
[15] < 0xfffffff0)
495 || !(env
->uncached_cpsr
& CPSR_I
))) {
496 env
->exception_index
= EXCP_IRQ
;
500 #elif defined(TARGET_SH4)
501 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
505 #elif defined(TARGET_ALPHA)
506 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
510 #elif defined(TARGET_CRIS)
511 if (interrupt_request
& CPU_INTERRUPT_HARD
512 && (env
->pregs
[PR_CCS
] & I_FLAG
)) {
513 env
->exception_index
= EXCP_IRQ
;
517 if (interrupt_request
& CPU_INTERRUPT_NMI
518 && (env
->pregs
[PR_CCS
] & M_FLAG
)) {
519 env
->exception_index
= EXCP_NMI
;
523 #elif defined(TARGET_M68K)
524 if (interrupt_request
& CPU_INTERRUPT_HARD
525 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
526 < env
->pending_level
) {
527 /* Real hardware gets the interrupt vector via an
528 IACK cycle at this point. Current emulated
529 hardware doesn't rely on this, so we
530 provide/save the vector when the interrupt is
532 env
->exception_index
= env
->pending_vector
;
537 /* Don't use the cached interupt_request value,
538 do_interrupt may have updated the EXITTB flag. */
539 if (env
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
540 env
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
541 /* ensure that no TB jump will be modified as
542 the program flow was changed */
546 if (unlikely(env
->exit_request
)) {
547 env
->exit_request
= 0;
548 env
->exception_index
= EXCP_INTERRUPT
;
551 #ifdef CONFIG_DEBUG_EXEC
552 if (qemu_loglevel_mask(CPU_LOG_TB_CPU
)) {
553 /* restore flags in standard format */
555 #if defined(TARGET_I386)
556 env
->eflags
= env
->eflags
| helper_cc_compute_all(CC_OP
) | (DF
& DF_MASK
);
557 log_cpu_state(env
, X86_DUMP_CCOP
);
558 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
559 #elif defined(TARGET_ARM)
560 log_cpu_state(env
, 0);
561 #elif defined(TARGET_SPARC)
562 log_cpu_state(env
, 0);
563 #elif defined(TARGET_PPC)
564 log_cpu_state(env
, 0);
565 #elif defined(TARGET_M68K)
566 cpu_m68k_flush_flags(env
, env
->cc_op
);
567 env
->cc_op
= CC_OP_FLAGS
;
568 env
->sr
= (env
->sr
& 0xffe0)
569 | env
->cc_dest
| (env
->cc_x
<< 4);
570 log_cpu_state(env
, 0);
571 #elif defined(TARGET_MICROBLAZE)
572 log_cpu_state(env
, 0);
573 #elif defined(TARGET_MIPS)
574 log_cpu_state(env
, 0);
575 #elif defined(TARGET_SH4)
576 log_cpu_state(env
, 0);
577 #elif defined(TARGET_ALPHA)
578 log_cpu_state(env
, 0);
579 #elif defined(TARGET_CRIS)
580 log_cpu_state(env
, 0);
582 #error unsupported target CPU
588 /* Note: we do it here to avoid a gcc bug on Mac OS X when
589 doing it in tb_find_slow */
590 if (tb_invalidated_flag
) {
591 /* as some TB could have been invalidated because
592 of memory exceptions while generating the code, we
593 must recompute the hash index here */
595 tb_invalidated_flag
= 0;
597 #ifdef CONFIG_DEBUG_EXEC
598 qemu_log_mask(CPU_LOG_EXEC
, "Trace 0x%08lx [" TARGET_FMT_lx
"] %s\n",
599 (long)tb
->tc_ptr
, tb
->pc
,
600 lookup_symbol(tb
->pc
));
602 /* see if we can patch the calling TB. When the TB
603 spans two pages, we cannot safely do a direct
606 if (next_tb
!= 0 && tb
->page_addr
[1] == -1) {
607 tb_add_jump((TranslationBlock
*)(next_tb
& ~3), next_tb
& 3, tb
);
610 spin_unlock(&tb_lock
);
611 env
->current_tb
= tb
;
613 /* cpu_interrupt might be called while translating the
614 TB, but before it is linked into a potentially
615 infinite loop and becomes env->current_tb. Avoid
616 starting execution if there is a pending interrupt. */
617 if (unlikely (env
->exit_request
))
618 env
->current_tb
= NULL
;
620 while (env
->current_tb
) {
622 /* execute the generated code */
623 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
625 env
= cpu_single_env
;
626 #define env cpu_single_env
628 next_tb
= tcg_qemu_tb_exec(tc_ptr
);
629 env
->current_tb
= NULL
;
630 if ((next_tb
& 3) == 2) {
631 /* Instruction counter expired. */
633 tb
= (TranslationBlock
*)(long)(next_tb
& ~3);
635 cpu_pc_from_tb(env
, tb
);
636 insns_left
= env
->icount_decr
.u32
;
637 if (env
->icount_extra
&& insns_left
>= 0) {
638 /* Refill decrementer and continue execution. */
639 env
->icount_extra
+= insns_left
;
640 if (env
->icount_extra
> 0xffff) {
643 insns_left
= env
->icount_extra
;
645 env
->icount_extra
-= insns_left
;
646 env
->icount_decr
.u16
.low
= insns_left
;
648 if (insns_left
> 0) {
649 /* Execute remaining instructions. */
650 cpu_exec_nocache(insns_left
, tb
);
652 env
->exception_index
= EXCP_INTERRUPT
;
658 /* reset soft MMU for next block (it can currently
659 only be set by a memory fault) */
667 #if defined(TARGET_I386)
668 /* restore flags in standard format */
669 env
->eflags
= env
->eflags
| helper_cc_compute_all(CC_OP
) | (DF
& DF_MASK
);
670 #elif defined(TARGET_ARM)
671 /* XXX: Save/restore host fpu exception state?. */
672 #elif defined(TARGET_SPARC)
673 #elif defined(TARGET_PPC)
674 #elif defined(TARGET_M68K)
675 cpu_m68k_flush_flags(env
, env
->cc_op
);
676 env
->cc_op
= CC_OP_FLAGS
;
677 env
->sr
= (env
->sr
& 0xffe0)
678 | env
->cc_dest
| (env
->cc_x
<< 4);
679 #elif defined(TARGET_MICROBLAZE)
680 #elif defined(TARGET_MIPS)
681 #elif defined(TARGET_SH4)
682 #elif defined(TARGET_IA64)
683 #elif defined(TARGET_ALPHA)
684 #elif defined(TARGET_CRIS)
685 #elif defined(TARGET_S390X)
688 #error unsupported target CPU
691 /* restore global registers */
692 #include "hostregs_helper.h"
694 /* fail safe : never use cpu_single_env outside cpu_exec() */
695 cpu_single_env
= NULL
;
699 /* must only be called from the generated code as an exception can be
701 void tb_invalidate_page_range(target_ulong start
, target_ulong end
)
703 /* XXX: cannot enable it yet because it yields to MMU exception
704 where NIP != read address on PowerPC */
706 target_ulong phys_addr
;
707 phys_addr
= get_phys_addr_code(env
, start
);
708 tb_invalidate_phys_page_range(phys_addr
, phys_addr
+ end
- start
, 0);
712 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
714 void cpu_x86_load_seg(CPUX86State
*s
, int seg_reg
, int selector
)
716 CPUX86State
*saved_env
;
720 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
722 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
723 (selector
<< 4), 0xffff, 0);
725 helper_load_seg(seg_reg
, selector
);
730 void cpu_x86_fsave(CPUX86State
*s
, target_ulong ptr
, int data32
)
732 CPUX86State
*saved_env
;
737 helper_fsave(ptr
, data32
);
742 void cpu_x86_frstor(CPUX86State
*s
, target_ulong ptr
, int data32
)
744 CPUX86State
*saved_env
;
749 helper_frstor(ptr
, data32
);
754 #endif /* TARGET_I386 */
756 #if !defined(CONFIG_SOFTMMU)
758 #if defined(TARGET_I386)
759 #define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
761 #define EXCEPTION_ACTION cpu_loop_exit()
764 /* 'pc' is the host PC at which the exception was raised. 'address' is
765 the effective address of the memory exception. 'is_write' is 1 if a
766 write caused the exception and otherwise 0'. 'old_set' is the
767 signal set which should be restored */
768 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
769 int is_write
, sigset_t
*old_set
,
772 TranslationBlock
*tb
;
776 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
777 #if defined(DEBUG_SIGNAL)
778 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
779 pc
, address
, is_write
, *(unsigned long *)old_set
);
781 /* XXX: locking issue */
782 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
786 /* see if it is an MMU fault */
787 ret
= cpu_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
789 return 0; /* not an MMU fault */
791 return 1; /* the MMU fault was handled without causing real CPU fault */
792 /* now we have a real cpu fault */
795 /* the PC is inside the translated code. It means that we have
796 a virtual CPU fault */
797 cpu_restore_state(tb
, env
, pc
, puc
);
800 /* we restore the process signal mask as the sigreturn should
801 do it (XXX: use sigsetjmp) */
802 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
805 /* never comes here */
809 #if defined(__i386__)
811 #if defined(__APPLE__)
812 # include <sys/ucontext.h>
814 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
815 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
816 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
817 # define MASK_sig(context) ((context)->uc_sigmask)
818 #elif defined (__NetBSD__)
819 # include <ucontext.h>
821 # define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP])
822 # define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
823 # define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
824 # define MASK_sig(context) ((context)->uc_sigmask)
825 #elif defined (__FreeBSD__) || defined(__DragonFly__)
826 # include <ucontext.h>
828 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
829 # define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
830 # define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
831 # define MASK_sig(context) ((context)->uc_sigmask)
832 #elif defined(__OpenBSD__)
833 # define EIP_sig(context) ((context)->sc_eip)
834 # define TRAP_sig(context) ((context)->sc_trapno)
835 # define ERROR_sig(context) ((context)->sc_err)
836 # define MASK_sig(context) ((context)->sc_mask)
838 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
839 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
840 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
841 # define MASK_sig(context) ((context)->uc_sigmask)
844 int cpu_signal_handler(int host_signum
, void *pinfo
,
847 siginfo_t
*info
= pinfo
;
848 #if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
849 ucontext_t
*uc
= puc
;
850 #elif defined(__OpenBSD__)
851 struct sigcontext
*uc
= puc
;
853 struct ucontext
*uc
= puc
;
862 #define REG_TRAPNO TRAPNO
865 trapno
= TRAP_sig(uc
);
866 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
868 (ERROR_sig(uc
) >> 1) & 1 : 0,
872 #elif defined(__x86_64__)
875 #define PC_sig(context) _UC_MACHINE_PC(context)
876 #define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
877 #define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
878 #define MASK_sig(context) ((context)->uc_sigmask)
879 #elif defined(__OpenBSD__)
880 #define PC_sig(context) ((context)->sc_rip)
881 #define TRAP_sig(context) ((context)->sc_trapno)
882 #define ERROR_sig(context) ((context)->sc_err)
883 #define MASK_sig(context) ((context)->sc_mask)
884 #elif defined (__FreeBSD__) || defined(__DragonFly__)
885 #include <ucontext.h>
887 #define PC_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
888 #define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
889 #define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
890 #define MASK_sig(context) ((context)->uc_sigmask)
892 #define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP])
893 #define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
894 #define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
895 #define MASK_sig(context) ((context)->uc_sigmask)
898 int cpu_signal_handler(int host_signum
, void *pinfo
,
901 siginfo_t
*info
= pinfo
;
903 #if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
904 ucontext_t
*uc
= puc
;
905 #elif defined(__OpenBSD__)
906 struct sigcontext
*uc
= puc
;
908 struct ucontext
*uc
= puc
;
912 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
913 TRAP_sig(uc
) == 0xe ?
914 (ERROR_sig(uc
) >> 1) & 1 : 0,
918 #elif defined(_ARCH_PPC)
920 /***********************************************************************
921 * signal context platform-specific definitions
925 /* All Registers access - only for local access */
926 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
927 /* Gpr Registers access */
928 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
929 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
930 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
931 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
932 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
933 # define LR_sig(context) REG_sig(link, context) /* Link register */
934 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
935 /* Float Registers access */
936 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
937 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
938 /* Exception Registers access */
939 # define DAR_sig(context) REG_sig(dar, context)
940 # define DSISR_sig(context) REG_sig(dsisr, context)
941 # define TRAP_sig(context) REG_sig(trap, context)
945 # include <sys/ucontext.h>
946 typedef struct ucontext SIGCONTEXT
;
947 /* All Registers access - only for local access */
948 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
949 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
950 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
951 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
952 /* Gpr Registers access */
953 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
954 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
955 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
956 # define CTR_sig(context) REG_sig(ctr, context)
957 # define XER_sig(context) REG_sig(xer, context) /* Link register */
958 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
959 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
960 /* Float Registers access */
961 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
962 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
963 /* Exception Registers access */
964 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
965 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
966 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
967 #endif /* __APPLE__ */
969 int cpu_signal_handler(int host_signum
, void *pinfo
,
972 siginfo_t
*info
= pinfo
;
973 struct ucontext
*uc
= puc
;
981 if (DSISR_sig(uc
) & 0x00800000)
984 if (TRAP_sig(uc
) != 0x400 && (DSISR_sig(uc
) & 0x02000000))
987 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
988 is_write
, &uc
->uc_sigmask
, puc
);
991 #elif defined(__alpha__)
993 int cpu_signal_handler(int host_signum
, void *pinfo
,
996 siginfo_t
*info
= pinfo
;
997 struct ucontext
*uc
= puc
;
998 uint32_t *pc
= uc
->uc_mcontext
.sc_pc
;
1002 /* XXX: need kernel patch to get write flag faster */
1003 switch (insn
>> 26) {
1018 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1019 is_write
, &uc
->uc_sigmask
, puc
);
1021 #elif defined(__sparc__)
1023 int cpu_signal_handler(int host_signum
, void *pinfo
,
1026 siginfo_t
*info
= pinfo
;
1029 #if !defined(__arch64__) || defined(CONFIG_SOLARIS)
1030 uint32_t *regs
= (uint32_t *)(info
+ 1);
1031 void *sigmask
= (regs
+ 20);
1032 /* XXX: is there a standard glibc define ? */
1033 unsigned long pc
= regs
[1];
1036 struct sigcontext
*sc
= puc
;
1037 unsigned long pc
= sc
->sigc_regs
.tpc
;
1038 void *sigmask
= (void *)sc
->sigc_mask
;
1039 #elif defined(__OpenBSD__)
1040 struct sigcontext
*uc
= puc
;
1041 unsigned long pc
= uc
->sc_pc
;
1042 void *sigmask
= (void *)(long)uc
->sc_mask
;
1046 /* XXX: need kernel patch to get write flag faster */
1048 insn
= *(uint32_t *)pc
;
1049 if ((insn
>> 30) == 3) {
1050 switch((insn
>> 19) & 0x3f) {
1074 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1075 is_write
, sigmask
, NULL
);
1078 #elif defined(__arm__)
1080 int cpu_signal_handler(int host_signum
, void *pinfo
,
1083 siginfo_t
*info
= pinfo
;
1084 struct ucontext
*uc
= puc
;
1088 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1089 pc
= uc
->uc_mcontext
.gregs
[R15
];
1091 pc
= uc
->uc_mcontext
.arm_pc
;
1093 /* XXX: compute is_write */
1095 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1097 &uc
->uc_sigmask
, puc
);
1100 #elif defined(__mc68000)
1102 int cpu_signal_handler(int host_signum
, void *pinfo
,
1105 siginfo_t
*info
= pinfo
;
1106 struct ucontext
*uc
= puc
;
1110 pc
= uc
->uc_mcontext
.gregs
[16];
1111 /* XXX: compute is_write */
1113 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1115 &uc
->uc_sigmask
, puc
);
1118 #elif defined(__ia64)
1121 /* This ought to be in <bits/siginfo.h>... */
1122 # define __ISR_VALID 1
1125 int cpu_signal_handler(int host_signum
, void *pinfo
, void *puc
)
1127 siginfo_t
*info
= pinfo
;
1128 struct ucontext
*uc
= puc
;
1132 ip
= uc
->uc_mcontext
.sc_ip
;
1133 switch (host_signum
) {
1139 if (info
->si_code
&& (info
->si_segvflags
& __ISR_VALID
))
1140 /* ISR.W (write-access) is bit 33: */
1141 is_write
= (info
->si_isr
>> 33) & 1;
1147 return handle_cpu_signal(ip
, (unsigned long)info
->si_addr
,
1149 &uc
->uc_sigmask
, puc
);
1152 #elif defined(__s390__)
1154 int cpu_signal_handler(int host_signum
, void *pinfo
,
1157 siginfo_t
*info
= pinfo
;
1158 struct ucontext
*uc
= puc
;
1162 pc
= uc
->uc_mcontext
.psw
.addr
;
1163 /* XXX: compute is_write */
1165 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1166 is_write
, &uc
->uc_sigmask
, puc
);
1169 #elif defined(__mips__)
1171 int cpu_signal_handler(int host_signum
, void *pinfo
,
1174 siginfo_t
*info
= pinfo
;
1175 struct ucontext
*uc
= puc
;
1176 greg_t pc
= uc
->uc_mcontext
.pc
;
1179 /* XXX: compute is_write */
1181 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1182 is_write
, &uc
->uc_sigmask
, puc
);
1185 #elif defined(__hppa__)
1187 int cpu_signal_handler(int host_signum
, void *pinfo
,
1190 struct siginfo
*info
= pinfo
;
1191 struct ucontext
*uc
= puc
;
1195 pc
= uc
->uc_mcontext
.sc_iaoq
[0];
1196 /* FIXME: compute is_write */
1198 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1200 &uc
->uc_sigmask
, puc
);
1205 #error host CPU specific signal handler needed
1209 #endif /* !defined(CONFIG_SOFTMMU) */