/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;
//#define DEBUG_SIGNAL
#if defined(TARGET_ARM) || defined(TARGET_SPARC)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}
#endif
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4))
#define reg_T2
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      unsigned int flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr +
                             code_gen_size + CODE_GEN_ALIGN - 1) &
                            ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
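
/* Note on the scheme above: the global tb_phys_hash table is keyed by
   the *physical* address of the guest code, so a block is found again
   even when the same physical page is mapped at several virtual
   addresses, and page-granular invalidation on guest memory writes
   stays reliable.  The per-CPU tb_jmp_cache filled in at 'found:' is
   only a virtual-pc front-end cache for this table. */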
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    unsigned int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . MMU enabled . MMU no-fault . Supervisor
    flags = (env->psref << 3) | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1)
        | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) |
        (msr_se << MSR_SE) | (msr_le << MSR_LE);
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC;
#elif defined(TARGET_SH4)
    flags = env->sr & (SR_MD | SR_RB);
    cs_base = 0;         /* XXXXX */
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}
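
/* The (pc, cs_base, flags) triple computed above is, in effect, the
   identity of a translated block: any piece of CPU state that changes
   how code must be translated (CPL and segment base on x86, Thumb and
   VFP state on ARM, MMU/FPU enables elsewhere) has to be folded into
   it, otherwise a cached TB translated under different assumptions
   could be reused incorrectly. */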
/* main execution loop */

int cpu_exec(CPUState *env1)
{
    int saved_T0, saved_T1;
#if defined(reg_T2)
    int saved_T2;
#endif
    CPUState *saved_env;
#if defined(TARGET_I386)
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    int saved_i7, tmp_T0;
#endif
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;
#if defined(TARGET_I386)
    /* handle exit of HALTED state */
    if (env1->hflags & HF_HALTED_MASK) {
        /* disable halt condition */
        if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env1->eflags & IF_MASK)) {
            env1->hflags &= ~HF_HALTED_MASK;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_PPC)
    if (env1->halted) {
        if (env1->msr[MSR_EE] &&
            (env1->interrupt_request &
             (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER))) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_SPARC)
    if (env1->halted) {
        if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env1->psret != 0)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_ARM)
    if (env1->halted) {
        /* An interrupt wakes the CPU even if the I and F CPSR bits are
           set.  */
        if (env1->interrupt_request
            & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_MIPS)
    if (env1->halted) {
        if (env1->interrupt_request &
            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#endif
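
    /* a halted CPU is only woken here when an interrupt is pending
       that the target considers wake-worthy (on x86 a hard interrupt
       with IF set, on ARM any FIQ/IRQ even when masked); otherwise
       cpu_exec() returns EXCP_HALTED without entering the main loop */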
    cpu_single_env = env1;
    /* first we save global registers */
    saved_env = env;
    env = env1;
    saved_T0 = T0;
    saved_T1 = T1;
#if defined(reg_T2)
    saved_T2 = T2;
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
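    /* note: while translated code runs, the x86 arithmetic flags are
       kept lazily: CC_OP names the last flag-setting operation and
       CC_SRC holds its operands, so the EFLAGS bits are only computed
       on demand; DF (bit 10 of EFLAGS) is turned into a +1/-1 string
       increment, which is the form the string instructions want */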
#elif defined(TARGET_ARM)
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
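            /* note: any exception raised in translated code or in a
               helper ends up as a longjmp(env->jmp_env, 1) back to the
               setjmp above, and the pending exception_index is then
               dispatched by the code above on the next loop iteration */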
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            T0 = 0; /* force lookup of first TB */
            for(;;) {
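                /* T0 is (roughly) a pointer to the last executed TB,
                   with the index of the outgoing jump slot in its low
                   two bits -- in effect T0 = (long)tb | slot.  It was
                   cleared above so that the chaining code further down
                   cannot patch a stale predecessor into the first
                   block we find */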
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)) {
#if defined(TARGET_I386)
                    /* if hardware interrupt pending, we execute it */
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                        env->exception_index = EXCP_EXTERNAL;
                        env->error_code = 0;
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
                        env->exception_index = EXCP_DECR;
                        env->error_code = 0;
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        (env->CP0_Status & env->CP0_Cause & 0x0000FF00) &&
                        !(env->hflags & MIPS_HFLAG_EXL) &&
                        !(env->hflags & MIPS_HFLAG_ERL) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    } else if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_cpsr & CPSR_I)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                    }
#elif defined(TARGET_SH4)
                    /* XXXXX */
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
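                /* two synthetic interrupt bits are handled above:
                   CPU_INTERRUPT_EXITTB only forces the next TB lookup
                   to start from scratch (T0 = 0), because chained
                   jumps may no longer reflect the guest's control
                   flow, while CPU_INTERRUPT_EXIT leaves cpu_exec()
                   altogether via EXCP_INTERRUPT */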
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
                    /* restore flags in standard format */
#ifdef reg_EAX
                    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_EBX
                    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ECX
                    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
                    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_ESI
                    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
                    env->regs[R_EDI] = EDI;
#endif
#ifdef reg_EBP
                    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESP
                    env->regs[R_ESP] = ESP;
#endif
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                T0 = tmp_T0;
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (T0 != 0 &&
#if defined(USE_KQEMU)
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                        && (tb->cflags & CF_CODE_COPY) ==
                        (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
                        ) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
                        /* propagates the FP use info */
                        ((TranslationBlock *)(T0 & ~3))->cflags |=
                            (tb->cflags & CF_FP_USED);
#endif
                        spin_unlock(&tb_lock);
                    }
                }
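                /* direct block chaining happens above: the jump slot
                   (T0 & 3) of the previously executed TB is patched so
                   that it branches straight to this one, skipping the
                   lookup entirely next time.  TBs that span two pages
                   are excluded because only the lookup revalidates the
                   second page's mapping, so a chained jump could miss
                   an invalidation */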
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
                {
                    if (!(tb->cflags & CF_CODE_COPY)) {
                        if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
                            save_native_fp_state(env);
                        }
                        gen_func();
                    } else {
                        if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
                            restore_native_fp_state(env);
                        }
                        /* we work with native eflags */
                        CC_SRC = cc_table[CC_OP].compute_all();
                        CC_OP = CC_OP_EFLAGS;
                        asm(".globl exec_loop\n"
                            "\n"
                            "debug1:\n"
                            "    pushl %%ebp\n"
                            "    fs movl %10, %9\n"
                            "    fs movl %11, %%eax\n"
                            "    andl $0x400, %%eax\n"
                            "    fs orl %8, %%eax\n"
                            "    pushl %%eax\n"
                            "    popf\n"
                            "    fs movl %%esp, %12\n"
                            "    fs movl %0, %%eax\n"
                            "    fs movl %1, %%ecx\n"
                            "    fs movl %2, %%edx\n"
                            "    fs movl %3, %%ebx\n"
                            "    fs movl %4, %%esp\n"
                            "    fs movl %5, %%ebp\n"
                            "    fs movl %6, %%esi\n"
                            "    fs movl %7, %%edi\n"
                            "    fs jmp *%9\n"
                            "exec_loop:\n"
                            "    fs movl %%esp, %4\n"
                            "    fs movl %12, %%esp\n"
                            "    fs movl %%eax, %0\n"
                            "    fs movl %%ecx, %1\n"
                            "    fs movl %%edx, %2\n"
                            "    fs movl %%ebx, %3\n"
                            "    fs movl %%ebp, %5\n"
                            "    fs movl %%esi, %6\n"
                            "    fs movl %%edi, %7\n"
                            "    pushf\n"
                            "    popl %%eax\n"
                            "    movl %%eax, %%ecx\n"
                            "    andl $0x400, %%ecx\n"
                            "    shrl $9, %%ecx\n"
                            "    andl $0x8d5, %%eax\n"
                            "    fs movl %%eax, %8\n"
                            "    movl $1, %%eax\n"
                            "    subl %%ecx, %%eax\n"
                            "    fs movl %%eax, %11\n"
                            "    fs movl %9, %%ebx\n" /* get T0 value */
                            "    popl %%ebp\n"
                            :
                            : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
                              "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
                              "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
                              "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
                              "a" (gen_func),
                              "m" (*(uint8_t *)offsetof(CPUState, df)),
                              "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
                            : "%ecx", "%edx"
                            );
                    }
                }
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
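
    /* note: the only way out of the two nested loops above is the
       'break' taken once env->exception_index carries an EXCP_* exit
       request, so 'ret' now tells the caller why cpu_exec() returned */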
#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    }
#endif
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);

    /* restore global registers */
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
    T0 = saved_T0;
    T1 = saved_T1;
#if defined(reg_T2)
    T2 = saved_T2;
#endif
    env = saved_env;
    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave((target_ulong)ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor((target_ulong)ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
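
/* note: the wrappers above are called from outside the execution
   loop, so they temporarily install 's' as the global 'env' that
   load_seg() and the FPU helpers implicitly operate on, and restore
   the previous value before returning */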
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
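
/* the same protocol is used by every target variant below: return 1
   when the fault was fully handled here (the page was unprotected
   again, or the soft MMU fault was serviced) so the host signal
   handler can simply return and retry the access; return 0 when it
   was not an MMU fault at all; and never return when a guest
   exception must be raised -- in that case we longjmp back into
   cpu_exec() */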
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
{
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    }
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);
}
#endif

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = uc->uc_mcontext.gregs[REG_EIP];
    trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
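
/* on x86 hosts the page-fault error code pushed by the CPU is made
   available in REG_ERR: bit 1 is set for a write access, which is
   what the (>> 1) & 1 above extracts; trap 0xe is the page-fault
   vector */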
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context) REG_sig(link, context) /* Link register */
# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context) REG_sig(dar, context)
# define DSISR_sig(context) REG_sig(dsisr, context)
# define TRAP_sig(context) REG_sig(trap, context)
#else /* __APPLE__ */
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context) REG_sig(ctr, context)
# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context) REG_sig(lr, context) /* Link register */
# define CR_sig(context) REG_sig(cr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
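
/* like the Alpha version above, the SPARC handler below has to decode
   the opcode of the faulting instruction by hand to learn whether it
   was a store, since the host kernel does not pass a write flag in
   the siginfo */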
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, struct siginfo *info, void *puc)
{
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33: */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */