/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
int tb_invalidated_flag;

//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
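/*
 * Note: cpu_exec() below arms env->jmp_env with setjmp() before running
 * translated code; cpu_loop_exit() and cpu_resume_from_signal() longjmp()
 * back to that point. This is how generated code and host signal handlers
 * unwind to the main loop when an exception or interrupt must be handled.
 */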
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
            | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (P_FLAG | U_FLAG | X_FLAG);
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
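/*
 * Note on the lookup strategy: tb_find_fast() keys a direct-mapped cache
 * (env->tb_jmp_cache) on the virtual PC and re-checks pc/cs_base/flags,
 * falling back to tb_find_slow() on a miss. The slow path hashes the
 * *physical* PC instead, so a block is found again even when the same
 * physical page is mapped at a different virtual address.
 */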
/* main execution loop */
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;
    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
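    /*
     * Note: while translated code runs, the i386 condition codes are kept
     * lazily (CC_OP/CC_SRC) and the direction flag lives in DF as +1/-1;
     * those bits are masked out of env->eflags here and folded back in via
     * cc_table[CC_OP].compute_all() whenever the full value is needed.
     */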
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_ARM)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_M68K)
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
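                /*
                 * Note: interrupt_request is only sampled here, between two
                 * translation blocks, so asynchronous events are recognized
                 * at TB boundaries. Each handler below either clears next_tb
                 * or calls cpu_loop_exit() so that no stale direct TB jump
                 * is taken after the program flow has changed.
                 */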
                if (__builtin_expect(interrupt_request, 0) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
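                /*
                 * Note: chaining is skipped when the target TB spans two
                 * pages (page_addr[1] != -1); forcing such blocks through
                 * the lookup above keeps invalidation of either page
                 * effective, which a patched direct jump would bypass.
                 */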
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                env = cpu_single_env;
#define env cpu_single_env
#endif
                next_tb = tcg_qemu_tb_exec(tc_ptr);
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
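                /*
                 * Note on next_tb: tcg_qemu_tb_exec() returns the address of
                 * the last executed TB with the index of the outgoing jump
                 * slot encoded in the two low bits, roughly:
                 *
                 *   TranslationBlock *last_tb = (TranslationBlock *)(next_tb & ~3);
                 *   int jump_slot = next_tb & 3;
                 *
                 * tb_add_jump() above uses exactly this decoding to patch
                 * the previous block so that two TBs become directly chained.
                 */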
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
#endif
    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
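/*
 * Note on the saved_env pattern above: the helper_* routines operate on the
 * global `env`, so each wrapper temporarily points `env` at the caller's
 * CPUX86State and restores the previous value before returning.
 */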
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
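/*
 * Note on the contract (shared by every target variant below): returning 1
 * tells cpu_signal_handler() that the fault was fully handled (write to a
 * protected code page, or a guest MMU fill) and execution may resume;
 * returning 0 means it was not a guest MMU fault and the signal is a real
 * host crash. When the fault maps to a guest exception, the handler does
 * not return normally at all: it longjmp()s back into the guest.
 */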
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif
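/*
 * Note: these macros only paper over ucontext layout differences: Darwin
 * reaches the machine state through a pointer (uc_mcontext->ss/->es) while
 * glibc exposes a gregs[] array. The handler below reads the faulting EIP
 * and the page-fault error code through them, so the same code builds on
 * both hosts.
 */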
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                 REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                  REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                  REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */