2 * i386 emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #if !defined(CONFIG_SOFTMMU)
36 #include <sys/ucontext.h>
/* Set to 1 by tb_find_slow() when a TB has just been (re)generated after a
   cache flush; tb_find_fast() checks it to recompute cached hash entries. */
int tb_invalidated_flag;

//#define DEBUG_SIGNAL

/* translation settings: mask of CPU_SETTING_* bits, written by
   cpu_set_translation_settings() / cpu_str_to_translation_mask() */
int translation_settings = 0;

/* Default no-op global register save/restore hooks; redefined further
   down for sparc hosts with glibc versions that clobber global registers. */
#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()
52 #if defined(__sparc__) && !defined(HOST_SOLARIS)
54 #if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
55 ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
56 // Work around ugly bugs in glibc that mangle global register contents
58 static volatile void *saved_env
;
59 static volatile unsigned long saved_t0
, saved_i7
;
61 #define SAVE_GLOBALS() do { \
64 asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7)); \
67 #undef RESTORE_GLOBALS
68 #define RESTORE_GLOBALS() do { \
69 env = (void *)saved_env; \
71 asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7)); \
74 static int sparc_setjmp(jmp_buf buf
)
84 #define setjmp(jmp_buf) sparc_setjmp(jmp_buf)
86 static void sparc_longjmp(jmp_buf buf
, int val
)
91 #define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
/* Abort execution of the current translated block and unwind back to the
   setjmp(env->jmp_env) point established in cpu_exec(); never returns.
   NOTE(review): this extract elides some original lines (96, 99 — at
   least the opening brace), so the body shown is incomplete. */
95 void cpu_loop_exit(void)
97 /* NOTE: the register at this point must be saved by hand because
98 longjmp restore them */
/* non-local return into cpu_exec()'s exception-handling path */
100 longjmp(env
->jmp_env
, 1);
103 #if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
107 /* exit the current TB from a signal handler. The host registers are
108 restored in a state compatible with the CPU emulator
/* NOTE(review): several original lines are elided in this extract
   (braces, the env1 -> env hand-off); the visible body is incomplete. */
110 void cpu_resume_from_signal(CPUState
*env1
, void *puc
)
112 #if !defined(CONFIG_SOFTMMU)
/* 'puc' is the host signal ucontext; only used without softmmu */
113 struct ucontext
*uc
= puc
;
118 /* XXX: restore cpu registers saved in host registers */
120 #if !defined(CONFIG_SOFTMMU)
122 /* XXX: use siglongjmp ? */
/* restore the signal mask that was in effect when the signal was
   delivered: we longjmp out instead of returning through sigreturn */
123 sigprocmask(SIG_SETMASK
, &uc
->uc_sigmask
, NULL
);
/* re-enter cpu_exec()'s main loop via its setjmp point; never returns */
126 longjmp(env
->jmp_env
, 1);
/* Table of selectable translation settings, scanned by
   cpu_str_to_translation_mask().  The scan stops at an entry with a zero
   mask; that terminator entry is elided in this extract. */
129 CPUTranslationSetting cpu_translation_settings
[] = {
130 { CPU_SETTING_NO_CACHE
, "no-cache",
131 "Do not use translation blocks cache (very slow!)" },
/* Install 'translation_flags' (a combination of CPU_SETTING_* bits) as the
   global translation_settings mask consulted by tb_find_fast/tb_find_slow.
   NOTE(review): the function's braces are elided in this extract. */
135 void cpu_set_translation_settings(int translation_flags
)
137 translation_settings
= translation_flags
;
/* Helper for cpu_str_to_translation_mask(): compare an n-byte,
   length-delimited token s1 against a setting name s2.
   NOTE(review): original lines 141-143 are elided here — they likely held
   part of the comparison logic (e.g. a length check on s2), so do not
   assume the visible memcmp is the whole function. */
140 static int cmp1(const char *s1
, int n
, const char *s2
)
/* nonzero when the first n bytes are identical */
144 return memcmp(s1
, s2
, n
) == 0;
147 /* takes a comma separated list of translation settings. Return 0 if error. */
/* NOTE(review): many original lines are elided in this extract (token
   scanning over 'str' into [p, p1), mask initialisation, the error path
   and the final return); only the setting-matching core is visible. */
148 int cpu_str_to_translation_mask(const char *str
)
150 CPUTranslationSetting
*setting
;
/* the token "all" ORs in every entry of cpu_translation_settings[] */
160 if(cmp1(p
,p1
-p
,"all")) {
161 for(setting
= cpu_translation_settings
; setting
->mask
!= 0; setting
++) {
162 mask
|= setting
->mask
;
/* otherwise match the token [p, p1) against each known setting name */
165 for(setting
= cpu_translation_settings
; setting
->mask
!= 0; setting
++) {
166 if (cmp1(p
, p1
- p
, setting
->name
))
172 mask
|= setting
->mask
;
180 static TranslationBlock
*tb_find_slow(target_ulong pc
,
181 target_ulong cs_base
,
184 TranslationBlock
*tb
, **ptb1
;
187 target_ulong phys_pc
, phys_page1
, phys_page2
, virt_page2
;
192 tb_invalidated_flag
= 0;
194 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
196 /* find translated block using physical mappings */
197 phys_pc
= get_phys_addr_code(env
, pc
);
198 phys_page1
= phys_pc
& TARGET_PAGE_MASK
;
200 if (translation_settings
& CPU_SETTING_NO_CACHE
)
203 h
= tb_phys_hash_func(phys_pc
);
204 ptb1
= &tb_phys_hash
[h
];
210 tb
->page_addr
[0] == phys_page1
&&
211 tb
->cs_base
== cs_base
&&
212 tb
->flags
== flags
) {
213 /* check next page if needed */
214 if (tb
->page_addr
[1] != -1) {
215 virt_page2
= (pc
& TARGET_PAGE_MASK
) +
217 phys_page2
= get_phys_addr_code(env
, virt_page2
);
218 if (tb
->page_addr
[1] == phys_page2
)
224 ptb1
= &tb
->phys_hash_next
;
227 /* if no translated code available, then translate it now */
230 /* flush must be done */
232 /* cannot fail at this point */
234 /* don't forget to invalidate previous TB info */
235 tb_invalidated_flag
= 1;
237 tc_ptr
= code_gen_ptr
;
239 tb
->cs_base
= cs_base
;
242 cpu_gen_code(env
, tb
, &code_gen_size
);
244 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
246 /* check next page if needed */
247 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
249 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
250 phys_page2
= get_phys_addr_code(env
, virt_page2
);
252 tb_link_phys(tb
, phys_pc
, phys_page2
);
255 /* we add the TB in the virtual pc hash table */
256 env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)] = tb
;
257 spin_unlock(&tb_lock
);
261 static inline TranslationBlock
*tb_find_fast(void)
263 TranslationBlock
*tb
;
264 target_ulong cs_base
, pc
;
267 /* we record a subset of the CPU state. It will
268 always be the same before a given translated block
270 #if defined(TARGET_I386)
272 flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
273 flags
|= env
->intercept
;
274 cs_base
= env
->segs
[R_CS
].base
;
275 pc
= cs_base
+ env
->eip
;
276 #elif defined(TARGET_ARM)
277 flags
= env
->thumb
| (env
->vfp
.vec_len
<< 1)
278 | (env
->vfp
.vec_stride
<< 4);
279 if ((env
->uncached_cpsr
& CPSR_M
) != ARM_CPU_MODE_USR
)
281 if (env
->vfp
.xregs
[ARM_VFP_FPEXC
] & (1 << 30))
283 flags
|= (env
->condexec_bits
<< 8);
286 #elif defined(TARGET_SPARC)
287 #ifdef TARGET_SPARC64
288 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
289 flags
= (((env
->pstate
& PS_PEF
) >> 1) | ((env
->fprs
& FPRS_FEF
) << 2))
290 | (env
->pstate
& PS_PRIV
) | ((env
->lsu
& (DMMU_E
| IMMU_E
)) >> 2);
292 // FPU enable . Supervisor
293 flags
= (env
->psref
<< 4) | env
->psrs
;
297 #elif defined(TARGET_PPC)
301 #elif defined(TARGET_MIPS)
302 flags
= env
->hflags
& (MIPS_HFLAG_TMASK
| MIPS_HFLAG_BMASK
);
304 pc
= env
->PC
[env
->current_tc
];
305 #elif defined(TARGET_M68K)
306 flags
= (env
->fpcr
& M68K_FPCR_PREC
) /* Bit 6 */
307 | (env
->sr
& SR_S
) /* Bit 13 */
308 | ((env
->macsr
>> 4) & 0xf); /* Bits 0-3 */
311 #elif defined(TARGET_SH4)
315 #elif defined(TARGET_ALPHA)
319 #elif defined(TARGET_CRIS)
323 #elif defined(TARGET_IA64)
325 cs_base
= 0; /* XXXXX */
328 #error unsupported CPU
330 if (translation_settings
& CPU_SETTING_NO_CACHE
)
333 tb
= env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)];
334 if (__builtin_expect(!tb
|| tb
->pc
!= pc
|| tb
->cs_base
!= cs_base
||
335 tb
->flags
!= flags
, 0)) {
336 tb
= tb_find_slow(pc
, cs_base
, flags
);
337 /* Note: we do it here to avoid a gcc bug on Mac OS X when
338 doing it in tb_find_slow */
339 if (tb_invalidated_flag
) {
340 /* as some TB could have been invalidated because
341 of memory exceptions while generating the code, we
342 must recompute the hash index here */
349 #define BREAK_CHAIN T0 = 0
351 /* main execution loop */
353 int cpu_exec(CPUState
*env1
)
355 #define DECLARE_HOST_REGS 1
356 #include "hostregs_helper.h"
357 #if defined(TARGET_SPARC)
358 #if defined(reg_REGWPTR)
359 uint32_t *saved_regwptr
;
362 int ret
, interrupt_request
;
363 void (*gen_func
)(void);
364 TranslationBlock
*tb
;
367 if (cpu_halted(env1
) == EXCP_HALTED
)
370 cpu_single_env
= env1
;
372 /* first we save global registers */
373 #define SAVE_HOST_REGS 1
374 #include "hostregs_helper.h"
379 #if defined(TARGET_I386)
380 /* put eflags in CPU temporary format */
381 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
382 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
383 CC_OP
= CC_OP_EFLAGS
;
384 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
385 #elif defined(TARGET_SPARC)
386 #if defined(reg_REGWPTR)
387 saved_regwptr
= REGWPTR
;
389 #elif defined(TARGET_M68K)
390 env
->cc_op
= CC_OP_FLAGS
;
391 env
->cc_dest
= env
->sr
& 0xf;
392 env
->cc_x
= (env
->sr
>> 4) & 1;
393 #elif defined(TARGET_ALPHA)
394 #elif defined(TARGET_ARM)
395 #elif defined(TARGET_PPC)
396 #elif defined(TARGET_MIPS)
397 #elif defined(TARGET_SH4)
398 #elif defined(TARGET_CRIS)
399 #elif defined(TARGET_IA64)
402 #error unsupported target CPU
404 env
->exception_index
= -1;
406 /* prepare setjmp context for exception handling */
408 if (setjmp(env
->jmp_env
) == 0) {
409 env
->current_tb
= NULL
;
410 /* if an exception is pending, we execute it here */
411 if (env
->exception_index
>= 0) {
412 if (env
->exception_index
>= EXCP_INTERRUPT
) {
413 /* exit request from the cpu execution loop */
414 ret
= env
->exception_index
;
416 } else if (env
->user_mode_only
) {
417 /* if user mode only, we simulate a fake exception
418 which will be handled outside the cpu execution
420 #if defined(TARGET_I386)
421 do_interrupt_user(env
->exception_index
,
422 env
->exception_is_int
,
424 env
->exception_next_eip
);
426 ret
= env
->exception_index
;
429 #if defined(TARGET_I386)
430 /* simulate a real cpu exception. On i386, it can
431 trigger new exceptions, but we do not handle
432 double or triple faults yet. */
433 do_interrupt(env
->exception_index
,
434 env
->exception_is_int
,
436 env
->exception_next_eip
, 0);
437 /* successfully delivered */
438 env
->old_exception
= -1;
439 #elif defined(TARGET_PPC)
441 #elif defined(TARGET_MIPS)
443 #elif defined(TARGET_SPARC)
444 do_interrupt(env
->exception_index
);
445 #elif defined(TARGET_ARM)
447 #elif defined(TARGET_SH4)
449 #elif defined(TARGET_ALPHA)
451 #elif defined(TARGET_CRIS)
453 #elif defined(TARGET_M68K)
455 #elif defined(TARGET_IA64)
459 env
->exception_index
= -1;
462 if (kqemu_is_ok(env
) && env
->interrupt_request
== 0) {
464 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
465 ret
= kqemu_cpu_exec(env
);
466 /* put eflags in CPU temporary format */
467 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
468 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
469 CC_OP
= CC_OP_EFLAGS
;
470 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
473 longjmp(env
->jmp_env
, 1);
474 } else if (ret
== 2) {
475 /* softmmu execution needed */
477 if (env
->interrupt_request
!= 0) {
478 /* hardware interrupt will be executed just after */
480 /* otherwise, we restart */
481 longjmp(env
->jmp_env
, 1);
489 longjmp(env
->jmp_env
, 1);
491 T0
= 0; /* force lookup of first TB */
494 interrupt_request
= env
->interrupt_request
;
495 if (__builtin_expect(interrupt_request
, 0)
496 #if defined(TARGET_I386)
497 && env
->hflags
& HF_GIF_MASK
500 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
501 env
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
502 env
->exception_index
= EXCP_DEBUG
;
505 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
506 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
507 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
508 env
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
510 env
->exception_index
= EXCP_HLT
;
514 #if defined(TARGET_I386)
515 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
516 !(env
->hflags
& HF_SMM_MASK
)) {
517 svm_check_intercept(SVM_EXIT_SMI
);
518 env
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
521 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
522 (env
->eflags
& IF_MASK
|| env
->hflags
& HF_HIF_MASK
) &&
523 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
525 svm_check_intercept(SVM_EXIT_INTR
);
526 env
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VIRQ
);
527 intno
= cpu_get_pic_interrupt(env
);
528 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
529 fprintf(logfile
, "Servicing hardware INT=0x%02x\n", intno
);
531 do_interrupt(intno
, 0, 0, 0, 1);
532 /* ensure that no TB jump will be modified as
533 the program flow was changed */
535 #if !defined(CONFIG_USER_ONLY)
536 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
537 (env
->eflags
& IF_MASK
) && !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
539 /* FIXME: this should respect TPR */
540 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
541 svm_check_intercept(SVM_EXIT_VINTR
);
542 intno
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_vector
));
543 if (loglevel
& CPU_LOG_TB_IN_ASM
)
544 fprintf(logfile
, "Servicing virtual hardware INT=0x%02x\n", intno
);
545 do_interrupt(intno
, 0, 0, -1, 1);
546 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
),
547 ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
)) & ~V_IRQ_MASK
);
551 #elif defined(TARGET_PPC)
553 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
557 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
558 ppc_hw_interrupt(env
);
559 if (env
->pending_interrupts
== 0)
560 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
563 #elif defined(TARGET_MIPS)
564 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
565 (env
->CP0_Status
& env
->CP0_Cause
& CP0Ca_IP_mask
) &&
566 (env
->CP0_Status
& (1 << CP0St_IE
)) &&
567 !(env
->CP0_Status
& (1 << CP0St_EXL
)) &&
568 !(env
->CP0_Status
& (1 << CP0St_ERL
)) &&
569 !(env
->hflags
& MIPS_HFLAG_DM
)) {
571 env
->exception_index
= EXCP_EXT_INTERRUPT
;
576 #elif defined(TARGET_SPARC)
577 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
579 int pil
= env
->interrupt_index
& 15;
580 int type
= env
->interrupt_index
& 0xf0;
582 if (((type
== TT_EXTINT
) &&
583 (pil
== 15 || pil
> env
->psrpil
)) ||
585 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
586 do_interrupt(env
->interrupt_index
);
587 env
->interrupt_index
= 0;
588 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
593 } else if (interrupt_request
& CPU_INTERRUPT_TIMER
) {
594 //do_interrupt(0, 0, 0, 0, 0);
595 env
->interrupt_request
&= ~CPU_INTERRUPT_TIMER
;
597 #elif defined(TARGET_ARM)
598 if (interrupt_request
& CPU_INTERRUPT_FIQ
599 && !(env
->uncached_cpsr
& CPSR_F
)) {
600 env
->exception_index
= EXCP_FIQ
;
604 /* ARMv7-M interrupt return works by loading a magic value
605 into the PC. On real hardware the load causes the
606 return to occur. The qemu implementation performs the
607 jump normally, then does the exception return when the
608 CPU tries to execute code at the magic address.
609 This will cause the magic PC value to be pushed to
610 the stack if an interrupt occurred at the wrong time.
611 We avoid this by disabling interrupts when
612 pc contains a magic address. */
613 if (interrupt_request
& CPU_INTERRUPT_HARD
614 && ((IS_M(env
) && env
->regs
[15] < 0xfffffff0)
615 || !(env
->uncached_cpsr
& CPSR_I
))) {
616 env
->exception_index
= EXCP_IRQ
;
620 #elif defined(TARGET_SH4)
621 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
625 #elif defined(TARGET_ALPHA)
626 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
630 #elif defined(TARGET_CRIS)
631 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
633 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
636 #elif defined(TARGET_M68K)
637 if (interrupt_request
& CPU_INTERRUPT_HARD
638 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
639 < env
->pending_level
) {
640 /* Real hardware gets the interrupt vector via an
641 IACK cycle at this point. Current emulated
642 hardware doesn't rely on this, so we
643 provide/save the vector when the interrupt is
645 env
->exception_index
= env
->pending_vector
;
650 /* Don't use the cached interrupt_request value,
651 do_interrupt may have updated the EXITTB flag. */
652 if (env
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
653 env
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
654 /* ensure that no TB jump will be modified as
655 the program flow was changed */
658 if (interrupt_request
& CPU_INTERRUPT_EXIT
) {
659 env
->interrupt_request
&= ~CPU_INTERRUPT_EXIT
;
660 env
->exception_index
= EXCP_INTERRUPT
;
665 if ((loglevel
& CPU_LOG_TB_CPU
)) {
666 /* restore flags in standard format */
668 #if defined(TARGET_I386)
669 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
670 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
671 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
672 #elif defined(TARGET_ARM)
673 cpu_dump_state(env
, logfile
, fprintf
, 0);
674 #elif defined(TARGET_SPARC)
675 REGWPTR
= env
->regbase
+ (env
->cwp
* 16);
676 env
->regwptr
= REGWPTR
;
677 cpu_dump_state(env
, logfile
, fprintf
, 0);
678 #elif defined(TARGET_PPC)
679 cpu_dump_state(env
, logfile
, fprintf
, 0);
680 #elif defined(TARGET_M68K)
681 cpu_m68k_flush_flags(env
, env
->cc_op
);
682 env
->cc_op
= CC_OP_FLAGS
;
683 env
->sr
= (env
->sr
& 0xffe0)
684 | env
->cc_dest
| (env
->cc_x
<< 4);
685 cpu_dump_state(env
, logfile
, fprintf
, 0);
686 #elif defined(TARGET_MIPS)
687 cpu_dump_state(env
, logfile
, fprintf
, 0);
688 #elif defined(TARGET_SH4)
689 cpu_dump_state(env
, logfile
, fprintf
, 0);
690 #elif defined(TARGET_ALPHA)
691 cpu_dump_state(env
, logfile
, fprintf
, 0);
692 #elif defined(TARGET_CRIS)
693 cpu_dump_state(env
, logfile
, fprintf
, 0);
695 #error unsupported target CPU
701 if ((loglevel
& CPU_LOG_EXEC
)) {
702 fprintf(logfile
, "Trace 0x%08lx [" TARGET_FMT_lx
"] %s\n",
703 (long)tb
->tc_ptr
, tb
->pc
,
704 lookup_symbol(tb
->pc
));
708 /* see if we can patch the calling TB. When the TB
709 spans two pages, we cannot safely do a direct
714 (env
->kqemu_enabled
!= 2) &&
716 tb
->page_addr
[1] == -1) {
718 tb_add_jump((TranslationBlock
*)(long)(T0
& ~3), T0
& 3, tb
);
719 spin_unlock(&tb_lock
);
723 env
->current_tb
= tb
;
724 /* execute the generated code */
725 gen_func
= (void *)tc_ptr
;
726 #if defined(__sparc__)
727 __asm__
__volatile__("call %0\n\t"
731 : "i0", "i1", "i2", "i3", "i4", "i5",
732 "o0", "o1", "o2", "o3", "o4", "o5",
733 "l0", "l1", "l2", "l3", "l4", "l5",
735 #elif defined(__arm__)
736 asm volatile ("mov pc, %0\n\t"
737 ".global exec_loop\n\t"
741 : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
742 #elif defined(__ia64)
749 fp
.gp
= code_gen_buffer
+ 2 * (1 << 20);
750 (*(void (*)(void)) &fp
)();
754 env
->current_tb
= NULL
;
755 /* reset soft MMU for next block (it can currently
756 only be set by a memory fault) */
757 #if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
758 if (env
->hflags
& HF_SOFTMMU_MASK
) {
759 env
->hflags
&= ~HF_SOFTMMU_MASK
;
760 /* do not allow linking to another block */
764 #if defined(USE_KQEMU)
765 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
766 if (kqemu_is_ok(env
) &&
767 (cpu_get_time_fast() - env
->last_io_time
) >= MIN_CYCLE_BEFORE_SWITCH
) {
778 #if defined(TARGET_I386)
779 /* restore flags in standard format */
780 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
781 #elif defined(TARGET_ARM)
782 /* XXX: Save/restore host fpu exception state?. */
783 #elif defined(TARGET_SPARC)
784 #if defined(reg_REGWPTR)
785 REGWPTR
= saved_regwptr
;
787 #elif defined(TARGET_PPC)
788 #elif defined(TARGET_M68K)
789 cpu_m68k_flush_flags(env
, env
->cc_op
);
790 env
->cc_op
= CC_OP_FLAGS
;
791 env
->sr
= (env
->sr
& 0xffe0)
792 | env
->cc_dest
| (env
->cc_x
<< 4);
793 #elif defined(TARGET_MIPS)
794 #elif defined(TARGET_SH4)
795 #elif defined(TARGET_IA64)
796 #elif defined(TARGET_ALPHA)
797 #elif defined(TARGET_CRIS)
800 #error unsupported target CPU
803 /* restore global registers */
805 #include "hostregs_helper.h"
807 /* fail safe : never use cpu_single_env outside cpu_exec() */
808 cpu_single_env
= NULL
;
/* NOTE(review): original line 813 (the rest of the comment below), the
   function braces and the surrounding #if are elided in this extract. */
812 /* must only be called from the generated code as an exception can be
814 void tb_invalidate_page_range(target_ulong start
, target_ulong end
)
816 /* XXX: cannot enable it yet because it yields to MMU exception
817 where NIP != read address on PowerPC */
819 target_ulong phys_addr
;
/* translate the guest-virtual start address into a code physical address */
820 phys_addr
= get_phys_addr_code(env
, start
);
/* drop every TB overlapping [phys_addr, phys_addr + (end - start)) */
821 tb_invalidate_phys_page_range(phys_addr
, phys_addr
+ end
- start
, 0);
825 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
/* Load segment register 'seg_reg' with 'selector' for user-mode-only
   emulation.  NOTE(review): the saved_env save/restore of the global
   'env', the braces and the else keyword are elided in this extract. */
827 void cpu_x86_load_seg(CPUX86State
*s
, int seg_reg
, int selector
)
829 CPUX86State
*saved_env
;
/* real mode (CR0.PE clear) or vm86 mode: segment base is simply
   selector << 4 with a 64K limit — no descriptor table lookup */
833 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
835 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
836 (selector
<< 4), 0xffff, 0);
/* protected mode: perform the full descriptor load/checks */
838 load_seg(seg_reg
, selector
);
/* Save the x87 FPU state of CPU 's' to guest address 'ptr'; data32
   presumably selects the 32- vs 16-bit memory image — confirm against
   helper_fsave.  NOTE(review): the env save/restore code (original lines
   846-849, 851-852) is elided in this extract. */
843 void cpu_x86_fsave(CPUX86State
*s
, target_ulong ptr
, int data32
)
845 CPUX86State
*saved_env
;
/* delegate to the helper, which operates on the global 'env' */
850 helper_fsave(ptr
, data32
);
/* Restore the x87 FPU state of CPU 's' from guest address 'ptr'; data32
   presumably selects the 32- vs 16-bit memory image — confirm against
   helper_frstor.  NOTE(review): the env save/restore code (original lines
   858-861, 863-864) is elided in this extract. */
855 void cpu_x86_frstor(CPUX86State
*s
, target_ulong ptr
, int data32
)
857 CPUX86State
*saved_env
;
/* delegate to the helper, which operates on the global 'env' */
862 helper_frstor(ptr
, data32
);
867 #endif /* TARGET_I386 */
869 #if !defined(CONFIG_SOFTMMU)
871 #if defined(TARGET_I386)
873 /* 'pc' is the host PC at which the exception was raised. 'address' is
874 the effective address of the memory exception. 'is_write' is 1 if a
875 write caused the exception and otherwise 0'. 'old_set' is the
876 signal set which should be restored */
877 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
878 int is_write
, sigset_t
*old_set
,
881 TranslationBlock
*tb
;
885 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
886 #if defined(DEBUG_SIGNAL)
887 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
888 pc
, address
, is_write
, *(unsigned long *)old_set
);
890 /* XXX: locking issue */
891 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
895 /* see if it is an MMU fault */
896 ret
= cpu_x86_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
898 return 0; /* not an MMU fault */
900 return 1; /* the MMU fault was handled without causing real CPU fault */
901 /* now we have a real cpu fault */
904 /* the PC is inside the translated code. It means that we have
905 a virtual CPU fault */
906 cpu_restore_state(tb
, env
, pc
, puc
);
910 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
911 env
->eip
, env
->cr
[2], env
->error_code
);
913 /* we restore the process signal mask as the sigreturn should
914 do it (XXX: use sigsetjmp) */
915 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
916 raise_exception_err(env
->exception_index
, env
->error_code
);
918 /* activate soft MMU for this block */
919 env
->hflags
|= HF_SOFTMMU_MASK
;
920 cpu_resume_from_signal(env
, puc
);
922 /* never comes here */
926 #elif defined(TARGET_ARM)
927 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
928 int is_write
, sigset_t
*old_set
,
931 TranslationBlock
*tb
;
935 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
936 #if defined(DEBUG_SIGNAL)
937 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
938 pc
, address
, is_write
, *(unsigned long *)old_set
);
940 /* XXX: locking issue */
941 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
944 /* see if it is an MMU fault */
945 ret
= cpu_arm_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
947 return 0; /* not an MMU fault */
949 return 1; /* the MMU fault was handled without causing real CPU fault */
950 /* now we have a real cpu fault */
953 /* the PC is inside the translated code. It means that we have
954 a virtual CPU fault */
955 cpu_restore_state(tb
, env
, pc
, puc
);
957 /* we restore the process signal mask as the sigreturn should
958 do it (XXX: use sigsetjmp) */
959 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
962 #elif defined(TARGET_SPARC)
963 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
964 int is_write
, sigset_t
*old_set
,
967 TranslationBlock
*tb
;
971 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
972 #if defined(DEBUG_SIGNAL)
973 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
974 pc
, address
, is_write
, *(unsigned long *)old_set
);
976 /* XXX: locking issue */
977 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
980 /* see if it is an MMU fault */
981 ret
= cpu_sparc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
983 return 0; /* not an MMU fault */
985 return 1; /* the MMU fault was handled without causing real CPU fault */
986 /* now we have a real cpu fault */
989 /* the PC is inside the translated code. It means that we have
990 a virtual CPU fault */
991 cpu_restore_state(tb
, env
, pc
, puc
);
993 /* we restore the process signal mask as the sigreturn should
994 do it (XXX: use sigsetjmp) */
995 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
998 #elif defined (TARGET_PPC)
999 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1000 int is_write
, sigset_t
*old_set
,
1003 TranslationBlock
*tb
;
1007 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1008 #if defined(DEBUG_SIGNAL)
1009 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1010 pc
, address
, is_write
, *(unsigned long *)old_set
);
1012 /* XXX: locking issue */
1013 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1017 /* see if it is an MMU fault */
1018 ret
= cpu_ppc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1020 return 0; /* not an MMU fault */
1022 return 1; /* the MMU fault was handled without causing real CPU fault */
1024 /* now we have a real cpu fault */
1025 tb
= tb_find_pc(pc
);
1027 /* the PC is inside the translated code. It means that we have
1028 a virtual CPU fault */
1029 cpu_restore_state(tb
, env
, pc
, puc
);
1033 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1034 env
->nip
, env
->error_code
, tb
);
1036 /* we restore the process signal mask as the sigreturn should
1037 do it (XXX: use sigsetjmp) */
1038 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1039 do_raise_exception_err(env
->exception_index
, env
->error_code
);
1041 /* activate soft MMU for this block */
1042 cpu_resume_from_signal(env
, puc
);
1044 /* never comes here */
1048 #elif defined(TARGET_M68K)
1049 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1050 int is_write
, sigset_t
*old_set
,
1053 TranslationBlock
*tb
;
1057 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1058 #if defined(DEBUG_SIGNAL)
1059 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1060 pc
, address
, is_write
, *(unsigned long *)old_set
);
1062 /* XXX: locking issue */
1063 if (is_write
&& page_unprotect(address
, pc
, puc
)) {
1066 /* see if it is an MMU fault */
1067 ret
= cpu_m68k_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1069 return 0; /* not an MMU fault */
1071 return 1; /* the MMU fault was handled without causing real CPU fault */
1072 /* now we have a real cpu fault */
1073 tb
= tb_find_pc(pc
);
1075 /* the PC is inside the translated code. It means that we have
1076 a virtual CPU fault */
1077 cpu_restore_state(tb
, env
, pc
, puc
);
1079 /* we restore the process signal mask as the sigreturn should
1080 do it (XXX: use sigsetjmp) */
1081 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1083 /* never comes here */
1087 #elif defined (TARGET_MIPS)
1088 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1089 int is_write
, sigset_t
*old_set
,
1092 TranslationBlock
*tb
;
1096 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1097 #if defined(DEBUG_SIGNAL)
1098 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1099 pc
, address
, is_write
, *(unsigned long *)old_set
);
1101 /* XXX: locking issue */
1102 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1106 /* see if it is an MMU fault */
1107 ret
= cpu_mips_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1109 return 0; /* not an MMU fault */
1111 return 1; /* the MMU fault was handled without causing real CPU fault */
1113 /* now we have a real cpu fault */
1114 tb
= tb_find_pc(pc
);
1116 /* the PC is inside the translated code. It means that we have
1117 a virtual CPU fault */
1118 cpu_restore_state(tb
, env
, pc
, puc
);
1122 printf("PF exception: PC=0x" TARGET_FMT_lx
" error=0x%x %p\n",
1123 env
->PC
, env
->error_code
, tb
);
1125 /* we restore the process signal mask as the sigreturn should
1126 do it (XXX: use sigsetjmp) */
1127 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1128 do_raise_exception_err(env
->exception_index
, env
->error_code
);
1130 /* activate soft MMU for this block */
1131 cpu_resume_from_signal(env
, puc
);
1133 /* never comes here */
1137 #elif defined (TARGET_SH4)
1138 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1139 int is_write
, sigset_t
*old_set
,
1142 TranslationBlock
*tb
;
1146 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1147 #if defined(DEBUG_SIGNAL)
1148 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1149 pc
, address
, is_write
, *(unsigned long *)old_set
);
1151 /* XXX: locking issue */
1152 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1156 /* see if it is an MMU fault */
1157 ret
= cpu_sh4_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1159 return 0; /* not an MMU fault */
1161 return 1; /* the MMU fault was handled without causing real CPU fault */
1163 /* now we have a real cpu fault */
1164 tb
= tb_find_pc(pc
);
1166 /* the PC is inside the translated code. It means that we have
1167 a virtual CPU fault */
1168 cpu_restore_state(tb
, env
, pc
, puc
);
1171 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1172 env
->nip
, env
->error_code
, tb
);
1174 /* we restore the process signal mask as the sigreturn should
1175 do it (XXX: use sigsetjmp) */
1176 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1178 /* never comes here */
1182 #elif defined (TARGET_ALPHA)
1183 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1184 int is_write
, sigset_t
*old_set
,
1187 TranslationBlock
*tb
;
1191 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1192 #if defined(DEBUG_SIGNAL)
1193 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1194 pc
, address
, is_write
, *(unsigned long *)old_set
);
1196 /* XXX: locking issue */
1197 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1201 /* see if it is an MMU fault */
1202 ret
= cpu_alpha_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1204 return 0; /* not an MMU fault */
1206 return 1; /* the MMU fault was handled without causing real CPU fault */
1208 /* now we have a real cpu fault */
1209 tb
= tb_find_pc(pc
);
1211 /* the PC is inside the translated code. It means that we have
1212 a virtual CPU fault */
1213 cpu_restore_state(tb
, env
, pc
, puc
);
1216 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1217 env
->nip
, env
->error_code
, tb
);
1219 /* we restore the process signal mask as the sigreturn should
1220 do it (XXX: use sigsetjmp) */
1221 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1223 /* never comes here */
1226 #elif defined (TARGET_CRIS)
1227 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1228 int is_write
, sigset_t
*old_set
,
1231 TranslationBlock
*tb
;
1235 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1236 #if defined(DEBUG_SIGNAL)
1237 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1238 pc
, address
, is_write
, *(unsigned long *)old_set
);
1240 /* XXX: locking issue */
1241 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1245 /* see if it is an MMU fault */
1246 ret
= cpu_cris_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1248 return 0; /* not an MMU fault */
1250 return 1; /* the MMU fault was handled without causing real CPU fault */
1252 /* now we have a real cpu fault */
1253 tb
= tb_find_pc(pc
);
1255 /* the PC is inside the translated code. It means that we have
1256 a virtual CPU fault */
1257 cpu_restore_state(tb
, env
, pc
, puc
);
1260 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1261 env
->nip
, env
->error_code
, tb
);
1263 /* we restore the process signal mask as the sigreturn should
1264 do it (XXX: use sigsetjmp) */
1265 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1267 /* never comes here */
1272 #error unsupported target CPU
1275 #if defined(__i386__)
1277 #if defined(__APPLE__)
1278 # include <sys/ucontext.h>
1280 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1281 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1282 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1284 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1285 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1286 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1289 int cpu_signal_handler(int host_signum
, void *pinfo
,
1292 siginfo_t
*info
= pinfo
;
1293 struct ucontext
*uc
= puc
;
1301 #define REG_TRAPNO TRAPNO
1304 trapno
= TRAP_sig(uc
);
1305 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1307 (ERROR_sig(uc
) >> 1) & 1 : 0,
1308 &uc
->uc_sigmask
, puc
);
1311 #elif defined(__x86_64__)
1313 int cpu_signal_handler(int host_signum
, void *pinfo
,
1316 siginfo_t
*info
= pinfo
;
1317 struct ucontext
*uc
= puc
;
1320 pc
= uc
->uc_mcontext
.gregs
[REG_RIP
];
1321 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1322 uc
->uc_mcontext
.gregs
[REG_TRAPNO
] == 0xe ?
1323 (uc
->uc_mcontext
.gregs
[REG_ERR
] >> 1) & 1 : 0,
1324 &uc
->uc_sigmask
, puc
);
1327 #elif defined(__powerpc__)
1329 /***********************************************************************
1330 * signal context platform-specific definitions
1334 /* All Registers access - only for local access */
1335 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1336 /* Gpr Registers access */
1337 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1338 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1339 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1340 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1341 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1342 # define LR_sig(context) REG_sig(link, context) /* Link register */
1343 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1344 /* Float Registers access */
1345 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1346 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1347 /* Exception Registers access */
1348 # define DAR_sig(context) REG_sig(dar, context)
1349 # define DSISR_sig(context) REG_sig(dsisr, context)
1350 # define TRAP_sig(context) REG_sig(trap, context)
1354 # include <sys/ucontext.h>
1355 typedef struct ucontext SIGCONTEXT
;
1356 /* All Registers access - only for local access */
1357 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1358 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1359 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1360 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1361 /* Gpr Registers access */
1362 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1363 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1364 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1365 # define CTR_sig(context) REG_sig(ctr, context)
1366 # define XER_sig(context) REG_sig(xer, context) /* Link register */
1367 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1368 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1369 /* Float Registers access */
1370 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1371 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1372 /* Exception Registers access */
1373 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1374 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1375 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1376 #endif /* __APPLE__ */
1378 int cpu_signal_handler(int host_signum
, void *pinfo
,
1381 siginfo_t
*info
= pinfo
;
1382 struct ucontext
*uc
= puc
;
1390 if (DSISR_sig(uc
) & 0x00800000)
1393 if (TRAP_sig(uc
) != 0x400 && (DSISR_sig(uc
) & 0x02000000))
1396 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1397 is_write
, &uc
->uc_sigmask
, puc
);
1400 #elif defined(__alpha__)
1402 int cpu_signal_handler(int host_signum
, void *pinfo
,
1405 siginfo_t
*info
= pinfo
;
1406 struct ucontext
*uc
= puc
;
1407 uint32_t *pc
= uc
->uc_mcontext
.sc_pc
;
1408 uint32_t insn
= *pc
;
1411 /* XXX: need kernel patch to get write flag faster */
1412 switch (insn
>> 26) {
1427 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1428 is_write
, &uc
->uc_sigmask
, puc
);
1430 #elif defined(__sparc__)
1432 int cpu_signal_handler(int host_signum
, void *pinfo
,
1435 siginfo_t
*info
= pinfo
;
1436 uint32_t *regs
= (uint32_t *)(info
+ 1);
1437 void *sigmask
= (regs
+ 20);
1442 /* XXX: is there a standard glibc define ? */
1444 /* XXX: need kernel patch to get write flag faster */
1446 insn
= *(uint32_t *)pc
;
1447 if ((insn
>> 30) == 3) {
1448 switch((insn
>> 19) & 0x3f) {
1460 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1461 is_write
, sigmask
, NULL
);
1464 #elif defined(__arm__)
1466 int cpu_signal_handler(int host_signum
, void *pinfo
,
1469 siginfo_t
*info
= pinfo
;
1470 struct ucontext
*uc
= puc
;
1474 pc
= uc
->uc_mcontext
.gregs
[R15
];
1475 /* XXX: compute is_write */
1477 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1479 &uc
->uc_sigmask
, puc
);
1482 #elif defined(__mc68000)
1484 int cpu_signal_handler(int host_signum
, void *pinfo
,
1487 siginfo_t
*info
= pinfo
;
1488 struct ucontext
*uc
= puc
;
1492 pc
= uc
->uc_mcontext
.gregs
[16];
1493 /* XXX: compute is_write */
1495 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1497 &uc
->uc_sigmask
, puc
);
1500 #elif defined(__ia64)
1503 /* This ought to be in <bits/siginfo.h>... */
1504 # define __ISR_VALID 1
1507 int cpu_signal_handler(int host_signum
, void *pinfo
, void *puc
)
1509 siginfo_t
*info
= pinfo
;
1510 struct ucontext
*uc
= puc
;
1514 ip
= uc
->uc_mcontext
.sc_ip
;
1515 switch (host_signum
) {
1521 if (info
->si_code
&& (info
->si_segvflags
& __ISR_VALID
))
1522 /* ISR.W (write-access) is bit 33: */
1523 is_write
= (info
->si_isr
>> 33) & 1;
1529 return handle_cpu_signal(ip
, (unsigned long)info
->si_addr
,
1531 &uc
->uc_sigmask
, puc
);
1534 #elif defined(__s390__)
1536 int cpu_signal_handler(int host_signum
, void *pinfo
,
1539 siginfo_t
*info
= pinfo
;
1540 struct ucontext
*uc
= puc
;
1544 pc
= uc
->uc_mcontext
.psw
.addr
;
1545 /* XXX: compute is_write */
1547 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1548 is_write
, &uc
->uc_sigmask
, puc
);
1551 #elif defined(__mips__)
1553 int cpu_signal_handler(int host_signum
, void *pinfo
,
1556 siginfo_t
*info
= pinfo
;
1557 struct ucontext
*uc
= puc
;
1558 greg_t pc
= uc
->uc_mcontext
.pc
;
1561 /* XXX: compute is_write */
1563 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1564 is_write
, &uc
->uc_sigmask
, puc
);
1569 #error host CPU specific signal handler needed
1573 #endif /* !defined(CONFIG_SOFTMMU) */