/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
25 #if !defined(CONFIG_SOFTMMU)
36 #include <sys/ucontext.h>
41 extern int kvm_allowed
;
44 int tb_invalidated_flag
;
47 //#define DEBUG_SIGNAL
49 /* translation settings */
50 int translation_settings
= 0;
52 #define SAVE_GLOBALS()
53 #define RESTORE_GLOBALS()
55 #if defined(__sparc__) && !defined(HOST_SOLARIS)
57 #if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
58 ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
59 // Work around ugly bugs in glibc that mangle global register contents
61 static volatile void *saved_env
;
62 static volatile unsigned long saved_t0
, saved_i7
;
64 #define SAVE_GLOBALS() do { \
67 asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7)); \
70 #undef RESTORE_GLOBALS
71 #define RESTORE_GLOBALS() do { \
72 env = (void *)saved_env; \
74 asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7)); \
77 static int sparc_setjmp(jmp_buf buf
)
87 #define setjmp(jmp_buf) sparc_setjmp(jmp_buf)
89 static void sparc_longjmp(jmp_buf buf
, int val
)
94 #define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
98 void cpu_loop_exit(void)
100 /* NOTE: the register at this point must be saved by hand because
101 longjmp restore them */
103 longjmp(env
->jmp_env
, 1);
106 #if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
110 /* exit the current TB from a signal handler. The host registers are
111 restored in a state compatible with the CPU emulator
113 void cpu_resume_from_signal(CPUState
*env1
, void *puc
)
115 #if !defined(CONFIG_SOFTMMU)
116 struct ucontext
*uc
= puc
;
121 /* XXX: restore cpu registers saved in host registers */
123 #if !defined(CONFIG_SOFTMMU)
125 /* XXX: use siglongjmp ? */
126 sigprocmask(SIG_SETMASK
, &uc
->uc_sigmask
, NULL
);
129 longjmp(env
->jmp_env
, 1);
132 CPUTranslationSetting cpu_translation_settings
[] = {
133 { CPU_SETTING_NO_CACHE
, "no-cache",
134 "Do not use translation blocks cache (very slow!)" },
138 void cpu_set_translation_settings(int translation_flags
)
140 translation_settings
= translation_flags
;
/*
 * Compare the 'n'-character token starting at 's1' against the
 * NUL-terminated name 's2'.  Returns 1 only when 's2' is exactly 'n'
 * characters long and the characters match; the length guard lets the
 * option parser match comma-separated tokens in place without copying
 * and prevents a prefix such as "no-cach" from matching "no-cache".
 */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
150 /* takes a comma separated list of translation settings. Return 0 if error. */
151 int cpu_str_to_translation_mask(const char *str
)
153 CPUTranslationSetting
*setting
;
163 if(cmp1(p
,p1
-p
,"all")) {
164 for(setting
= cpu_translation_settings
; setting
->mask
!= 0; setting
++) {
165 mask
|= setting
->mask
;
168 for(setting
= cpu_translation_settings
; setting
->mask
!= 0; setting
++) {
169 if (cmp1(p
, p1
- p
, setting
->name
))
175 mask
|= setting
->mask
;
183 static TranslationBlock
*tb_find_slow(target_ulong pc
,
184 target_ulong cs_base
,
187 TranslationBlock
*tb
, **ptb1
;
190 target_ulong phys_pc
, phys_page1
, phys_page2
, virt_page2
;
195 tb_invalidated_flag
= 0;
197 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
199 /* find translated block using physical mappings */
200 phys_pc
= get_phys_addr_code(env
, pc
);
201 phys_page1
= phys_pc
& TARGET_PAGE_MASK
;
203 if (translation_settings
& CPU_SETTING_NO_CACHE
)
206 h
= tb_phys_hash_func(phys_pc
);
207 ptb1
= &tb_phys_hash
[h
];
213 tb
->page_addr
[0] == phys_page1
&&
214 tb
->cs_base
== cs_base
&&
215 tb
->flags
== flags
) {
216 /* check next page if needed */
217 if (tb
->page_addr
[1] != -1) {
218 virt_page2
= (pc
& TARGET_PAGE_MASK
) +
220 phys_page2
= get_phys_addr_code(env
, virt_page2
);
221 if (tb
->page_addr
[1] == phys_page2
)
227 ptb1
= &tb
->phys_hash_next
;
230 /* if no translated code available, then translate it now */
233 /* flush must be done */
235 /* cannot fail at this point */
237 /* don't forget to invalidate previous TB info */
238 tb_invalidated_flag
= 1;
240 tc_ptr
= code_gen_ptr
;
242 tb
->cs_base
= cs_base
;
245 cpu_gen_code(env
, tb
, &code_gen_size
);
247 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
249 /* check next page if needed */
250 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
252 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
253 phys_page2
= get_phys_addr_code(env
, virt_page2
);
255 tb_link_phys(tb
, phys_pc
, phys_page2
);
258 /* we add the TB in the virtual pc hash table */
259 env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)] = tb
;
260 spin_unlock(&tb_lock
);
264 static inline TranslationBlock
*tb_find_fast(void)
266 TranslationBlock
*tb
;
267 target_ulong cs_base
, pc
;
270 /* we record a subset of the CPU state. It will
271 always be the same before a given translated block
273 #if defined(TARGET_I386)
275 flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
276 flags
|= env
->intercept
;
277 cs_base
= env
->segs
[R_CS
].base
;
278 pc
= cs_base
+ env
->eip
;
279 #elif defined(TARGET_ARM)
280 flags
= env
->thumb
| (env
->vfp
.vec_len
<< 1)
281 | (env
->vfp
.vec_stride
<< 4);
282 if ((env
->uncached_cpsr
& CPSR_M
) != ARM_CPU_MODE_USR
)
284 if (env
->vfp
.xregs
[ARM_VFP_FPEXC
] & (1 << 30))
286 flags
|= (env
->condexec_bits
<< 8);
289 #elif defined(TARGET_SPARC)
290 #ifdef TARGET_SPARC64
291 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
292 flags
= (((env
->pstate
& PS_PEF
) >> 1) | ((env
->fprs
& FPRS_FEF
) << 2))
293 | (env
->pstate
& PS_PRIV
) | ((env
->lsu
& (DMMU_E
| IMMU_E
)) >> 2);
295 // FPU enable . Supervisor
296 flags
= (env
->psref
<< 4) | env
->psrs
;
300 #elif defined(TARGET_PPC)
304 #elif defined(TARGET_MIPS)
305 flags
= env
->hflags
& (MIPS_HFLAG_TMASK
| MIPS_HFLAG_BMASK
);
307 pc
= env
->PC
[env
->current_tc
];
308 #elif defined(TARGET_M68K)
309 flags
= (env
->fpcr
& M68K_FPCR_PREC
) /* Bit 6 */
310 | (env
->sr
& SR_S
) /* Bit 13 */
311 | ((env
->macsr
>> 4) & 0xf); /* Bits 0-3 */
314 #elif defined(TARGET_SH4)
318 #elif defined(TARGET_ALPHA)
322 #elif defined(TARGET_CRIS)
326 #elif defined(TARGET_IA64)
328 cs_base
= 0; /* XXXXX */
331 #error unsupported CPU
333 if (translation_settings
& CPU_SETTING_NO_CACHE
)
336 tb
= env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)];
337 if (__builtin_expect(!tb
|| tb
->pc
!= pc
|| tb
->cs_base
!= cs_base
||
338 tb
->flags
!= flags
, 0)) {
339 tb
= tb_find_slow(pc
, cs_base
, flags
);
340 /* Note: we do it here to avoid a gcc bug on Mac OS X when
341 doing it in tb_find_slow */
342 if (tb_invalidated_flag
) {
343 /* as some TB could have been invalidated because
344 of memory exceptions while generating the code, we
345 must recompute the hash index here */
352 #define BREAK_CHAIN T0 = 0
354 /* main execution loop */
356 int cpu_exec(CPUState
*env1
)
358 #define DECLARE_HOST_REGS 1
359 #include "hostregs_helper.h"
360 #if defined(TARGET_SPARC)
361 #if defined(reg_REGWPTR)
362 uint32_t *saved_regwptr
;
365 int ret
, interrupt_request
;
366 void (*gen_func
)(void);
367 TranslationBlock
*tb
;
370 if (cpu_halted(env1
) == EXCP_HALTED
)
373 cpu_single_env
= env1
;
375 /* first we save global registers */
376 #define SAVE_HOST_REGS 1
377 #include "hostregs_helper.h"
382 #if defined(TARGET_I386)
383 /* put eflags in CPU temporary format */
384 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
385 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
386 CC_OP
= CC_OP_EFLAGS
;
387 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
388 #elif defined(TARGET_SPARC)
389 #if defined(reg_REGWPTR)
390 saved_regwptr
= REGWPTR
;
392 #elif defined(TARGET_M68K)
393 env
->cc_op
= CC_OP_FLAGS
;
394 env
->cc_dest
= env
->sr
& 0xf;
395 env
->cc_x
= (env
->sr
>> 4) & 1;
396 #elif defined(TARGET_ALPHA)
397 #elif defined(TARGET_ARM)
398 #elif defined(TARGET_PPC)
399 #elif defined(TARGET_MIPS)
400 #elif defined(TARGET_SH4)
401 #elif defined(TARGET_CRIS)
402 #elif defined(TARGET_IA64)
405 #error unsupported target CPU
407 env
->exception_index
= -1;
409 /* prepare setjmp context for exception handling */
411 if (setjmp(env
->jmp_env
) == 0) {
412 env
->current_tb
= NULL
;
413 /* if an exception is pending, we execute it here */
414 if (env
->exception_index
>= 0) {
415 if (env
->exception_index
>= EXCP_INTERRUPT
) {
416 /* exit request from the cpu execution loop */
417 ret
= env
->exception_index
;
419 } else if (env
->user_mode_only
) {
420 /* if user mode only, we simulate a fake exception
421 which will be handled outside the cpu execution
423 #if defined(TARGET_I386)
424 do_interrupt_user(env
->exception_index
,
425 env
->exception_is_int
,
427 env
->exception_next_eip
);
429 ret
= env
->exception_index
;
432 #if defined(TARGET_I386)
433 /* simulate a real cpu exception. On i386, it can
434 trigger new exceptions, but we do not handle
435 double or triple faults yet. */
436 do_interrupt(env
->exception_index
,
437 env
->exception_is_int
,
439 env
->exception_next_eip
, 0);
440 /* successfully delivered */
441 env
->old_exception
= -1;
442 #elif defined(TARGET_PPC)
444 #elif defined(TARGET_MIPS)
446 #elif defined(TARGET_SPARC)
447 do_interrupt(env
->exception_index
);
448 #elif defined(TARGET_ARM)
450 #elif defined(TARGET_SH4)
452 #elif defined(TARGET_ALPHA)
454 #elif defined(TARGET_CRIS)
456 #elif defined(TARGET_M68K)
458 #elif defined(TARGET_IA64)
462 env
->exception_index
= -1;
465 if (kqemu_is_ok(env
) && env
->interrupt_request
== 0) {
467 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
468 ret
= kqemu_cpu_exec(env
);
469 /* put eflags in CPU temporary format */
470 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
471 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
472 CC_OP
= CC_OP_EFLAGS
;
473 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
476 longjmp(env
->jmp_env
, 1);
477 } else if (ret
== 2) {
478 /* softmmu execution needed */
480 if (env
->interrupt_request
!= 0) {
481 /* hardware interrupt will be executed just after */
483 /* otherwise, we restart */
484 longjmp(env
->jmp_env
, 1);
493 longjmp(env
->jmp_env
, 1);
496 T0
= 0; /* force lookup of first TB */
499 interrupt_request
= env
->interrupt_request
;
500 if (__builtin_expect(interrupt_request
, 0)
501 #if defined(TARGET_I386)
502 && env
->hflags
& HF_GIF_MASK
505 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
506 env
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
507 env
->exception_index
= EXCP_DEBUG
;
510 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
511 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
512 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
513 env
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
515 env
->exception_index
= EXCP_HLT
;
519 #if defined(TARGET_I386)
520 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
521 !(env
->hflags
& HF_SMM_MASK
)) {
522 svm_check_intercept(SVM_EXIT_SMI
);
523 env
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
526 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
527 (env
->eflags
& IF_MASK
|| env
->hflags
& HF_HIF_MASK
) &&
528 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
530 svm_check_intercept(SVM_EXIT_INTR
);
531 env
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VIRQ
);
532 intno
= cpu_get_pic_interrupt(env
);
533 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
534 fprintf(logfile
, "Servicing hardware INT=0x%02x\n", intno
);
536 do_interrupt(intno
, 0, 0, 0, 1);
537 /* ensure that no TB jump will be modified as
538 the program flow was changed */
540 #if !defined(CONFIG_USER_ONLY)
541 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
542 (env
->eflags
& IF_MASK
) && !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
544 /* FIXME: this should respect TPR */
545 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
546 svm_check_intercept(SVM_EXIT_VINTR
);
547 intno
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_vector
));
548 if (loglevel
& CPU_LOG_TB_IN_ASM
)
549 fprintf(logfile
, "Servicing virtual hardware INT=0x%02x\n", intno
);
550 do_interrupt(intno
, 0, 0, -1, 1);
551 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
),
552 ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
)) & ~V_IRQ_MASK
);
556 #elif defined(TARGET_PPC)
558 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
562 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
563 ppc_hw_interrupt(env
);
564 if (env
->pending_interrupts
== 0)
565 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
568 #elif defined(TARGET_MIPS)
569 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
570 (env
->CP0_Status
& env
->CP0_Cause
& CP0Ca_IP_mask
) &&
571 (env
->CP0_Status
& (1 << CP0St_IE
)) &&
572 !(env
->CP0_Status
& (1 << CP0St_EXL
)) &&
573 !(env
->CP0_Status
& (1 << CP0St_ERL
)) &&
574 !(env
->hflags
& MIPS_HFLAG_DM
)) {
576 env
->exception_index
= EXCP_EXT_INTERRUPT
;
581 #elif defined(TARGET_SPARC)
582 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
584 int pil
= env
->interrupt_index
& 15;
585 int type
= env
->interrupt_index
& 0xf0;
587 if (((type
== TT_EXTINT
) &&
588 (pil
== 15 || pil
> env
->psrpil
)) ||
590 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
591 do_interrupt(env
->interrupt_index
);
592 env
->interrupt_index
= 0;
593 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
598 } else if (interrupt_request
& CPU_INTERRUPT_TIMER
) {
599 //do_interrupt(0, 0, 0, 0, 0);
600 env
->interrupt_request
&= ~CPU_INTERRUPT_TIMER
;
602 #elif defined(TARGET_ARM)
603 if (interrupt_request
& CPU_INTERRUPT_FIQ
604 && !(env
->uncached_cpsr
& CPSR_F
)) {
605 env
->exception_index
= EXCP_FIQ
;
609 /* ARMv7-M interrupt return works by loading a magic value
610 into the PC. On real hardware the load causes the
611 return to occur. The qemu implementation performs the
612 jump normally, then does the exception return when the
613 CPU tries to execute code at the magic address.
614 This will cause the magic PC value to be pushed to
615 the stack if an interrupt occured at the wrong time.
616 We avoid this by disabling interrupts when
617 pc contains a magic address. */
618 if (interrupt_request
& CPU_INTERRUPT_HARD
619 && ((IS_M(env
) && env
->regs
[15] < 0xfffffff0)
620 || !(env
->uncached_cpsr
& CPSR_I
))) {
621 env
->exception_index
= EXCP_IRQ
;
625 #elif defined(TARGET_SH4)
626 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
630 #elif defined(TARGET_ALPHA)
631 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
635 #elif defined(TARGET_CRIS)
636 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
638 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
641 #elif defined(TARGET_M68K)
642 if (interrupt_request
& CPU_INTERRUPT_HARD
643 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
644 < env
->pending_level
) {
645 /* Real hardware gets the interrupt vector via an
646 IACK cycle at this point. Current emulated
647 hardware doesn't rely on this, so we
648 provide/save the vector when the interrupt is
650 env
->exception_index
= env
->pending_vector
;
655 /* Don't use the cached interupt_request value,
656 do_interrupt may have updated the EXITTB flag. */
657 if (env
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
658 env
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
659 /* ensure that no TB jump will be modified as
660 the program flow was changed */
663 if (interrupt_request
& CPU_INTERRUPT_EXIT
) {
664 env
->interrupt_request
&= ~CPU_INTERRUPT_EXIT
;
665 env
->exception_index
= EXCP_INTERRUPT
;
670 if ((loglevel
& CPU_LOG_TB_CPU
)) {
671 /* restore flags in standard format */
673 #if defined(TARGET_I386)
674 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
675 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
676 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
677 #elif defined(TARGET_ARM)
678 cpu_dump_state(env
, logfile
, fprintf
, 0);
679 #elif defined(TARGET_SPARC)
680 REGWPTR
= env
->regbase
+ (env
->cwp
* 16);
681 env
->regwptr
= REGWPTR
;
682 cpu_dump_state(env
, logfile
, fprintf
, 0);
683 #elif defined(TARGET_PPC)
684 cpu_dump_state(env
, logfile
, fprintf
, 0);
685 #elif defined(TARGET_M68K)
686 cpu_m68k_flush_flags(env
, env
->cc_op
);
687 env
->cc_op
= CC_OP_FLAGS
;
688 env
->sr
= (env
->sr
& 0xffe0)
689 | env
->cc_dest
| (env
->cc_x
<< 4);
690 cpu_dump_state(env
, logfile
, fprintf
, 0);
691 #elif defined(TARGET_MIPS)
692 cpu_dump_state(env
, logfile
, fprintf
, 0);
693 #elif defined(TARGET_SH4)
694 cpu_dump_state(env
, logfile
, fprintf
, 0);
695 #elif defined(TARGET_ALPHA)
696 cpu_dump_state(env
, logfile
, fprintf
, 0);
697 #elif defined(TARGET_CRIS)
698 cpu_dump_state(env
, logfile
, fprintf
, 0);
700 #error unsupported target CPU
706 if ((loglevel
& CPU_LOG_EXEC
)) {
707 fprintf(logfile
, "Trace 0x%08lx [" TARGET_FMT_lx
"] %s\n",
708 (long)tb
->tc_ptr
, tb
->pc
,
709 lookup_symbol(tb
->pc
));
713 /* see if we can patch the calling TB. When the TB
714 spans two pages, we cannot safely do a direct
719 (env
->kqemu_enabled
!= 2) &&
721 tb
->page_addr
[1] == -1) {
723 tb_add_jump((TranslationBlock
*)(long)(T0
& ~3), T0
& 3, tb
);
724 spin_unlock(&tb_lock
);
728 env
->current_tb
= tb
;
729 /* execute the generated code */
730 gen_func
= (void *)tc_ptr
;
731 #if defined(__sparc__)
732 __asm__
__volatile__("call %0\n\t"
736 : "i0", "i1", "i2", "i3", "i4", "i5",
737 "o0", "o1", "o2", "o3", "o4", "o5",
738 "l0", "l1", "l2", "l3", "l4", "l5",
740 #elif defined(__arm__)
741 asm volatile ("mov pc, %0\n\t"
742 ".global exec_loop\n\t"
746 : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
747 #elif defined(__ia64)
754 fp
.gp
= code_gen_buffer
+ 2 * (1 << 20);
755 (*(void (*)(void)) &fp
)();
759 env
->current_tb
= NULL
;
760 /* reset soft MMU for next block (it can currently
761 only be set by a memory fault) */
762 #if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
763 if (env
->hflags
& HF_SOFTMMU_MASK
) {
764 env
->hflags
&= ~HF_SOFTMMU_MASK
;
765 /* do not allow linking to another block */
769 #if defined(USE_KQEMU)
770 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
771 if (kqemu_is_ok(env
) &&
772 (cpu_get_time_fast() - env
->last_io_time
) >= MIN_CYCLE_BEFORE_SWITCH
) {
783 #if defined(TARGET_I386)
784 /* restore flags in standard format */
785 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
786 #elif defined(TARGET_ARM)
787 /* XXX: Save/restore host fpu exception state?. */
788 #elif defined(TARGET_SPARC)
789 #if defined(reg_REGWPTR)
790 REGWPTR
= saved_regwptr
;
792 #elif defined(TARGET_PPC)
793 #elif defined(TARGET_M68K)
794 cpu_m68k_flush_flags(env
, env
->cc_op
);
795 env
->cc_op
= CC_OP_FLAGS
;
796 env
->sr
= (env
->sr
& 0xffe0)
797 | env
->cc_dest
| (env
->cc_x
<< 4);
798 #elif defined(TARGET_MIPS)
799 #elif defined(TARGET_SH4)
800 #elif defined(TARGET_IA64)
801 #elif defined(TARGET_ALPHA)
802 #elif defined(TARGET_CRIS)
805 #error unsupported target CPU
808 /* restore global registers */
810 #include "hostregs_helper.h"
812 /* fail safe : never use cpu_single_env outside cpu_exec() */
813 cpu_single_env
= NULL
;
817 /* must only be called from the generated code as an exception can be
819 void tb_invalidate_page_range(target_ulong start
, target_ulong end
)
821 /* XXX: cannot enable it yet because it yields to MMU exception
822 where NIP != read address on PowerPC */
824 target_ulong phys_addr
;
825 phys_addr
= get_phys_addr_code(env
, start
);
826 tb_invalidate_phys_page_range(phys_addr
, phys_addr
+ end
- start
, 0);
830 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
832 void cpu_x86_load_seg(CPUX86State
*s
, int seg_reg
, int selector
)
834 CPUX86State
*saved_env
;
838 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
840 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
841 (selector
<< 4), 0xffff, 0);
843 load_seg(seg_reg
, selector
);
848 void cpu_x86_fsave(CPUX86State
*s
, target_ulong ptr
, int data32
)
850 CPUX86State
*saved_env
;
855 helper_fsave(ptr
, data32
);
860 void cpu_x86_frstor(CPUX86State
*s
, target_ulong ptr
, int data32
)
862 CPUX86State
*saved_env
;
867 helper_frstor(ptr
, data32
);
872 #endif /* TARGET_I386 */
874 #if !defined(CONFIG_SOFTMMU)
876 #if defined(TARGET_I386)
878 /* 'pc' is the host PC at which the exception was raised. 'address' is
879 the effective address of the memory exception. 'is_write' is 1 if a
880 write caused the exception and otherwise 0'. 'old_set' is the
881 signal set which should be restored */
882 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
883 int is_write
, sigset_t
*old_set
,
886 TranslationBlock
*tb
;
890 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
891 #if defined(DEBUG_SIGNAL)
892 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
893 pc
, address
, is_write
, *(unsigned long *)old_set
);
895 /* XXX: locking issue */
896 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
900 /* see if it is an MMU fault */
901 ret
= cpu_x86_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
903 return 0; /* not an MMU fault */
905 return 1; /* the MMU fault was handled without causing real CPU fault */
906 /* now we have a real cpu fault */
909 /* the PC is inside the translated code. It means that we have
910 a virtual CPU fault */
911 cpu_restore_state(tb
, env
, pc
, puc
);
915 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
916 env
->eip
, env
->cr
[2], env
->error_code
);
918 /* we restore the process signal mask as the sigreturn should
919 do it (XXX: use sigsetjmp) */
920 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
921 raise_exception_err(env
->exception_index
, env
->error_code
);
923 /* activate soft MMU for this block */
924 env
->hflags
|= HF_SOFTMMU_MASK
;
925 cpu_resume_from_signal(env
, puc
);
927 /* never comes here */
931 #elif defined(TARGET_ARM)
932 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
933 int is_write
, sigset_t
*old_set
,
936 TranslationBlock
*tb
;
940 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
941 #if defined(DEBUG_SIGNAL)
942 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
943 pc
, address
, is_write
, *(unsigned long *)old_set
);
945 /* XXX: locking issue */
946 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
949 /* see if it is an MMU fault */
950 ret
= cpu_arm_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
952 return 0; /* not an MMU fault */
954 return 1; /* the MMU fault was handled without causing real CPU fault */
955 /* now we have a real cpu fault */
958 /* the PC is inside the translated code. It means that we have
959 a virtual CPU fault */
960 cpu_restore_state(tb
, env
, pc
, puc
);
962 /* we restore the process signal mask as the sigreturn should
963 do it (XXX: use sigsetjmp) */
964 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
967 #elif defined(TARGET_SPARC)
968 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
969 int is_write
, sigset_t
*old_set
,
972 TranslationBlock
*tb
;
976 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
977 #if defined(DEBUG_SIGNAL)
978 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
979 pc
, address
, is_write
, *(unsigned long *)old_set
);
981 /* XXX: locking issue */
982 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
985 /* see if it is an MMU fault */
986 ret
= cpu_sparc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
988 return 0; /* not an MMU fault */
990 return 1; /* the MMU fault was handled without causing real CPU fault */
991 /* now we have a real cpu fault */
994 /* the PC is inside the translated code. It means that we have
995 a virtual CPU fault */
996 cpu_restore_state(tb
, env
, pc
, puc
);
998 /* we restore the process signal mask as the sigreturn should
999 do it (XXX: use sigsetjmp) */
1000 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1003 #elif defined (TARGET_PPC)
1004 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1005 int is_write
, sigset_t
*old_set
,
1008 TranslationBlock
*tb
;
1012 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1013 #if defined(DEBUG_SIGNAL)
1014 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1015 pc
, address
, is_write
, *(unsigned long *)old_set
);
1017 /* XXX: locking issue */
1018 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1022 /* see if it is an MMU fault */
1023 ret
= cpu_ppc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1025 return 0; /* not an MMU fault */
1027 return 1; /* the MMU fault was handled without causing real CPU fault */
1029 /* now we have a real cpu fault */
1030 tb
= tb_find_pc(pc
);
1032 /* the PC is inside the translated code. It means that we have
1033 a virtual CPU fault */
1034 cpu_restore_state(tb
, env
, pc
, puc
);
1038 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1039 env
->nip
, env
->error_code
, tb
);
1041 /* we restore the process signal mask as the sigreturn should
1042 do it (XXX: use sigsetjmp) */
1043 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1044 do_raise_exception_err(env
->exception_index
, env
->error_code
);
1046 /* activate soft MMU for this block */
1047 cpu_resume_from_signal(env
, puc
);
1049 /* never comes here */
1053 #elif defined(TARGET_M68K)
1054 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1055 int is_write
, sigset_t
*old_set
,
1058 TranslationBlock
*tb
;
1062 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1063 #if defined(DEBUG_SIGNAL)
1064 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1065 pc
, address
, is_write
, *(unsigned long *)old_set
);
1067 /* XXX: locking issue */
1068 if (is_write
&& page_unprotect(address
, pc
, puc
)) {
1071 /* see if it is an MMU fault */
1072 ret
= cpu_m68k_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1074 return 0; /* not an MMU fault */
1076 return 1; /* the MMU fault was handled without causing real CPU fault */
1077 /* now we have a real cpu fault */
1078 tb
= tb_find_pc(pc
);
1080 /* the PC is inside the translated code. It means that we have
1081 a virtual CPU fault */
1082 cpu_restore_state(tb
, env
, pc
, puc
);
1084 /* we restore the process signal mask as the sigreturn should
1085 do it (XXX: use sigsetjmp) */
1086 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1088 /* never comes here */
1092 #elif defined (TARGET_MIPS)
1093 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1094 int is_write
, sigset_t
*old_set
,
1097 TranslationBlock
*tb
;
1101 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1102 #if defined(DEBUG_SIGNAL)
1103 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1104 pc
, address
, is_write
, *(unsigned long *)old_set
);
1106 /* XXX: locking issue */
1107 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1111 /* see if it is an MMU fault */
1112 ret
= cpu_mips_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1114 return 0; /* not an MMU fault */
1116 return 1; /* the MMU fault was handled without causing real CPU fault */
1118 /* now we have a real cpu fault */
1119 tb
= tb_find_pc(pc
);
1121 /* the PC is inside the translated code. It means that we have
1122 a virtual CPU fault */
1123 cpu_restore_state(tb
, env
, pc
, puc
);
1127 printf("PF exception: PC=0x" TARGET_FMT_lx
" error=0x%x %p\n",
1128 env
->PC
, env
->error_code
, tb
);
1130 /* we restore the process signal mask as the sigreturn should
1131 do it (XXX: use sigsetjmp) */
1132 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1133 do_raise_exception_err(env
->exception_index
, env
->error_code
);
1135 /* activate soft MMU for this block */
1136 cpu_resume_from_signal(env
, puc
);
1138 /* never comes here */
1142 #elif defined (TARGET_SH4)
1143 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1144 int is_write
, sigset_t
*old_set
,
1147 TranslationBlock
*tb
;
1151 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1152 #if defined(DEBUG_SIGNAL)
1153 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1154 pc
, address
, is_write
, *(unsigned long *)old_set
);
1156 /* XXX: locking issue */
1157 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1161 /* see if it is an MMU fault */
1162 ret
= cpu_sh4_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1164 return 0; /* not an MMU fault */
1166 return 1; /* the MMU fault was handled without causing real CPU fault */
1168 /* now we have a real cpu fault */
1169 tb
= tb_find_pc(pc
);
1171 /* the PC is inside the translated code. It means that we have
1172 a virtual CPU fault */
1173 cpu_restore_state(tb
, env
, pc
, puc
);
1176 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1177 env
->nip
, env
->error_code
, tb
);
1179 /* we restore the process signal mask as the sigreturn should
1180 do it (XXX: use sigsetjmp) */
1181 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1183 /* never comes here */
1187 #elif defined (TARGET_ALPHA)
1188 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1189 int is_write
, sigset_t
*old_set
,
1192 TranslationBlock
*tb
;
1196 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1197 #if defined(DEBUG_SIGNAL)
1198 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1199 pc
, address
, is_write
, *(unsigned long *)old_set
);
1201 /* XXX: locking issue */
1202 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1206 /* see if it is an MMU fault */
1207 ret
= cpu_alpha_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1209 return 0; /* not an MMU fault */
1211 return 1; /* the MMU fault was handled without causing real CPU fault */
1213 /* now we have a real cpu fault */
1214 tb
= tb_find_pc(pc
);
1216 /* the PC is inside the translated code. It means that we have
1217 a virtual CPU fault */
1218 cpu_restore_state(tb
, env
, pc
, puc
);
1221 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1222 env
->nip
, env
->error_code
, tb
);
1224 /* we restore the process signal mask as the sigreturn should
1225 do it (XXX: use sigsetjmp) */
1226 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1228 /* never comes here */
1231 #elif defined (TARGET_CRIS)
1232 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1233 int is_write
, sigset_t
*old_set
,
1236 TranslationBlock
*tb
;
1240 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1241 #if defined(DEBUG_SIGNAL)
1242 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1243 pc
, address
, is_write
, *(unsigned long *)old_set
);
1245 /* XXX: locking issue */
1246 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1250 /* see if it is an MMU fault */
1251 ret
= cpu_cris_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1253 return 0; /* not an MMU fault */
1255 return 1; /* the MMU fault was handled without causing real CPU fault */
1257 /* now we have a real cpu fault */
1258 tb
= tb_find_pc(pc
);
1260 /* the PC is inside the translated code. It means that we have
1261 a virtual CPU fault */
1262 cpu_restore_state(tb
, env
, pc
, puc
);
1265 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1266 env
->nip
, env
->error_code
, tb
);
1268 /* we restore the process signal mask as the sigreturn should
1269 do it (XXX: use sigsetjmp) */
1270 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1272 /* never comes here */
1277 #error unsupported target CPU
1280 #if defined(__i386__)
1282 #if defined(__APPLE__)
1283 # include <sys/ucontext.h>
1285 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1286 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1287 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1289 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1290 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1291 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1294 int cpu_signal_handler(int host_signum
, void *pinfo
,
1297 siginfo_t
*info
= pinfo
;
1298 struct ucontext
*uc
= puc
;
1306 #define REG_TRAPNO TRAPNO
1309 trapno
= TRAP_sig(uc
);
1310 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1312 (ERROR_sig(uc
) >> 1) & 1 : 0,
1313 &uc
->uc_sigmask
, puc
);
1316 #elif defined(__x86_64__)
1318 int cpu_signal_handler(int host_signum
, void *pinfo
,
1321 siginfo_t
*info
= pinfo
;
1322 struct ucontext
*uc
= puc
;
1325 pc
= uc
->uc_mcontext
.gregs
[REG_RIP
];
1326 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1327 uc
->uc_mcontext
.gregs
[REG_TRAPNO
] == 0xe ?
1328 (uc
->uc_mcontext
.gregs
[REG_ERR
] >> 1) & 1 : 0,
1329 &uc
->uc_sigmask
, puc
);
1332 #elif defined(__powerpc__)
1334 /***********************************************************************
1335 * signal context platform-specific definitions
1339 /* All Registers access - only for local access */
1340 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1341 /* Gpr Registers access */
1342 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1343 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1344 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1345 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1346 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1347 # define LR_sig(context) REG_sig(link, context) /* Link register */
1348 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1349 /* Float Registers access */
1350 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1351 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1352 /* Exception Registers access */
1353 # define DAR_sig(context) REG_sig(dar, context)
1354 # define DSISR_sig(context) REG_sig(dsisr, context)
1355 # define TRAP_sig(context) REG_sig(trap, context)
1359 # include <sys/ucontext.h>
1360 typedef struct ucontext SIGCONTEXT
;
1361 /* All Registers access - only for local access */
1362 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1363 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1364 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1365 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1366 /* Gpr Registers access */
1367 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1368 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1369 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1370 # define CTR_sig(context) REG_sig(ctr, context)
1371 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1372 # define LR_sig(context) REG_sig(lr, context) /* Link register */
1373 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1374 /* Float Registers access */
1375 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1376 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1377 /* Exception Registers access */
1378 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1379 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1380 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1381 #endif /* __APPLE__ */
1383 int cpu_signal_handler(int host_signum
, void *pinfo
,
1386 siginfo_t
*info
= pinfo
;
1387 struct ucontext
*uc
= puc
;
1395 if (DSISR_sig(uc
) & 0x00800000)
1398 if (TRAP_sig(uc
) != 0x400 && (DSISR_sig(uc
) & 0x02000000))
1401 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1402 is_write
, &uc
->uc_sigmask
, puc
);
1405 #elif defined(__alpha__)
1407 int cpu_signal_handler(int host_signum
, void *pinfo
,
1410 siginfo_t
*info
= pinfo
;
1411 struct ucontext
*uc
= puc
;
1412 uint32_t *pc
= uc
->uc_mcontext
.sc_pc
;
1413 uint32_t insn
= *pc
;
1416 /* XXX: need kernel patch to get write flag faster */
1417 switch (insn
>> 26) {
1432 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1433 is_write
, &uc
->uc_sigmask
, puc
);
1435 #elif defined(__sparc__)
1437 int cpu_signal_handler(int host_signum
, void *pinfo
,
1440 siginfo_t
*info
= pinfo
;
1441 uint32_t *regs
= (uint32_t *)(info
+ 1);
1442 void *sigmask
= (regs
+ 20);
1447 /* XXX: is there a standard glibc define ? */
1449 /* XXX: need kernel patch to get write flag faster */
1451 insn
= *(uint32_t *)pc
;
1452 if ((insn
>> 30) == 3) {
1453 switch((insn
>> 19) & 0x3f) {
1465 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1466 is_write
, sigmask
, NULL
);
1469 #elif defined(__arm__)
1471 int cpu_signal_handler(int host_signum
, void *pinfo
,
1474 siginfo_t
*info
= pinfo
;
1475 struct ucontext
*uc
= puc
;
1479 pc
= uc
->uc_mcontext
.gregs
[R15
];
1480 /* XXX: compute is_write */
1482 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1484 &uc
->uc_sigmask
, puc
);
1487 #elif defined(__mc68000)
1489 int cpu_signal_handler(int host_signum
, void *pinfo
,
1492 siginfo_t
*info
= pinfo
;
1493 struct ucontext
*uc
= puc
;
1497 pc
= uc
->uc_mcontext
.gregs
[16];
1498 /* XXX: compute is_write */
1500 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1502 &uc
->uc_sigmask
, puc
);
1505 #elif defined(__ia64)
1508 /* This ought to be in <bits/siginfo.h>... */
1509 # define __ISR_VALID 1
1512 int cpu_signal_handler(int host_signum
, void *pinfo
, void *puc
)
1514 siginfo_t
*info
= pinfo
;
1515 struct ucontext
*uc
= puc
;
1519 ip
= uc
->uc_mcontext
.sc_ip
;
1520 switch (host_signum
) {
1526 if (info
->si_code
&& (info
->si_segvflags
& __ISR_VALID
))
1527 /* ISR.W (write-access) is bit 33: */
1528 is_write
= (info
->si_isr
>> 33) & 1;
1534 return handle_cpu_signal(ip
, (unsigned long)info
->si_addr
,
1536 &uc
->uc_sigmask
, puc
);
1539 #elif defined(__s390__)
1541 int cpu_signal_handler(int host_signum
, void *pinfo
,
1544 siginfo_t
*info
= pinfo
;
1545 struct ucontext
*uc
= puc
;
1549 pc
= uc
->uc_mcontext
.psw
.addr
;
1550 /* XXX: compute is_write */
1552 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1553 is_write
, &uc
->uc_sigmask
, puc
);
1556 #elif defined(__mips__)
1558 int cpu_signal_handler(int host_signum
, void *pinfo
,
1561 siginfo_t
*info
= pinfo
;
1562 struct ucontext
*uc
= puc
;
1563 greg_t pc
= uc
->uc_mcontext
.pc
;
1566 /* XXX: compute is_write */
1568 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1569 is_write
, &uc
->uc_sigmask
, puc
);
1574 #error host CPU specific signal handler needed
1578 #endif /* !defined(CONFIG_SOFTMMU) */