/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
24 #if !defined(CONFIG_SOFTMMU)
35 #include <sys/ucontext.h>
40 int tb_invalidated_flag
;
43 //#define DEBUG_SIGNAL
45 #define SAVE_GLOBALS()
46 #define RESTORE_GLOBALS()
48 #if defined(__sparc__) && !defined(HOST_SOLARIS)
50 #if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
51 ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
52 // Work around ugly bugs in glibc that mangle global register contents
54 static volatile void *saved_env
;
55 static volatile unsigned long saved_t0
, saved_i7
;
57 #define SAVE_GLOBALS() do { \
60 asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7)); \
63 #undef RESTORE_GLOBALS
64 #define RESTORE_GLOBALS() do { \
65 env = (void *)saved_env; \
67 asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7)); \
70 static int sparc_setjmp(jmp_buf buf
)
80 #define setjmp(jmp_buf) sparc_setjmp(jmp_buf)
82 static void sparc_longjmp(jmp_buf buf
, int val
)
87 #define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
91 void cpu_loop_exit(void)
93 /* NOTE: the register at this point must be saved by hand because
94 longjmp restore them */
96 longjmp(env
->jmp_env
, 1);
99 #if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
103 /* exit the current TB from a signal handler. The host registers are
104 restored in a state compatible with the CPU emulator
106 void cpu_resume_from_signal(CPUState
*env1
, void *puc
)
108 #if !defined(CONFIG_SOFTMMU)
109 struct ucontext
*uc
= puc
;
114 /* XXX: restore cpu registers saved in host registers */
116 #if !defined(CONFIG_SOFTMMU)
118 /* XXX: use siglongjmp ? */
119 sigprocmask(SIG_SETMASK
, &uc
->uc_sigmask
, NULL
);
122 longjmp(env
->jmp_env
, 1);
125 static TranslationBlock
*tb_find_slow(target_ulong pc
,
126 target_ulong cs_base
,
129 TranslationBlock
*tb
, **ptb1
;
132 target_ulong phys_pc
, phys_page1
, phys_page2
, virt_page2
;
137 tb_invalidated_flag
= 0;
139 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
141 /* find translated block using physical mappings */
142 phys_pc
= get_phys_addr_code(env
, pc
);
143 phys_page1
= phys_pc
& TARGET_PAGE_MASK
;
145 h
= tb_phys_hash_func(phys_pc
);
146 ptb1
= &tb_phys_hash
[h
];
152 tb
->page_addr
[0] == phys_page1
&&
153 tb
->cs_base
== cs_base
&&
154 tb
->flags
== flags
) {
155 /* check next page if needed */
156 if (tb
->page_addr
[1] != -1) {
157 virt_page2
= (pc
& TARGET_PAGE_MASK
) +
159 phys_page2
= get_phys_addr_code(env
, virt_page2
);
160 if (tb
->page_addr
[1] == phys_page2
)
166 ptb1
= &tb
->phys_hash_next
;
169 /* if no translated code available, then translate it now */
172 /* flush must be done */
174 /* cannot fail at this point */
176 /* don't forget to invalidate previous TB info */
177 tb_invalidated_flag
= 1;
179 tc_ptr
= code_gen_ptr
;
181 tb
->cs_base
= cs_base
;
184 cpu_gen_code(env
, tb
, &code_gen_size
);
186 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
188 /* check next page if needed */
189 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
191 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
192 phys_page2
= get_phys_addr_code(env
, virt_page2
);
194 tb_link_phys(tb
, phys_pc
, phys_page2
);
197 /* we add the TB in the virtual pc hash table */
198 env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)] = tb
;
199 spin_unlock(&tb_lock
);
203 static inline TranslationBlock
*tb_find_fast(void)
205 TranslationBlock
*tb
;
206 target_ulong cs_base
, pc
;
209 /* we record a subset of the CPU state. It will
210 always be the same before a given translated block
212 #if defined(TARGET_I386)
214 flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
215 flags
|= env
->intercept
;
216 cs_base
= env
->segs
[R_CS
].base
;
217 pc
= cs_base
+ env
->eip
;
218 #elif defined(TARGET_ARM)
219 flags
= env
->thumb
| (env
->vfp
.vec_len
<< 1)
220 | (env
->vfp
.vec_stride
<< 4);
221 if ((env
->uncached_cpsr
& CPSR_M
) != ARM_CPU_MODE_USR
)
223 if (env
->vfp
.xregs
[ARM_VFP_FPEXC
] & (1 << 30))
225 flags
|= (env
->condexec_bits
<< 8);
228 #elif defined(TARGET_SPARC)
229 #ifdef TARGET_SPARC64
230 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
231 flags
= (((env
->pstate
& PS_PEF
) >> 1) | ((env
->fprs
& FPRS_FEF
) << 2))
232 | (env
->pstate
& PS_PRIV
) | ((env
->lsu
& (DMMU_E
| IMMU_E
)) >> 2);
234 // FPU enable . Supervisor
235 flags
= (env
->psref
<< 4) | env
->psrs
;
239 #elif defined(TARGET_PPC)
243 #elif defined(TARGET_MIPS)
244 flags
= env
->hflags
& (MIPS_HFLAG_TMASK
| MIPS_HFLAG_BMASK
);
246 pc
= env
->PC
[env
->current_tc
];
247 #elif defined(TARGET_M68K)
248 flags
= (env
->fpcr
& M68K_FPCR_PREC
) /* Bit 6 */
249 | (env
->sr
& SR_S
) /* Bit 13 */
250 | ((env
->macsr
>> 4) & 0xf); /* Bits 0-3 */
253 #elif defined(TARGET_SH4)
257 #elif defined(TARGET_ALPHA)
261 #elif defined(TARGET_CRIS)
265 #elif defined(TARGET_IA64)
267 cs_base
= 0; /* XXXXX */
270 #error unsupported CPU
272 tb
= env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)];
273 if (__builtin_expect(!tb
|| tb
->pc
!= pc
|| tb
->cs_base
!= cs_base
||
274 tb
->flags
!= flags
, 0)) {
275 tb
= tb_find_slow(pc
, cs_base
, flags
);
276 /* Note: we do it here to avoid a gcc bug on Mac OS X when
277 doing it in tb_find_slow */
278 if (tb_invalidated_flag
) {
279 /* as some TB could have been invalidated because
280 of memory exceptions while generating the code, we
281 must recompute the hash index here */
288 #define BREAK_CHAIN T0 = 0
290 /* main execution loop */
292 int cpu_exec(CPUState
*env1
)
294 #define DECLARE_HOST_REGS 1
295 #include "hostregs_helper.h"
296 #if defined(TARGET_SPARC)
297 #if defined(reg_REGWPTR)
298 uint32_t *saved_regwptr
;
301 int ret
, interrupt_request
;
302 long (*gen_func
)(void);
303 TranslationBlock
*tb
;
306 if (cpu_halted(env1
) == EXCP_HALTED
)
309 cpu_single_env
= env1
;
311 /* first we save global registers */
312 #define SAVE_HOST_REGS 1
313 #include "hostregs_helper.h"
318 #if defined(TARGET_I386)
319 /* put eflags in CPU temporary format */
320 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
321 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
322 CC_OP
= CC_OP_EFLAGS
;
323 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
324 #elif defined(TARGET_SPARC)
325 #if defined(reg_REGWPTR)
326 saved_regwptr
= REGWPTR
;
328 #elif defined(TARGET_M68K)
329 env
->cc_op
= CC_OP_FLAGS
;
330 env
->cc_dest
= env
->sr
& 0xf;
331 env
->cc_x
= (env
->sr
>> 4) & 1;
332 #elif defined(TARGET_ALPHA)
333 #elif defined(TARGET_ARM)
334 #elif defined(TARGET_PPC)
335 #elif defined(TARGET_MIPS)
336 #elif defined(TARGET_SH4)
337 #elif defined(TARGET_CRIS)
338 #elif defined(TARGET_IA64)
341 #error unsupported target CPU
343 env
->exception_index
= -1;
345 /* prepare setjmp context for exception handling */
347 if (setjmp(env
->jmp_env
) == 0) {
348 env
->current_tb
= NULL
;
349 /* if an exception is pending, we execute it here */
350 if (env
->exception_index
>= 0) {
351 if (env
->exception_index
>= EXCP_INTERRUPT
) {
352 /* exit request from the cpu execution loop */
353 ret
= env
->exception_index
;
355 } else if (env
->user_mode_only
) {
356 /* if user mode only, we simulate a fake exception
357 which will be handled outside the cpu execution
359 #if defined(TARGET_I386)
360 do_interrupt_user(env
->exception_index
,
361 env
->exception_is_int
,
363 env
->exception_next_eip
);
365 ret
= env
->exception_index
;
368 #if defined(TARGET_I386)
369 /* simulate a real cpu exception. On i386, it can
370 trigger new exceptions, but we do not handle
371 double or triple faults yet. */
372 do_interrupt(env
->exception_index
,
373 env
->exception_is_int
,
375 env
->exception_next_eip
, 0);
376 /* successfully delivered */
377 env
->old_exception
= -1;
378 #elif defined(TARGET_PPC)
380 #elif defined(TARGET_MIPS)
382 #elif defined(TARGET_SPARC)
383 do_interrupt(env
->exception_index
);
384 #elif defined(TARGET_ARM)
386 #elif defined(TARGET_SH4)
388 #elif defined(TARGET_ALPHA)
390 #elif defined(TARGET_CRIS)
392 #elif defined(TARGET_M68K)
394 #elif defined(TARGET_IA64)
398 env
->exception_index
= -1;
401 if (kqemu_is_ok(env
) && env
->interrupt_request
== 0) {
403 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
404 ret
= kqemu_cpu_exec(env
);
405 /* put eflags in CPU temporary format */
406 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
407 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
408 CC_OP
= CC_OP_EFLAGS
;
409 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
412 longjmp(env
->jmp_env
, 1);
413 } else if (ret
== 2) {
414 /* softmmu execution needed */
416 if (env
->interrupt_request
!= 0) {
417 /* hardware interrupt will be executed just after */
419 /* otherwise, we restart */
420 longjmp(env
->jmp_env
, 1);
428 longjmp(env
->jmp_env
, 1);
430 T0
= 0; /* force lookup of first TB */
433 interrupt_request
= env
->interrupt_request
;
434 if (__builtin_expect(interrupt_request
, 0)
435 #if defined(TARGET_I386)
436 && env
->hflags
& HF_GIF_MASK
439 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
440 env
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
441 env
->exception_index
= EXCP_DEBUG
;
444 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
445 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
446 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
447 env
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
449 env
->exception_index
= EXCP_HLT
;
453 #if defined(TARGET_I386)
454 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
455 !(env
->hflags
& HF_SMM_MASK
)) {
456 svm_check_intercept(SVM_EXIT_SMI
);
457 env
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
460 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
461 !(env
->hflags
& HF_NMI_MASK
)) {
462 env
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
463 env
->hflags
|= HF_NMI_MASK
;
464 do_interrupt(EXCP02_NMI
, 0, 0, 0, 1);
466 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
467 (env
->eflags
& IF_MASK
|| env
->hflags
& HF_HIF_MASK
) &&
468 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
470 svm_check_intercept(SVM_EXIT_INTR
);
471 env
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VIRQ
);
472 intno
= cpu_get_pic_interrupt(env
);
473 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
474 fprintf(logfile
, "Servicing hardware INT=0x%02x\n", intno
);
476 do_interrupt(intno
, 0, 0, 0, 1);
477 /* ensure that no TB jump will be modified as
478 the program flow was changed */
480 #if !defined(CONFIG_USER_ONLY)
481 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
482 (env
->eflags
& IF_MASK
) && !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
484 /* FIXME: this should respect TPR */
485 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
486 svm_check_intercept(SVM_EXIT_VINTR
);
487 intno
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_vector
));
488 if (loglevel
& CPU_LOG_TB_IN_ASM
)
489 fprintf(logfile
, "Servicing virtual hardware INT=0x%02x\n", intno
);
490 do_interrupt(intno
, 0, 0, -1, 1);
491 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
),
492 ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
)) & ~V_IRQ_MASK
);
496 #elif defined(TARGET_PPC)
498 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
502 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
503 ppc_hw_interrupt(env
);
504 if (env
->pending_interrupts
== 0)
505 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
508 #elif defined(TARGET_MIPS)
509 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
510 (env
->CP0_Status
& env
->CP0_Cause
& CP0Ca_IP_mask
) &&
511 (env
->CP0_Status
& (1 << CP0St_IE
)) &&
512 !(env
->CP0_Status
& (1 << CP0St_EXL
)) &&
513 !(env
->CP0_Status
& (1 << CP0St_ERL
)) &&
514 !(env
->hflags
& MIPS_HFLAG_DM
)) {
516 env
->exception_index
= EXCP_EXT_INTERRUPT
;
521 #elif defined(TARGET_SPARC)
522 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
524 int pil
= env
->interrupt_index
& 15;
525 int type
= env
->interrupt_index
& 0xf0;
527 if (((type
== TT_EXTINT
) &&
528 (pil
== 15 || pil
> env
->psrpil
)) ||
530 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
531 do_interrupt(env
->interrupt_index
);
532 env
->interrupt_index
= 0;
533 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
538 } else if (interrupt_request
& CPU_INTERRUPT_TIMER
) {
539 //do_interrupt(0, 0, 0, 0, 0);
540 env
->interrupt_request
&= ~CPU_INTERRUPT_TIMER
;
542 #elif defined(TARGET_ARM)
543 if (interrupt_request
& CPU_INTERRUPT_FIQ
544 && !(env
->uncached_cpsr
& CPSR_F
)) {
545 env
->exception_index
= EXCP_FIQ
;
549 /* ARMv7-M interrupt return works by loading a magic value
550 into the PC. On real hardware the load causes the
551 return to occur. The qemu implementation performs the
552 jump normally, then does the exception return when the
553 CPU tries to execute code at the magic address.
554 This will cause the magic PC value to be pushed to
555 the stack if an interrupt occured at the wrong time.
556 We avoid this by disabling interrupts when
557 pc contains a magic address. */
558 if (interrupt_request
& CPU_INTERRUPT_HARD
559 && ((IS_M(env
) && env
->regs
[15] < 0xfffffff0)
560 || !(env
->uncached_cpsr
& CPSR_I
))) {
561 env
->exception_index
= EXCP_IRQ
;
565 #elif defined(TARGET_SH4)
566 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
570 #elif defined(TARGET_ALPHA)
571 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
575 #elif defined(TARGET_CRIS)
576 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
580 #elif defined(TARGET_M68K)
581 if (interrupt_request
& CPU_INTERRUPT_HARD
582 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
583 < env
->pending_level
) {
584 /* Real hardware gets the interrupt vector via an
585 IACK cycle at this point. Current emulated
586 hardware doesn't rely on this, so we
587 provide/save the vector when the interrupt is
589 env
->exception_index
= env
->pending_vector
;
594 /* Don't use the cached interupt_request value,
595 do_interrupt may have updated the EXITTB flag. */
596 if (env
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
597 env
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
598 /* ensure that no TB jump will be modified as
599 the program flow was changed */
602 if (interrupt_request
& CPU_INTERRUPT_EXIT
) {
603 env
->interrupt_request
&= ~CPU_INTERRUPT_EXIT
;
604 env
->exception_index
= EXCP_INTERRUPT
;
609 if ((loglevel
& CPU_LOG_TB_CPU
)) {
610 /* restore flags in standard format */
612 #if defined(TARGET_I386)
613 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
614 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
615 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
616 #elif defined(TARGET_ARM)
617 cpu_dump_state(env
, logfile
, fprintf
, 0);
618 #elif defined(TARGET_SPARC)
619 REGWPTR
= env
->regbase
+ (env
->cwp
* 16);
620 env
->regwptr
= REGWPTR
;
621 cpu_dump_state(env
, logfile
, fprintf
, 0);
622 #elif defined(TARGET_PPC)
623 cpu_dump_state(env
, logfile
, fprintf
, 0);
624 #elif defined(TARGET_M68K)
625 cpu_m68k_flush_flags(env
, env
->cc_op
);
626 env
->cc_op
= CC_OP_FLAGS
;
627 env
->sr
= (env
->sr
& 0xffe0)
628 | env
->cc_dest
| (env
->cc_x
<< 4);
629 cpu_dump_state(env
, logfile
, fprintf
, 0);
630 #elif defined(TARGET_MIPS)
631 cpu_dump_state(env
, logfile
, fprintf
, 0);
632 #elif defined(TARGET_SH4)
633 cpu_dump_state(env
, logfile
, fprintf
, 0);
634 #elif defined(TARGET_ALPHA)
635 cpu_dump_state(env
, logfile
, fprintf
, 0);
636 #elif defined(TARGET_CRIS)
637 cpu_dump_state(env
, logfile
, fprintf
, 0);
639 #error unsupported target CPU
645 if ((loglevel
& CPU_LOG_EXEC
)) {
646 fprintf(logfile
, "Trace 0x%08lx [" TARGET_FMT_lx
"] %s\n",
647 (long)tb
->tc_ptr
, tb
->pc
,
648 lookup_symbol(tb
->pc
));
652 /* see if we can patch the calling TB. When the TB
653 spans two pages, we cannot safely do a direct
658 (env
->kqemu_enabled
!= 2) &&
660 tb
->page_addr
[1] == -1) {
662 tb_add_jump((TranslationBlock
*)(long)(T0
& ~3), T0
& 3, tb
);
663 spin_unlock(&tb_lock
);
667 env
->current_tb
= tb
;
668 /* execute the generated code */
669 gen_func
= (void *)tc_ptr
;
670 #if defined(__sparc__)
671 __asm__
__volatile__("call %0\n\t"
675 : "i0", "i1", "i2", "i3", "i4", "i5",
676 "o0", "o1", "o2", "o3", "o4", "o5",
677 "l0", "l1", "l2", "l3", "l4", "l5",
679 #elif defined(__hppa__)
680 asm volatile ("ble 0(%%sr4,%1)\n"
685 : "r1", "r2", "r3", "r4", "r5", "r6", "r7",
686 "r8", "r9", "r10", "r11", "r12", "r13",
687 "r18", "r19", "r20", "r21", "r22", "r23",
688 "r24", "r25", "r26", "r27", "r28", "r29",
690 #elif defined(__arm__)
691 asm volatile ("mov pc, %0\n\t"
692 ".global exec_loop\n\t"
696 : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
697 #elif defined(__ia64)
704 fp
.gp
= code_gen_buffer
+ 2 * (1 << 20);
705 (*(void (*)(void)) &fp
)();
709 env
->current_tb
= NULL
;
710 /* reset soft MMU for next block (it can currently
711 only be set by a memory fault) */
712 #if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
713 if (env
->hflags
& HF_SOFTMMU_MASK
) {
714 env
->hflags
&= ~HF_SOFTMMU_MASK
;
715 /* do not allow linking to another block */
719 #if defined(USE_KQEMU)
720 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
721 if (kqemu_is_ok(env
) &&
722 (cpu_get_time_fast() - env
->last_io_time
) >= MIN_CYCLE_BEFORE_SWITCH
) {
733 #if defined(TARGET_I386)
734 /* restore flags in standard format */
735 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
736 #elif defined(TARGET_ARM)
737 /* XXX: Save/restore host fpu exception state?. */
738 #elif defined(TARGET_SPARC)
739 #if defined(reg_REGWPTR)
740 REGWPTR
= saved_regwptr
;
742 #elif defined(TARGET_PPC)
743 #elif defined(TARGET_M68K)
744 cpu_m68k_flush_flags(env
, env
->cc_op
);
745 env
->cc_op
= CC_OP_FLAGS
;
746 env
->sr
= (env
->sr
& 0xffe0)
747 | env
->cc_dest
| (env
->cc_x
<< 4);
748 #elif defined(TARGET_MIPS)
749 #elif defined(TARGET_SH4)
750 #elif defined(TARGET_IA64)
751 #elif defined(TARGET_ALPHA)
752 #elif defined(TARGET_CRIS)
755 #error unsupported target CPU
758 /* restore global registers */
760 #include "hostregs_helper.h"
762 /* fail safe : never use cpu_single_env outside cpu_exec() */
763 cpu_single_env
= NULL
;
767 /* must only be called from the generated code as an exception can be
769 void tb_invalidate_page_range(target_ulong start
, target_ulong end
)
771 /* XXX: cannot enable it yet because it yields to MMU exception
772 where NIP != read address on PowerPC */
774 target_ulong phys_addr
;
775 phys_addr
= get_phys_addr_code(env
, start
);
776 tb_invalidate_phys_page_range(phys_addr
, phys_addr
+ end
- start
, 0);
780 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
782 void cpu_x86_load_seg(CPUX86State
*s
, int seg_reg
, int selector
)
784 CPUX86State
*saved_env
;
788 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
790 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
791 (selector
<< 4), 0xffff, 0);
793 load_seg(seg_reg
, selector
);
798 void cpu_x86_fsave(CPUX86State
*s
, target_ulong ptr
, int data32
)
800 CPUX86State
*saved_env
;
805 helper_fsave(ptr
, data32
);
810 void cpu_x86_frstor(CPUX86State
*s
, target_ulong ptr
, int data32
)
812 CPUX86State
*saved_env
;
817 helper_frstor(ptr
, data32
);
822 #endif /* TARGET_I386 */
824 #if !defined(CONFIG_SOFTMMU)
826 #if defined(TARGET_I386)
828 /* 'pc' is the host PC at which the exception was raised. 'address' is
829 the effective address of the memory exception. 'is_write' is 1 if a
830 write caused the exception and otherwise 0'. 'old_set' is the
831 signal set which should be restored */
832 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
833 int is_write
, sigset_t
*old_set
,
836 TranslationBlock
*tb
;
840 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
841 #if defined(DEBUG_SIGNAL)
842 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
843 pc
, address
, is_write
, *(unsigned long *)old_set
);
845 /* XXX: locking issue */
846 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
850 /* see if it is an MMU fault */
851 ret
= cpu_x86_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
853 return 0; /* not an MMU fault */
855 return 1; /* the MMU fault was handled without causing real CPU fault */
856 /* now we have a real cpu fault */
859 /* the PC is inside the translated code. It means that we have
860 a virtual CPU fault */
861 cpu_restore_state(tb
, env
, pc
, puc
);
865 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
866 env
->eip
, env
->cr
[2], env
->error_code
);
868 /* we restore the process signal mask as the sigreturn should
869 do it (XXX: use sigsetjmp) */
870 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
871 raise_exception_err(env
->exception_index
, env
->error_code
);
873 /* activate soft MMU for this block */
874 env
->hflags
|= HF_SOFTMMU_MASK
;
875 cpu_resume_from_signal(env
, puc
);
877 /* never comes here */
881 #elif defined(TARGET_ARM)
882 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
883 int is_write
, sigset_t
*old_set
,
886 TranslationBlock
*tb
;
890 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
891 #if defined(DEBUG_SIGNAL)
892 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
893 pc
, address
, is_write
, *(unsigned long *)old_set
);
895 /* XXX: locking issue */
896 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
899 /* see if it is an MMU fault */
900 ret
= cpu_arm_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
902 return 0; /* not an MMU fault */
904 return 1; /* the MMU fault was handled without causing real CPU fault */
905 /* now we have a real cpu fault */
908 /* the PC is inside the translated code. It means that we have
909 a virtual CPU fault */
910 cpu_restore_state(tb
, env
, pc
, puc
);
912 /* we restore the process signal mask as the sigreturn should
913 do it (XXX: use sigsetjmp) */
914 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
916 /* never comes here */
919 #elif defined(TARGET_SPARC)
920 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
921 int is_write
, sigset_t
*old_set
,
924 TranslationBlock
*tb
;
928 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
929 #if defined(DEBUG_SIGNAL)
930 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
931 pc
, address
, is_write
, *(unsigned long *)old_set
);
933 /* XXX: locking issue */
934 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
937 /* see if it is an MMU fault */
938 ret
= cpu_sparc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
940 return 0; /* not an MMU fault */
942 return 1; /* the MMU fault was handled without causing real CPU fault */
943 /* now we have a real cpu fault */
946 /* the PC is inside the translated code. It means that we have
947 a virtual CPU fault */
948 cpu_restore_state(tb
, env
, pc
, puc
);
950 /* we restore the process signal mask as the sigreturn should
951 do it (XXX: use sigsetjmp) */
952 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
954 /* never comes here */
957 #elif defined (TARGET_PPC)
958 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
959 int is_write
, sigset_t
*old_set
,
962 TranslationBlock
*tb
;
966 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
967 #if defined(DEBUG_SIGNAL)
968 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
969 pc
, address
, is_write
, *(unsigned long *)old_set
);
971 /* XXX: locking issue */
972 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
976 /* see if it is an MMU fault */
977 ret
= cpu_ppc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
979 return 0; /* not an MMU fault */
981 return 1; /* the MMU fault was handled without causing real CPU fault */
983 /* now we have a real cpu fault */
986 /* the PC is inside the translated code. It means that we have
987 a virtual CPU fault */
988 cpu_restore_state(tb
, env
, pc
, puc
);
992 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
993 env
->nip
, env
->error_code
, tb
);
995 /* we restore the process signal mask as the sigreturn should
996 do it (XXX: use sigsetjmp) */
997 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
998 do_raise_exception_err(env
->exception_index
, env
->error_code
);
1000 /* activate soft MMU for this block */
1001 cpu_resume_from_signal(env
, puc
);
1003 /* never comes here */
1007 #elif defined(TARGET_M68K)
1008 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1009 int is_write
, sigset_t
*old_set
,
1012 TranslationBlock
*tb
;
1016 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1017 #if defined(DEBUG_SIGNAL)
1018 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1019 pc
, address
, is_write
, *(unsigned long *)old_set
);
1021 /* XXX: locking issue */
1022 if (is_write
&& page_unprotect(address
, pc
, puc
)) {
1025 /* see if it is an MMU fault */
1026 ret
= cpu_m68k_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1028 return 0; /* not an MMU fault */
1030 return 1; /* the MMU fault was handled without causing real CPU fault */
1031 /* now we have a real cpu fault */
1032 tb
= tb_find_pc(pc
);
1034 /* the PC is inside the translated code. It means that we have
1035 a virtual CPU fault */
1036 cpu_restore_state(tb
, env
, pc
, puc
);
1038 /* we restore the process signal mask as the sigreturn should
1039 do it (XXX: use sigsetjmp) */
1040 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1042 /* never comes here */
1046 #elif defined (TARGET_MIPS)
1047 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1048 int is_write
, sigset_t
*old_set
,
1051 TranslationBlock
*tb
;
1055 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1056 #if defined(DEBUG_SIGNAL)
1057 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1058 pc
, address
, is_write
, *(unsigned long *)old_set
);
1060 /* XXX: locking issue */
1061 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1065 /* see if it is an MMU fault */
1066 ret
= cpu_mips_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1068 return 0; /* not an MMU fault */
1070 return 1; /* the MMU fault was handled without causing real CPU fault */
1072 /* now we have a real cpu fault */
1073 tb
= tb_find_pc(pc
);
1075 /* the PC is inside the translated code. It means that we have
1076 a virtual CPU fault */
1077 cpu_restore_state(tb
, env
, pc
, puc
);
1081 printf("PF exception: PC=0x" TARGET_FMT_lx
" error=0x%x %p\n",
1082 env
->PC
, env
->error_code
, tb
);
1084 /* we restore the process signal mask as the sigreturn should
1085 do it (XXX: use sigsetjmp) */
1086 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1087 do_raise_exception_err(env
->exception_index
, env
->error_code
);
1089 /* activate soft MMU for this block */
1090 cpu_resume_from_signal(env
, puc
);
1092 /* never comes here */
1096 #elif defined (TARGET_SH4)
1097 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1098 int is_write
, sigset_t
*old_set
,
1101 TranslationBlock
*tb
;
1105 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1106 #if defined(DEBUG_SIGNAL)
1107 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1108 pc
, address
, is_write
, *(unsigned long *)old_set
);
1110 /* XXX: locking issue */
1111 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1115 /* see if it is an MMU fault */
1116 ret
= cpu_sh4_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1118 return 0; /* not an MMU fault */
1120 return 1; /* the MMU fault was handled without causing real CPU fault */
1122 /* now we have a real cpu fault */
1123 tb
= tb_find_pc(pc
);
1125 /* the PC is inside the translated code. It means that we have
1126 a virtual CPU fault */
1127 cpu_restore_state(tb
, env
, pc
, puc
);
1130 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1131 env
->nip
, env
->error_code
, tb
);
1133 /* we restore the process signal mask as the sigreturn should
1134 do it (XXX: use sigsetjmp) */
1135 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1137 /* never comes here */
1141 #elif defined (TARGET_ALPHA)
1142 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1143 int is_write
, sigset_t
*old_set
,
1146 TranslationBlock
*tb
;
1150 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1151 #if defined(DEBUG_SIGNAL)
1152 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1153 pc
, address
, is_write
, *(unsigned long *)old_set
);
1155 /* XXX: locking issue */
1156 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1160 /* see if it is an MMU fault */
1161 ret
= cpu_alpha_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1163 return 0; /* not an MMU fault */
1165 return 1; /* the MMU fault was handled without causing real CPU fault */
1167 /* now we have a real cpu fault */
1168 tb
= tb_find_pc(pc
);
1170 /* the PC is inside the translated code. It means that we have
1171 a virtual CPU fault */
1172 cpu_restore_state(tb
, env
, pc
, puc
);
1175 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1176 env
->nip
, env
->error_code
, tb
);
1178 /* we restore the process signal mask as the sigreturn should
1179 do it (XXX: use sigsetjmp) */
1180 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1182 /* never comes here */
1185 #elif defined (TARGET_CRIS)
1186 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1187 int is_write
, sigset_t
*old_set
,
1190 TranslationBlock
*tb
;
1194 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1195 #if defined(DEBUG_SIGNAL)
1196 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1197 pc
, address
, is_write
, *(unsigned long *)old_set
);
1199 /* XXX: locking issue */
1200 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1204 /* see if it is an MMU fault */
1205 ret
= cpu_cris_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1207 return 0; /* not an MMU fault */
1209 return 1; /* the MMU fault was handled without causing real CPU fault */
1211 /* now we have a real cpu fault */
1212 tb
= tb_find_pc(pc
);
1214 /* the PC is inside the translated code. It means that we have
1215 a virtual CPU fault */
1216 cpu_restore_state(tb
, env
, pc
, puc
);
1218 /* we restore the process signal mask as the sigreturn should
1219 do it (XXX: use sigsetjmp) */
1220 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1222 /* never comes here */
1227 #error unsupported target CPU
1230 #if defined(__i386__)
1232 #if defined(__APPLE__)
1233 # include <sys/ucontext.h>
1235 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1236 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1237 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1239 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1240 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1241 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1244 int cpu_signal_handler(int host_signum
, void *pinfo
,
1247 siginfo_t
*info
= pinfo
;
1248 struct ucontext
*uc
= puc
;
1256 #define REG_TRAPNO TRAPNO
1259 trapno
= TRAP_sig(uc
);
1260 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1262 (ERROR_sig(uc
) >> 1) & 1 : 0,
1263 &uc
->uc_sigmask
, puc
);
1266 #elif defined(__x86_64__)
1268 int cpu_signal_handler(int host_signum
, void *pinfo
,
1271 siginfo_t
*info
= pinfo
;
1272 struct ucontext
*uc
= puc
;
1275 pc
= uc
->uc_mcontext
.gregs
[REG_RIP
];
1276 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1277 uc
->uc_mcontext
.gregs
[REG_TRAPNO
] == 0xe ?
1278 (uc
->uc_mcontext
.gregs
[REG_ERR
] >> 1) & 1 : 0,
1279 &uc
->uc_sigmask
, puc
);
1282 #elif defined(__powerpc__)
1284 /***********************************************************************
1285 * signal context platform-specific definitions
1289 /* All Registers access - only for local access */
1290 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1291 /* Gpr Registers access */
1292 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1293 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1294 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1295 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1296 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1297 # define LR_sig(context) REG_sig(link, context) /* Link register */
1298 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1299 /* Float Registers access */
1300 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1301 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1302 /* Exception Registers access */
1303 # define DAR_sig(context) REG_sig(dar, context)
1304 # define DSISR_sig(context) REG_sig(dsisr, context)
1305 # define TRAP_sig(context) REG_sig(trap, context)
1309 # include <sys/ucontext.h>
1310 typedef struct ucontext SIGCONTEXT
;
1311 /* All Registers access - only for local access */
1312 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1313 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1314 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1315 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1316 /* Gpr Registers access */
1317 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1318 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1319 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1320 # define CTR_sig(context) REG_sig(ctr, context)
1321 # define XER_sig(context) REG_sig(xer, context) /* Link register */
1322 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1323 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1324 /* Float Registers access */
1325 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1326 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1327 /* Exception Registers access */
1328 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1329 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1330 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1331 #endif /* __APPLE__ */
1333 int cpu_signal_handler(int host_signum
, void *pinfo
,
1336 siginfo_t
*info
= pinfo
;
1337 struct ucontext
*uc
= puc
;
1345 if (DSISR_sig(uc
) & 0x00800000)
1348 if (TRAP_sig(uc
) != 0x400 && (DSISR_sig(uc
) & 0x02000000))
1351 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1352 is_write
, &uc
->uc_sigmask
, puc
);
1355 #elif defined(__alpha__)
1357 int cpu_signal_handler(int host_signum
, void *pinfo
,
1360 siginfo_t
*info
= pinfo
;
1361 struct ucontext
*uc
= puc
;
1362 uint32_t *pc
= uc
->uc_mcontext
.sc_pc
;
1363 uint32_t insn
= *pc
;
1366 /* XXX: need kernel patch to get write flag faster */
1367 switch (insn
>> 26) {
1382 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1383 is_write
, &uc
->uc_sigmask
, puc
);
1385 #elif defined(__sparc__)
1387 int cpu_signal_handler(int host_signum
, void *pinfo
,
1390 siginfo_t
*info
= pinfo
;
1391 uint32_t *regs
= (uint32_t *)(info
+ 1);
1392 void *sigmask
= (regs
+ 20);
1397 /* XXX: is there a standard glibc define ? */
1399 /* XXX: need kernel patch to get write flag faster */
1401 insn
= *(uint32_t *)pc
;
1402 if ((insn
>> 30) == 3) {
1403 switch((insn
>> 19) & 0x3f) {
1415 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1416 is_write
, sigmask
, NULL
);
1419 #elif defined(__arm__)
1421 int cpu_signal_handler(int host_signum
, void *pinfo
,
1424 siginfo_t
*info
= pinfo
;
1425 struct ucontext
*uc
= puc
;
1429 pc
= uc
->uc_mcontext
.gregs
[R15
];
1430 /* XXX: compute is_write */
1432 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1434 &uc
->uc_sigmask
, puc
);
1437 #elif defined(__mc68000)
1439 int cpu_signal_handler(int host_signum
, void *pinfo
,
1442 siginfo_t
*info
= pinfo
;
1443 struct ucontext
*uc
= puc
;
1447 pc
= uc
->uc_mcontext
.gregs
[16];
1448 /* XXX: compute is_write */
1450 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1452 &uc
->uc_sigmask
, puc
);
1455 #elif defined(__ia64)
1458 /* This ought to be in <bits/siginfo.h>... */
1459 # define __ISR_VALID 1
1462 int cpu_signal_handler(int host_signum
, void *pinfo
, void *puc
)
1464 siginfo_t
*info
= pinfo
;
1465 struct ucontext
*uc
= puc
;
1469 ip
= uc
->uc_mcontext
.sc_ip
;
1470 switch (host_signum
) {
1476 if (info
->si_code
&& (info
->si_segvflags
& __ISR_VALID
))
1477 /* ISR.W (write-access) is bit 33: */
1478 is_write
= (info
->si_isr
>> 33) & 1;
1484 return handle_cpu_signal(ip
, (unsigned long)info
->si_addr
,
1486 &uc
->uc_sigmask
, puc
);
1489 #elif defined(__s390__)
1491 int cpu_signal_handler(int host_signum
, void *pinfo
,
1494 siginfo_t
*info
= pinfo
;
1495 struct ucontext
*uc
= puc
;
1499 pc
= uc
->uc_mcontext
.psw
.addr
;
1500 /* XXX: compute is_write */
1502 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1503 is_write
, &uc
->uc_sigmask
, puc
);
1506 #elif defined(__mips__)
1508 int cpu_signal_handler(int host_signum
, void *pinfo
,
1511 siginfo_t
*info
= pinfo
;
1512 struct ucontext
*uc
= puc
;
1513 greg_t pc
= uc
->uc_mcontext
.pc
;
1516 /* XXX: compute is_write */
1518 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1519 is_write
, &uc
->uc_sigmask
, puc
);
1522 #elif defined(__hppa__)
1524 int cpu_signal_handler(int host_signum
, void *pinfo
,
1527 struct siginfo
*info
= pinfo
;
1528 struct ucontext
*uc
= puc
;
1532 pc
= uc
->uc_mcontext
.sc_iaoq
[0];
1533 /* FIXME: compute is_write */
1535 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1537 &uc
->uc_sigmask
, puc
);
1542 #error host CPU specific signal handler needed
1546 #endif /* !defined(CONFIG_SOFTMMU) */