/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;

//#define DEBUG_SIGNAL
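
/* cpu_loop_exit() unwinds out of the generated code back to the setjmp()
   point established in cpu_exec(); the value 1 makes that setjmp() return
   non-zero so the main loop re-enters its exception/interrupt dispatch. */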
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_M68K) || \
    defined(TARGET_ALPHA)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    /* XXX: use siglongjmp ? */
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
    longjmp(env->jmp_env, 1);
}
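
/* Slow-path TB lookup: hash the *physical* address of the guest PC and walk
   the tb_phys_hash chain for a block with a matching pc/cs_base/flags tuple.
   If none exists, a new TranslationBlock is generated with cpu_gen_code()
   and linked into the physical hash and the virtual-PC jump cache. */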
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      unsigned int flags)
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];

            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
        ptb1 = &tb->phys_hash_next;

    /* if no translated code available, then translate it now */
    /* flush must be done */
    /* cannot fail at this point */
    /* don't forget to invalidate previous TB info */
    tb_invalidated_flag = 1;

    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    tb_link_phys(tb, phys_pc, phys_page2);

    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
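
/* Fast-path TB lookup: recompute the (pc, cs_base, flags) tuple that
   identifies translated code for the current CPU state, probe the per-CPU
   tb_jmp_cache indexed by virtual PC, and only fall back to tb_find_slow()
   on a miss or a mismatch. */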
static inline TranslationBlock *tb_find_fast(void)
    TranslationBlock *tb;
    target_ulong cs_base, pc;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block */
#if defined(TARGET_I386)
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
    // FPU enable . MMU enabled . MMU no-fault . Supervisor
    flags = (env->psref << 3) | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1)
#elif defined(TARGET_PPC)
    flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) |
        (msr_se << MSR_SE) | (msr_le << MSR_LE);
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
#elif defined(TARGET_M68K)
    flags = env->fpcr & M68K_FPCR_PREC;
#elif defined(TARGET_SH4)
    flags = env->sr & (SR_MD | SR_RB);
    cs_base = 0; /* XXXXX */
#elif defined(TARGET_ALPHA)
#error unsupported CPU
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
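
/* cpu_exec(): the main execution loop.  It saves the host's global
   registers, installs env->jmp_env as the longjmp target for exceptions and
   exit requests, then repeatedly services pending exceptions and interrupts,
   looks up (or translates) the next TranslationBlock and jumps into the
   generated code. */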
/* main execution loop */

int cpu_exec(CPUState *env1)
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;

#if defined(TARGET_I386)
    /* handle exit of HALTED state */
    if (env1->hflags & HF_HALTED_MASK) {
        /* disable halt condition */
        if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env1->eflags & IF_MASK)) {
            env1->hflags &= ~HF_HALTED_MASK;
#elif defined(TARGET_PPC)
    if (env1->msr[MSR_EE] &&
        (env1->interrupt_request & CPU_INTERRUPT_HARD)) {
#elif defined(TARGET_SPARC)
    if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env1->psret != 0)) {
#elif defined(TARGET_ARM)
    /* An interrupt wakes the CPU even if the I and F CPSR bits are set */
    if (env1->interrupt_request
        & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) {
#elif defined(TARGET_MIPS)
    if (env1->interrupt_request &
        (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER)) {
#elif defined(TARGET_ALPHA)
    if (env1->interrupt_request & CPU_INTERRUPT_HARD) {

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
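    /* While translated code runs, the i386 condition codes are not kept in
       env->eflags: the arithmetic flags are recomputed lazily from CC_SRC
       according to CC_OP, and the direction flag is held in DF as +1/-1.
       The conversions here (and the reverse ones before dumping state or
       leaving cpu_exec) switch between this split representation and the
       architectural eflags value. */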
#elif defined(TARGET_ARM)
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#error unsupported target CPU
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    if (setjmp(env->jmp_env) == 0) {
        env->current_tb = NULL;
        /* if an exception is pending, we execute it here */
        if (env->exception_index >= 0) {
            if (env->exception_index >= EXCP_INTERRUPT) {
                /* exit request from the cpu execution loop */
                ret = env->exception_index;
            } else if (env->user_mode_only) {
                /* if user mode only, we simulate a fake exception
                   which will be handled outside the cpu execution loop */
#if defined(TARGET_I386)
                do_interrupt_user(env->exception_index,
                                  env->exception_is_int,
                                  env->exception_next_eip);
                ret = env->exception_index;
#if defined(TARGET_I386)
                /* simulate a real cpu exception. On i386, it can
                   trigger new exceptions, but we do not handle
                   double or triple faults yet. */
                do_interrupt(env->exception_index,
                             env->exception_is_int,
                             env->exception_next_eip, 0);
                /* successfully delivered */
                env->old_exception = -1;
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SPARC)
                do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
            env->exception_index = -1;
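        /* kqemu fast path: when the kernel accelerator can run the current
           context and no interrupt is pending, execution is handed to it.
           eflags is folded back into architectural format before the call
           and split into CC_SRC/CC_OP/DF again afterwards. */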
        if (kqemu_is_ok(env) && env->interrupt_request == 0) {
            env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
            ret = kqemu_cpu_exec(env);
            /* put eflags in CPU temporary format */
            CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
            DF = 1 - (2 * ((env->eflags >> 10) & 1));
            CC_OP = CC_OP_EFLAGS;
            env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                longjmp(env->jmp_env, 1);
            } else if (ret == 2) {
                /* softmmu execution needed */
                if (env->interrupt_request != 0) {
                    /* hardware interrupt will be executed just after */
                /* otherwise, we restart */
                longjmp(env->jmp_env, 1);

        T0 = 0; /* force lookup of first TB */
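        /* T0 carries the address of the TB that just executed, with the
           index of the jump slot that was taken encoded in its two low bits;
           this is what lets tb_add_jump() further down chain blocks
           directly.  Clearing it forces the first lookup through the loop. */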
#if defined(__sparc__) && !defined(HOST_SOLARIS)
            /* g1 can be modified by some libc? functions */
            interrupt_request = env->interrupt_request;
            if (__builtin_expect(interrupt_request, 0)) {
                if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                    env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                    env->exception_index = EXCP_DEBUG;
#if defined(TARGET_I386)
                if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                    !(env->hflags & HF_SMM_MASK)) {
                    env->interrupt_request &= ~CPU_INTERRUPT_SMI;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                    (env->eflags & IF_MASK) &&
                    !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                    env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                    intno = cpu_get_pic_interrupt(env);
                    if (loglevel & CPU_LOG_TB_IN_ASM) {
                        fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                    do_interrupt(intno, 0, 0, 0, 1);
                    /* ensure that no TB jump will be modified as
                       the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#elif defined(TARGET_PPC)
                if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                if (interrupt_request & CPU_INTERRUPT_HARD) {
                    if (ppc_hw_interrupt(env) == 1) {
                        /* Some exception was raised */
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#elif defined(TARGET_MIPS)
                if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                    (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                    (env->CP0_Status & (1 << CP0St_IE)) &&
                    !(env->CP0_Status & (1 << CP0St_EXL)) &&
                    !(env->CP0_Status & (1 << CP0St_ERL)) &&
                    !(env->hflags & MIPS_HFLAG_DM)) {
                    env->exception_index = EXCP_EXT_INTERRUPT;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#elif defined(TARGET_SPARC)
                if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                    int pil = env->interrupt_index & 15;
                    int type = env->interrupt_index & 0xf0;
                    if (((type == TT_EXTINT) &&
                         (pil == 15 || pil > env->psrpil)) ||
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        do_interrupt(env->interrupt_index);
                        env->interrupt_index = 0;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                    //do_interrupt(0, 0, 0, 0, 0);
                    env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                } else if (interrupt_request & CPU_INTERRUPT_HALT) {
                    env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                    env->exception_index = EXCP_HLT;
#elif defined(TARGET_ARM)
                if (interrupt_request & CPU_INTERRUPT_FIQ
                    && !(env->uncached_cpsr & CPSR_F)) {
                    env->exception_index = EXCP_FIQ;
                if (interrupt_request & CPU_INTERRUPT_HARD
                    && !(env->uncached_cpsr & CPSR_I)) {
                    env->exception_index = EXCP_IRQ;
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
                if (interrupt_request & CPU_INTERRUPT_HARD) {
                /* Don't use the cached interrupt_request value,
                   do_interrupt may have updated the EXITTB flag. */
                if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                    env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                    /* ensure that no TB jump will be modified as
                       the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                if (interrupt_request & CPU_INTERRUPT_EXIT) {
                    env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                    env->exception_index = EXCP_INTERRUPT;
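                /* Each target recognizes its pending interrupt_request bits
                   here and either delivers the interrupt immediately via
                   do_interrupt() or records an exception_index (EXCP_DEBUG,
                   EXCP_HLT, EXCP_INTERRUPT, ...) so the dispatch code at the
                   top of cpu_exec() can handle it. */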
            if ((loglevel & CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
                /* restore flags in standard format */
                env->regs[R_EAX] = EAX;
                env->regs[R_EBX] = EBX;
                env->regs[R_ECX] = ECX;
                env->regs[R_EDX] = EDX;
                env->regs[R_ESI] = ESI;
                env->regs[R_EDI] = EDI;
                env->regs[R_EBP] = EBP;
                env->regs[R_ESP] = ESP;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                REGWPTR = env->regbase + (env->cwp * 16);
                env->regwptr = REGWPTR;
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                cpu_m68k_flush_flags(env, env->cc_op);
                env->cc_op = CC_OP_FLAGS;
                env->sr = (env->sr & 0xffe0)
                          | env->cc_dest | (env->cc_x << 4);
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                cpu_dump_state(env, logfile, fprintf, 0);
#error unsupported target CPU
            if ((loglevel & CPU_LOG_EXEC)) {
                fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                        (long)tb->tc_ptr, tb->pc,
                        lookup_symbol(tb->pc));
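            /* Direct block chaining: if the previous TB ended in a direct
               jump (T0 != 0) and the new TB fits on a single page, the
               previous TB's jump slot is patched to branch straight to the
               new one, so subsequent executions skip this lookup. */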
#if defined(__sparc__) && !defined(HOST_SOLARIS)
            /* see if we can patch the calling TB. When the TB
               spans two pages, we cannot safely do a direct jump. */
                (env->kqemu_enabled != 2) &&
                tb->page_addr[1] == -1
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                && (tb->cflags & CF_CODE_COPY) ==
                (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
                tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
                /* propagates the FP use info */
                ((TranslationBlock *)(T0 & ~3))->cflags |=
                    (tb->cflags & CF_FP_USED);
                spin_unlock(&tb_lock);
            env->current_tb = tb;
            /* execute the generated code */
            gen_func = (void *)tc_ptr;
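            /* The generated code is entered by calling its host entry point;
               some hosts below use a small asm trampoline instead of a plain
               indirect call so the register conventions expected by the
               translated code are preserved across the transition. */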
#if defined(__sparc__)
            __asm__ __volatile__("call %0\n\t"
                                 : "i0", "i1", "i2", "i3", "i4", "i5",
                                   "o0", "o1", "o2", "o3", "o4", "o5",
                                   "l0", "l1", "l2", "l3", "l4", "l5",
#elif defined(__arm__)
            asm volatile ("mov pc, %0\n\t"
                          ".global exec_loop\n\t"
                          : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
            if (!(tb->cflags & CF_CODE_COPY)) {
                if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
                    save_native_fp_state(env);
                if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
                    restore_native_fp_state(env);
                /* we work with native eflags */
                CC_SRC = cc_table[CC_OP].compute_all();
                CC_OP = CC_OP_EFLAGS;
                asm(".globl exec_loop\n"
                    " fs movl %11, %%eax\n"
                    " andl $0x400, %%eax\n"
                    " fs orl %8, %%eax\n"
                    " fs movl %%esp, %12\n"
                    " fs movl %0, %%eax\n"
                    " fs movl %1, %%ecx\n"
                    " fs movl %2, %%edx\n"
                    " fs movl %3, %%ebx\n"
                    " fs movl %4, %%esp\n"
                    " fs movl %5, %%ebp\n"
                    " fs movl %6, %%esi\n"
                    " fs movl %7, %%edi\n"
                    " fs movl %%esp, %4\n"
                    " fs movl %12, %%esp\n"
                    " fs movl %%eax, %0\n"
                    " fs movl %%ecx, %1\n"
                    " fs movl %%edx, %2\n"
                    " fs movl %%ebx, %3\n"
                    " fs movl %%ebp, %5\n"
                    " fs movl %%esi, %6\n"
                    " fs movl %%edi, %7\n"
                    " movl %%eax, %%ecx\n"
                    " andl $0x400, %%ecx\n"
                    " andl $0x8d5, %%eax\n"
                    " fs movl %%eax, %8\n"
                    " subl %%ecx, %%eax\n"
                    " fs movl %%eax, %11\n"
                    " fs movl %9, %%ebx\n" /* get T0 value */
                    : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
                      "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
                      "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
                      "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
                      "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
                      "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
                      "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
                      "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
                      "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
                      "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
                      "m" (*(uint8_t *)offsetof(CPUState, df)),
                      "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
#elif defined(__ia64)
            fp.gp = code_gen_buffer + 2 * (1 << 20);
            (*(void (*)(void)) &fp)();
            env->current_tb = NULL;
            /* reset soft MMU for next block (it can currently
               only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
            if (env->hflags & HF_SOFTMMU_MASK) {
                env->hflags &= ~HF_SOFTMMU_MASK;
                /* do not allow linking to another block */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
            if (kqemu_is_ok(env) &&
                (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?. */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#error unsupported target CPU

    /* restore global registers */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
    CPUX86State *saved_env;

    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
        load_seg(seg_reg, selector);

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
    CPUX86State *saved_env;

    helper_fsave((target_ulong)ptr, data32);

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
    CPUX86State *saved_env;

    helper_frstor((target_ulong)ptr, data32);

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)
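
/* User-mode memory fault handling: the host SIGSEGV/SIGBUS handlers further
   below funnel into a per-target handle_cpu_signal().  It first lets
   page_unprotect() deal with write faults caused by the protection QEMU puts
   on pages holding translated code, then asks the target's MMU fault handler
   whether the access was a guest fault; only then is the CPU state restored
   from the faulting TB and a guest exception raised. */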
#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    raise_exception_err(env->exception_index, env->error_code);
    /* activate soft MMU for this block */
    env->hflags |= HF_SOFTMMU_MASK;
    cpu_resume_from_signal(env, puc);
    /* never comes here */
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    do_raise_exception_err(env->exception_index, env->error_code);
    /* activate soft MMU for this block */
    cpu_resume_from_signal(env, puc);
    /* never comes here */
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    /* never comes here */
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    do_raise_exception_err(env->exception_index, env->error_code);
    /* activate soft MMU for this block */
    cpu_resume_from_signal(env, puc);
    /* never comes here */
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    /* never comes here */
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, 1, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    /* never comes here */

#error unsupported target CPU
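
/* The host-specific cpu_signal_handler() implementations below extract the
   faulting program counter (and, where the host signal context exposes it,
   whether the access was a write) from the signal frame and delegate to
   handle_cpu_signal() together with the saved signal mask. */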
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
#define REG_TRAPNO TRAPNO
    trapno = TRAP_sig(uc);
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 */

/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)

# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)         ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)    ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)    ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)      ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;

    if (DSISR_sig(uc) & 0x00800000)
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);

    /* XXX: is there a standard glibc define ? */
    /* XXX: need kernel patch to get write flag faster */
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             &uc->uc_sigmask, puc);
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             &uc->uc_sigmask, puc);
#elif defined(__ia64)

/* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             &uc->uc_sigmask, puc);
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             &uc->uc_sigmask, puc);

#error host CPU specific signal handler needed

#endif /* !defined(CONFIG_SOFTMMU) */