2 * i386 emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #if !defined(CONFIG_SOFTMMU)
35 #include <sys/ucontext.h>
38 int tb_invalidated_flag
;
41 //#define DEBUG_SIGNAL
43 #define SAVE_GLOBALS()
44 #define RESTORE_GLOBALS()
46 #if defined(__sparc__) && !defined(HOST_SOLARIS)
48 #if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
49 ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
50 // Work around ugly bugs in glibc that mangle global register contents
52 static volatile void *saved_env
;
53 static volatile unsigned long saved_t0
, saved_i7
;
55 #define SAVE_GLOBALS() do { \
58 asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7)); \
61 #undef RESTORE_GLOBALS
62 #define RESTORE_GLOBALS() do { \
63 env = (void *)saved_env; \
65 asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7)); \
68 static int sparc_setjmp(jmp_buf buf
)
78 #define setjmp(jmp_buf) sparc_setjmp(jmp_buf)
80 static void sparc_longjmp(jmp_buf buf
, int val
)
85 #define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
/* Abort execution of the current translated block and return to the
   setjmp() point inside cpu_exec().  Never returns to the caller.
   NOTE(review): this chunk is truncated -- the function braces and any
   RESTORE_GLOBALS/regs_to_env calls from the original are missing here. */
89 void cpu_loop_exit(void)
91 /* NOTE: the register at this point must be saved by hand because
92 longjmp restore them */
94 longjmp(env
->jmp_env
, 1);
97 #if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
/* Leave the current translated block from inside a host signal handler,
   restoring the signal mask saved in the ucontext before longjmp-ing back
   to cpu_exec().  'puc' is the host signal ucontext (may be unused in the
   softmmu build).  NOTE(review): truncated chunk -- braces, #endif lines
   and some statements are missing from this view. */
101 /* exit the current TB from a signal handler. The host registers are
102 restored in a state compatible with the CPU emulator
104 void cpu_resume_from_signal(CPUState
*env1
, void *puc
)
106 #if !defined(CONFIG_SOFTMMU)
107 struct ucontext
*uc
= puc
;
112 /* XXX: restore cpu registers saved in host registers */
114 #if !defined(CONFIG_SOFTMMU)
/* Restore the signal mask that was active when the signal was taken,
   since we bypass the normal sigreturn path by longjmp-ing out. */
116 /* XXX: use siglongjmp ? */
117 sigprocmask(SIG_SETMASK
, &uc
->uc_sigmask
, NULL
);
120 longjmp(env
->jmp_env
, 1);
/* Slow-path translation block lookup: hash the physical PC, walk the
   physical hash chain for a TB matching (phys page(s), cs_base, flags);
   if none is found, translate the block now and link it into both the
   physical hash table and the per-CPU virtual-PC jump cache.
   NOTE(review): this chunk is truncated -- the loop header, several
   branches and braces are missing; only annotate, do not restructure. */
124 static TranslationBlock
*tb_find_slow(target_ulong pc
,
125 target_ulong cs_base
,
128 TranslationBlock
*tb
, **ptb1
;
131 target_ulong phys_pc
, phys_page1
, phys_page2
, virt_page2
;
136 tb_invalidated_flag
= 0;
138 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
140 /* find translated block using physical mappings */
141 phys_pc
= get_phys_addr_code(env
, pc
);
142 phys_page1
= phys_pc
& TARGET_PAGE_MASK
;
144 h
= tb_phys_hash_func(phys_pc
);
145 ptb1
= &tb_phys_hash
[h
];
/* Candidate match: same first physical page, cs_base and flags. */
151 tb
->page_addr
[0] == phys_page1
&&
152 tb
->cs_base
== cs_base
&&
153 tb
->flags
== flags
) {
154 /* check next page if needed */
155 if (tb
->page_addr
[1] != -1) {
156 virt_page2
= (pc
& TARGET_PAGE_MASK
) +
158 phys_page2
= get_phys_addr_code(env
, virt_page2
);
159 if (tb
->page_addr
[1] == phys_page2
)
/* No match: advance along the physical hash chain. */
165 ptb1
= &tb
->phys_hash_next
;
168 /* if no translated code available, then translate it now */
171 /* flush must be done */
173 /* cannot fail at this point */
175 /* don't forget to invalidate previous TB info */
176 tb_invalidated_flag
= 1;
178 tc_ptr
= code_gen_ptr
;
180 tb
->cs_base
= cs_base
;
183 cpu_gen_code(env
, tb
, &code_gen_size
);
/* Bump the code generation pointer, rounded up to CODE_GEN_ALIGN. */
185 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
187 /* check next page if needed */
188 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
190 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
191 phys_page2
= get_phys_addr_code(env
, virt_page2
);
/* Register the TB under its physical page(s). */
193 tb_link_phys(tb
, phys_pc
, phys_page2
);
196 /* we add the TB in the virtual pc hash table */
197 env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)] = tb
;
198 spin_unlock(&tb_lock
);
/* Fast-path TB lookup: compute the (pc, cs_base, flags) tuple for the
   current target CPU state (one #if branch per target), probe the
   per-CPU tb_jmp_cache, and fall back to tb_find_slow() on a miss.
   NOTE(review): truncated chunk -- braces, some target branches and the
   trailing hash-recompute statement are missing from this view. */
202 static inline TranslationBlock
*tb_find_fast(void)
204 TranslationBlock
*tb
;
205 target_ulong cs_base
, pc
;
208 /* we record a subset of the CPU state. It will
209 always be the same before a given translated block
211 #if defined(TARGET_I386)
213 flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
214 flags
|= env
->intercept
;
215 cs_base
= env
->segs
[R_CS
].base
;
216 pc
= cs_base
+ env
->eip
;
217 #elif defined(TARGET_ARM)
218 flags
= env
->thumb
| (env
->vfp
.vec_len
<< 1)
219 | (env
->vfp
.vec_stride
<< 4);
220 if ((env
->uncached_cpsr
& CPSR_M
) != ARM_CPU_MODE_USR
)
222 if (env
->vfp
.xregs
[ARM_VFP_FPEXC
] & (1 << 30))
224 flags
|= (env
->condexec_bits
<< 8);
227 #elif defined(TARGET_SPARC)
228 #ifdef TARGET_SPARC64
229 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
230 flags
= (((env
->pstate
& PS_PEF
) >> 1) | ((env
->fprs
& FPRS_FEF
) << 2))
231 | (env
->pstate
& PS_PRIV
) | ((env
->lsu
& (DMMU_E
| IMMU_E
)) >> 2);
233 // FPU enable . Supervisor
234 flags
= (env
->psref
<< 4) | env
->psrs
;
238 #elif defined(TARGET_PPC)
242 #elif defined(TARGET_MIPS)
243 flags
= env
->hflags
& (MIPS_HFLAG_TMASK
| MIPS_HFLAG_BMASK
);
245 pc
= env
->PC
[env
->current_tc
];
246 #elif defined(TARGET_M68K)
247 flags
= (env
->fpcr
& M68K_FPCR_PREC
) /* Bit 6 */
248 | (env
->sr
& SR_S
) /* Bit 13 */
249 | ((env
->macsr
>> 4) & 0xf); /* Bits 0-3 */
252 #elif defined(TARGET_SH4)
256 #elif defined(TARGET_ALPHA)
260 #elif defined(TARGET_CRIS)
265 #error unsupported CPU
/* Probe the virtual-PC jump cache; __builtin_expect marks a miss as
   the unlikely path. */
267 tb
= env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)];
268 if (__builtin_expect(!tb
|| tb
->pc
!= pc
|| tb
->cs_base
!= cs_base
||
269 tb
->flags
!= flags
, 0)) {
270 tb
= tb_find_slow(pc
, cs_base
, flags
);
271 /* Note: we do it here to avoid a gcc bug on Mac OS X when
272 doing it in tb_find_slow */
273 if (tb_invalidated_flag
) {
274 /* as some TB could have been invalidated because
275 of memory exceptions while generating the code, we
276 must recompute the hash index here */
283 #define BREAK_CHAIN T0 = 0
285 /* main execution loop */
/* Main CPU execution loop: save host globals, set up the setjmp()
   exception context, then repeatedly (1) deliver pending exceptions,
   (2) service hardware/virtual interrupts per target, (3) look up or
   translate the next TB and jump into its generated code, until an
   exit condition (EXCP_INTERRUPT, debug, halt) breaks out.  Returns the
   exception index that ended execution.
   NOTE(review): this chunk is heavily truncated -- many braces, loop
   headers, #endif lines and whole statements are missing; annotations
   below describe only what the visible lines show. */
287 int cpu_exec(CPUState
*env1
)
289 #define DECLARE_HOST_REGS 1
290 #include "hostregs_helper.h"
291 #if defined(TARGET_SPARC)
292 #if defined(reg_REGWPTR)
293 uint32_t *saved_regwptr
;
296 int ret
, interrupt_request
;
297 void (*gen_func
)(void);
298 TranslationBlock
*tb
;
/* Nothing to do while the CPU is halted. */
301 if (cpu_halted(env1
) == EXCP_HALTED
)
304 cpu_single_env
= env1
;
306 /* first we save global registers */
307 #define SAVE_HOST_REGS 1
308 #include "hostregs_helper.h"
313 #if defined(TARGET_I386)
314 /* put eflags in CPU temporary format */
315 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
316 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
317 CC_OP
= CC_OP_EFLAGS
;
318 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
319 #elif defined(TARGET_SPARC)
320 #if defined(reg_REGWPTR)
321 saved_regwptr
= REGWPTR
;
323 #elif defined(TARGET_M68K)
324 env
->cc_op
= CC_OP_FLAGS
;
325 env
->cc_dest
= env
->sr
& 0xf;
326 env
->cc_x
= (env
->sr
>> 4) & 1;
327 #elif defined(TARGET_ALPHA)
328 #elif defined(TARGET_ARM)
329 #elif defined(TARGET_PPC)
330 #elif defined(TARGET_MIPS)
331 #elif defined(TARGET_SH4)
332 #elif defined(TARGET_CRIS)
335 #error unsupported target CPU
337 env
->exception_index
= -1;
339 /* prepare setjmp context for exception handling */
341 if (setjmp(env
->jmp_env
) == 0) {
342 env
->current_tb
= NULL
;
343 /* if an exception is pending, we execute it here */
344 if (env
->exception_index
>= 0) {
345 if (env
->exception_index
>= EXCP_INTERRUPT
) {
346 /* exit request from the cpu execution loop */
347 ret
= env
->exception_index
;
349 } else if (env
->user_mode_only
) {
350 /* if user mode only, we simulate a fake exception
351 which will be handled outside the cpu execution
353 #if defined(TARGET_I386)
354 do_interrupt_user(env
->exception_index
,
355 env
->exception_is_int
,
357 env
->exception_next_eip
);
359 ret
= env
->exception_index
;
362 #if defined(TARGET_I386)
363 /* simulate a real cpu exception. On i386, it can
364 trigger new exceptions, but we do not handle
365 double or triple faults yet. */
366 do_interrupt(env
->exception_index
,
367 env
->exception_is_int
,
369 env
->exception_next_eip
, 0);
370 /* successfully delivered */
371 env
->old_exception
= -1;
372 #elif defined(TARGET_PPC)
374 #elif defined(TARGET_MIPS)
376 #elif defined(TARGET_SPARC)
377 do_interrupt(env
->exception_index
);
378 #elif defined(TARGET_ARM)
380 #elif defined(TARGET_SH4)
382 #elif defined(TARGET_ALPHA)
384 #elif defined(TARGET_CRIS)
386 #elif defined(TARGET_M68K)
390 env
->exception_index
= -1;
/* kqemu acceleration path: hand execution to the kernel module when
   possible; eflags are converted to/from the CPU temporary format
   around the call. */
393 if (kqemu_is_ok(env
) && env
->interrupt_request
== 0) {
395 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
396 ret
= kqemu_cpu_exec(env
);
397 /* put eflags in CPU temporary format */
398 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
399 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
400 CC_OP
= CC_OP_EFLAGS
;
401 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
404 longjmp(env
->jmp_env
, 1);
405 } else if (ret
== 2) {
406 /* softmmu execution needed */
408 if (env
->interrupt_request
!= 0) {
409 /* hardware interrupt will be executed just after */
411 /* otherwise, we restart */
412 longjmp(env
->jmp_env
, 1);
418 T0
= 0; /* force lookup of first TB */
/* Per-target interrupt servicing: each #if branch checks the bits in
   interrupt_request relevant to that target and delivers accordingly. */
421 interrupt_request
= env
->interrupt_request
;
422 if (__builtin_expect(interrupt_request
, 0)
423 #if defined(TARGET_I386)
424 && env
->hflags
& HF_GIF_MASK
427 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
428 env
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
429 env
->exception_index
= EXCP_DEBUG
;
432 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
433 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
434 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
435 env
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
437 env
->exception_index
= EXCP_HLT
;
441 #if defined(TARGET_I386)
442 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
443 !(env
->hflags
& HF_SMM_MASK
)) {
444 svm_check_intercept(SVM_EXIT_SMI
);
445 env
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
448 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
449 (env
->eflags
& IF_MASK
|| env
->hflags
& HF_HIF_MASK
) &&
450 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
452 svm_check_intercept(SVM_EXIT_INTR
);
453 env
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VIRQ
);
454 intno
= cpu_get_pic_interrupt(env
);
455 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
456 fprintf(logfile
, "Servicing hardware INT=0x%02x\n", intno
);
458 do_interrupt(intno
, 0, 0, 0, 1);
459 /* ensure that no TB jump will be modified as
460 the program flow was changed */
462 #if !defined(CONFIG_USER_ONLY)
463 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
464 (env
->eflags
& IF_MASK
) && !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
466 /* FIXME: this should respect TPR */
467 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
468 svm_check_intercept(SVM_EXIT_VINTR
);
469 intno
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_vector
));
470 if (loglevel
& CPU_LOG_TB_IN_ASM
)
471 fprintf(logfile
, "Servicing virtual hardware INT=0x%02x\n", intno
);
472 do_interrupt(intno
, 0, 0, -1, 1);
473 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
),
474 ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
)) & ~V_IRQ_MASK
);
478 #elif defined(TARGET_PPC)
480 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
484 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
485 ppc_hw_interrupt(env
);
486 if (env
->pending_interrupts
== 0)
487 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
490 #elif defined(TARGET_MIPS)
491 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
492 (env
->CP0_Status
& env
->CP0_Cause
& CP0Ca_IP_mask
) &&
493 (env
->CP0_Status
& (1 << CP0St_IE
)) &&
494 !(env
->CP0_Status
& (1 << CP0St_EXL
)) &&
495 !(env
->CP0_Status
& (1 << CP0St_ERL
)) &&
496 !(env
->hflags
& MIPS_HFLAG_DM
)) {
498 env
->exception_index
= EXCP_EXT_INTERRUPT
;
503 #elif defined(TARGET_SPARC)
504 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
506 int pil
= env
->interrupt_index
& 15;
507 int type
= env
->interrupt_index
& 0xf0;
509 if (((type
== TT_EXTINT
) &&
510 (pil
== 15 || pil
> env
->psrpil
)) ||
512 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
513 do_interrupt(env
->interrupt_index
);
514 env
->interrupt_index
= 0;
515 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
520 } else if (interrupt_request
& CPU_INTERRUPT_TIMER
) {
521 //do_interrupt(0, 0, 0, 0, 0);
522 env
->interrupt_request
&= ~CPU_INTERRUPT_TIMER
;
524 #elif defined(TARGET_ARM)
525 if (interrupt_request
& CPU_INTERRUPT_FIQ
526 && !(env
->uncached_cpsr
& CPSR_F
)) {
527 env
->exception_index
= EXCP_FIQ
;
531 /* ARMv7-M interrupt return works by loading a magic value
532 into the PC. On real hardware the load causes the
533 return to occur. The qemu implementation performs the
534 jump normally, then does the exception return when the
535 CPU tries to execute code at the magic address.
536 This will cause the magic PC value to be pushed to
537 the stack if an interrupt occured at the wrong time.
538 We avoid this by disabling interrupts when
539 pc contains a magic address. */
540 if (interrupt_request
& CPU_INTERRUPT_HARD
541 && ((IS_M(env
) && env
->regs
[15] < 0xfffffff0)
542 || !(env
->uncached_cpsr
& CPSR_I
))) {
543 env
->exception_index
= EXCP_IRQ
;
547 #elif defined(TARGET_SH4)
548 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
552 #elif defined(TARGET_ALPHA)
553 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
557 #elif defined(TARGET_CRIS)
558 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
560 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
563 #elif defined(TARGET_M68K)
564 if (interrupt_request
& CPU_INTERRUPT_HARD
565 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
566 < env
->pending_level
) {
567 /* Real hardware gets the interrupt vector via an
568 IACK cycle at this point. Current emulated
569 hardware doesn't rely on this, so we
570 provide/save the vector when the interrupt is
572 env
->exception_index
= env
->pending_vector
;
577 /* Don't use the cached interupt_request value,
578 do_interrupt may have updated the EXITTB flag. */
579 if (env
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
580 env
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
581 /* ensure that no TB jump will be modified as
582 the program flow was changed */
585 if (interrupt_request
& CPU_INTERRUPT_EXIT
) {
586 env
->interrupt_request
&= ~CPU_INTERRUPT_EXIT
;
587 env
->exception_index
= EXCP_INTERRUPT
;
/* Optional per-iteration CPU state dump when CPU_LOG_TB_CPU logging
   is enabled; flags are converted to architectural format first. */
592 if ((loglevel
& CPU_LOG_TB_CPU
)) {
593 /* restore flags in standard format */
595 #if defined(TARGET_I386)
596 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
597 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
598 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
599 #elif defined(TARGET_ARM)
600 cpu_dump_state(env
, logfile
, fprintf
, 0);
601 #elif defined(TARGET_SPARC)
602 REGWPTR
= env
->regbase
+ (env
->cwp
* 16);
603 env
->regwptr
= REGWPTR
;
604 cpu_dump_state(env
, logfile
, fprintf
, 0);
605 #elif defined(TARGET_PPC)
606 cpu_dump_state(env
, logfile
, fprintf
, 0);
607 #elif defined(TARGET_M68K)
608 cpu_m68k_flush_flags(env
, env
->cc_op
);
609 env
->cc_op
= CC_OP_FLAGS
;
610 env
->sr
= (env
->sr
& 0xffe0)
611 | env
->cc_dest
| (env
->cc_x
<< 4);
612 cpu_dump_state(env
, logfile
, fprintf
, 0);
613 #elif defined(TARGET_MIPS)
614 cpu_dump_state(env
, logfile
, fprintf
, 0);
615 #elif defined(TARGET_SH4)
616 cpu_dump_state(env
, logfile
, fprintf
, 0);
617 #elif defined(TARGET_ALPHA)
618 cpu_dump_state(env
, logfile
, fprintf
, 0);
619 #elif defined(TARGET_CRIS)
620 cpu_dump_state(env
, logfile
, fprintf
, 0);
622 #error unsupported target CPU
628 if ((loglevel
& CPU_LOG_EXEC
)) {
629 fprintf(logfile
, "Trace 0x%08lx [" TARGET_FMT_lx
"] %s\n",
630 (long)tb
->tc_ptr
, tb
->pc
,
631 lookup_symbol(tb
->pc
));
635 /* see if we can patch the calling TB. When the TB
636 spans two pages, we cannot safely do a direct
641 (env
->kqemu_enabled
!= 2) &&
643 tb
->page_addr
[1] == -1) {
645 tb_add_jump((TranslationBlock
*)(long)(T0
& ~3), T0
& 3, tb
);
646 spin_unlock(&tb_lock
);
650 env
->current_tb
= tb
;
651 /* execute the generated code */
652 gen_func
= (void *)tc_ptr
;
653 #if defined(__sparc__)
654 __asm__
__volatile__("call %0\n\t"
658 : "i0", "i1", "i2", "i3", "i4", "i5",
659 "o0", "o1", "o2", "o3", "o4", "o5",
660 "l0", "l1", "l2", "l3", "l4", "l5",
662 #elif defined(__arm__)
663 asm volatile ("mov pc, %0\n\t"
664 ".global exec_loop\n\t"
668 : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
669 #elif defined(__ia64)
676 fp
.gp
= code_gen_buffer
+ 2 * (1 << 20);
677 (*(void (*)(void)) &fp
)();
681 env
->current_tb
= NULL
;
682 /* reset soft MMU for next block (it can currently
683 only be set by a memory fault) */
684 #if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
685 if (env
->hflags
& HF_SOFTMMU_MASK
) {
686 env
->hflags
&= ~HF_SOFTMMU_MASK
;
687 /* do not allow linking to another block */
691 #if defined(USE_KQEMU)
692 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
693 if (kqemu_is_ok(env
) &&
694 (cpu_get_time_fast() - env
->last_io_time
) >= MIN_CYCLE_BEFORE_SWITCH
) {
/* Loop exit: convert CPU state back to architectural format per
   target, then restore the saved host registers. */
705 #if defined(TARGET_I386)
706 /* restore flags in standard format */
707 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
708 #elif defined(TARGET_ARM)
709 /* XXX: Save/restore host fpu exception state?. */
710 #elif defined(TARGET_SPARC)
711 #if defined(reg_REGWPTR)
712 REGWPTR
= saved_regwptr
;
714 #elif defined(TARGET_PPC)
715 #elif defined(TARGET_M68K)
716 cpu_m68k_flush_flags(env
, env
->cc_op
);
717 env
->cc_op
= CC_OP_FLAGS
;
718 env
->sr
= (env
->sr
& 0xffe0)
719 | env
->cc_dest
| (env
->cc_x
<< 4);
720 #elif defined(TARGET_MIPS)
721 #elif defined(TARGET_SH4)
722 #elif defined(TARGET_ALPHA)
723 #elif defined(TARGET_CRIS)
726 #error unsupported target CPU
729 /* restore global registers */
731 #include "hostregs_helper.h"
733 /* fail safe : never use cpu_single_env outside cpu_exec() */
734 cpu_single_env
= NULL
;
/* Invalidate translated blocks covering the virtual range [start, end),
   by translating 'start' to a physical address and invalidating the
   corresponding physical range.  NOTE(review): truncated -- the original
   guards this body with an #if that is missing from this view. */
738 /* must only be called from the generated code as an exception can be
740 void tb_invalidate_page_range(target_ulong start
, target_ulong end
)
742 /* XXX: cannot enable it yet because it yields to MMU exception
743 where NIP != read address on PowerPC */
745 target_ulong phys_addr
;
746 phys_addr
= get_phys_addr_code(env
, start
);
747 tb_invalidate_phys_page_range(phys_addr
, phys_addr
+ end
- start
, 0);
751 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
/* User-mode helper: load segment register 'seg_reg' with 'selector'.
   In real mode / vm86 the segment cache is loaded directly from the
   selector; in protected mode the full load_seg() path is used.
   NOTE(review): truncated -- the saved_env save/restore statements from
   the original are missing from this view. */
753 void cpu_x86_load_seg(CPUX86State
*s
, int seg_reg
, int selector
)
755 CPUX86State
*saved_env
;
759 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
761 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
762 (selector
<< 4), 0xffff, 0);
764 load_seg(seg_reg
, selector
);
/* User-mode helper: execute an FSAVE to guest address 'ptr' ('data32'
   selects 32-bit operand size).  NOTE(review): truncated -- the
   saved_env swap around the helper call is missing from this view. */
769 void cpu_x86_fsave(CPUX86State
*s
, target_ulong ptr
, int data32
)
771 CPUX86State
*saved_env
;
776 helper_fsave(ptr
, data32
);
/* User-mode helper: execute an FRSTOR from guest address 'ptr' ('data32'
   selects 32-bit operand size).  NOTE(review): truncated -- the
   saved_env swap around the helper call is missing from this view. */
781 void cpu_x86_frstor(CPUX86State
*s
, target_ulong ptr
, int data32
)
783 CPUX86State
*saved_env
;
788 helper_frstor(ptr
, data32
);
793 #endif /* TARGET_I386 */
795 #if !defined(CONFIG_SOFTMMU)
797 #if defined(TARGET_I386)
/* Host SIGSEGV classifier for TARGET_I386: retry after unprotecting a
   write-protected page, let the soft MMU handle the fault, or convert it
   into a guest page-fault exception.  Returns 0 = not an MMU fault,
   1 = handled without a CPU fault; otherwise raises a guest exception.
   NOTE(review): truncated -- braces and several control lines missing. */
799 /* 'pc' is the host PC at which the exception was raised. 'address' is
800 the effective address of the memory exception. 'is_write' is 1 if a
801 write caused the exception and otherwise 0'. 'old_set' is the
802 signal set which should be restored */
803 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
804 int is_write
, sigset_t
*old_set
,
807 TranslationBlock
*tb
;
811 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
812 #if defined(DEBUG_SIGNAL)
813 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
814 pc
, address
, is_write
, *(unsigned long *)old_set
);
816 /* XXX: locking issue */
817 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
821 /* see if it is an MMU fault */
822 ret
= cpu_x86_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
824 return 0; /* not an MMU fault */
826 return 1; /* the MMU fault was handled without causing real CPU fault */
827 /* now we have a real cpu fault */
830 /* the PC is inside the translated code. It means that we have
831 a virtual CPU fault */
832 cpu_restore_state(tb
, env
, pc
, puc
);
836 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
837 env
->eip
, env
->cr
[2], env
->error_code
);
839 /* we restore the process signal mask as the sigreturn should
840 do it (XXX: use sigsetjmp) */
841 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
842 raise_exception_err(env
->exception_index
, env
->error_code
);
844 /* activate soft MMU for this block */
845 env
->hflags
|= HF_SOFTMMU_MASK
;
846 cpu_resume_from_signal(env
, puc
);
848 /* never comes here */
852 #elif defined(TARGET_ARM)
/* Host SIGSEGV classifier for TARGET_ARM (same contract as the i386
   variant above: 0 = not MMU fault, 1 = handled, else guest fault).
   NOTE(review): truncated chunk -- braces/control lines missing. */
853 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
854 int is_write
, sigset_t
*old_set
,
857 TranslationBlock
*tb
;
861 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
862 #if defined(DEBUG_SIGNAL)
863 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
864 pc
, address
, is_write
, *(unsigned long *)old_set
);
866 /* XXX: locking issue */
867 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
870 /* see if it is an MMU fault */
871 ret
= cpu_arm_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
873 return 0; /* not an MMU fault */
875 return 1; /* the MMU fault was handled without causing real CPU fault */
876 /* now we have a real cpu fault */
879 /* the PC is inside the translated code. It means that we have
880 a virtual CPU fault */
881 cpu_restore_state(tb
, env
, pc
, puc
);
883 /* we restore the process signal mask as the sigreturn should
884 do it (XXX: use sigsetjmp) */
885 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
888 #elif defined(TARGET_SPARC)
/* Host SIGSEGV classifier for TARGET_SPARC (same contract as the i386
   variant: 0 = not MMU fault, 1 = handled, else guest fault).
   NOTE(review): truncated chunk -- braces/control lines missing. */
889 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
890 int is_write
, sigset_t
*old_set
,
893 TranslationBlock
*tb
;
897 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
898 #if defined(DEBUG_SIGNAL)
899 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
900 pc
, address
, is_write
, *(unsigned long *)old_set
);
902 /* XXX: locking issue */
903 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
906 /* see if it is an MMU fault */
907 ret
= cpu_sparc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
909 return 0; /* not an MMU fault */
911 return 1; /* the MMU fault was handled without causing real CPU fault */
912 /* now we have a real cpu fault */
915 /* the PC is inside the translated code. It means that we have
916 a virtual CPU fault */
917 cpu_restore_state(tb
, env
, pc
, puc
);
919 /* we restore the process signal mask as the sigreturn should
920 do it (XXX: use sigsetjmp) */
921 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
924 #elif defined (TARGET_PPC)
/* Host SIGSEGV classifier for TARGET_PPC (same contract as the i386
   variant: 0 = not MMU fault, 1 = handled, else guest fault raised via
   do_raise_exception_err or cpu_resume_from_signal).
   NOTE(review): truncated chunk -- braces/control lines missing. */
925 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
926 int is_write
, sigset_t
*old_set
,
929 TranslationBlock
*tb
;
933 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
934 #if defined(DEBUG_SIGNAL)
935 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
936 pc
, address
, is_write
, *(unsigned long *)old_set
);
938 /* XXX: locking issue */
939 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
943 /* see if it is an MMU fault */
944 ret
= cpu_ppc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
946 return 0; /* not an MMU fault */
948 return 1; /* the MMU fault was handled without causing real CPU fault */
950 /* now we have a real cpu fault */
953 /* the PC is inside the translated code. It means that we have
954 a virtual CPU fault */
955 cpu_restore_state(tb
, env
, pc
, puc
);
959 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
960 env
->nip
, env
->error_code
, tb
);
962 /* we restore the process signal mask as the sigreturn should
963 do it (XXX: use sigsetjmp) */
964 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
965 do_raise_exception_err(env
->exception_index
, env
->error_code
);
967 /* activate soft MMU for this block */
968 cpu_resume_from_signal(env
, puc
);
970 /* never comes here */
974 #elif defined(TARGET_M68K)
/* Host SIGSEGV classifier for TARGET_M68K (same contract as the i386
   variant; note page_unprotect is called with 'address' directly here,
   not h2g(address) as in the other variants -- NOTE(review): possibly a
   latent inconsistency in the original, verify against upstream).
   Truncated chunk -- braces/control lines missing. */
975 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
976 int is_write
, sigset_t
*old_set
,
979 TranslationBlock
*tb
;
983 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
984 #if defined(DEBUG_SIGNAL)
985 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
986 pc
, address
, is_write
, *(unsigned long *)old_set
);
988 /* XXX: locking issue */
989 if (is_write
&& page_unprotect(address
, pc
, puc
)) {
992 /* see if it is an MMU fault */
993 ret
= cpu_m68k_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
995 return 0; /* not an MMU fault */
997 return 1; /* the MMU fault was handled without causing real CPU fault */
998 /* now we have a real cpu fault */
1001 /* the PC is inside the translated code. It means that we have
1002 a virtual CPU fault */
1003 cpu_restore_state(tb
, env
, pc
, puc
);
1005 /* we restore the process signal mask as the sigreturn should
1006 do it (XXX: use sigsetjmp) */
1007 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1009 /* never comes here */
1013 #elif defined (TARGET_MIPS)
/* Host SIGSEGV classifier for TARGET_MIPS (same contract as the i386
   variant: 0 = not MMU fault, 1 = handled, else guest fault).
   NOTE(review): truncated chunk -- braces/control lines missing. */
1014 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1015 int is_write
, sigset_t
*old_set
,
1018 TranslationBlock
*tb
;
1022 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1023 #if defined(DEBUG_SIGNAL)
1024 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1025 pc
, address
, is_write
, *(unsigned long *)old_set
);
1027 /* XXX: locking issue */
1028 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1032 /* see if it is an MMU fault */
1033 ret
= cpu_mips_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1035 return 0; /* not an MMU fault */
1037 return 1; /* the MMU fault was handled without causing real CPU fault */
1039 /* now we have a real cpu fault */
1040 tb
= tb_find_pc(pc
);
1042 /* the PC is inside the translated code. It means that we have
1043 a virtual CPU fault */
1044 cpu_restore_state(tb
, env
, pc
, puc
);
1048 printf("PF exception: PC=0x" TARGET_FMT_lx
" error=0x%x %p\n",
1049 env
->PC
, env
->error_code
, tb
);
1051 /* we restore the process signal mask as the sigreturn should
1052 do it (XXX: use sigsetjmp) */
1053 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1054 do_raise_exception_err(env
->exception_index
, env
->error_code
);
1056 /* activate soft MMU for this block */
1057 cpu_resume_from_signal(env
, puc
);
1059 /* never comes here */
1063 #elif defined (TARGET_SH4)
/* Host SIGSEGV classifier for TARGET_SH4 (same contract as the i386
   variant; the debug printf still references env->nip -- NOTE(review):
   looks copy-pasted from the PPC variant, verify against upstream).
   Truncated chunk -- braces/control lines missing. */
1064 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1065 int is_write
, sigset_t
*old_set
,
1068 TranslationBlock
*tb
;
1072 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1073 #if defined(DEBUG_SIGNAL)
1074 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1075 pc
, address
, is_write
, *(unsigned long *)old_set
);
1077 /* XXX: locking issue */
1078 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1082 /* see if it is an MMU fault */
1083 ret
= cpu_sh4_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1085 return 0; /* not an MMU fault */
1087 return 1; /* the MMU fault was handled without causing real CPU fault */
1089 /* now we have a real cpu fault */
1090 tb
= tb_find_pc(pc
);
1092 /* the PC is inside the translated code. It means that we have
1093 a virtual CPU fault */
1094 cpu_restore_state(tb
, env
, pc
, puc
);
1097 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1098 env
->nip
, env
->error_code
, tb
);
1100 /* we restore the process signal mask as the sigreturn should
1101 do it (XXX: use sigsetjmp) */
1102 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1104 /* never comes here */
1108 #elif defined (TARGET_ALPHA)
/* Host SIGSEGV classifier for TARGET_ALPHA (same contract as the i386
   variant; debug printf references env->nip -- NOTE(review): likely
   copy-pasted from the PPC variant, verify against upstream).
   Truncated chunk -- braces/control lines missing. */
1109 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1110 int is_write
, sigset_t
*old_set
,
1113 TranslationBlock
*tb
;
1117 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1118 #if defined(DEBUG_SIGNAL)
1119 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1120 pc
, address
, is_write
, *(unsigned long *)old_set
);
1122 /* XXX: locking issue */
1123 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1127 /* see if it is an MMU fault */
1128 ret
= cpu_alpha_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1130 return 0; /* not an MMU fault */
1132 return 1; /* the MMU fault was handled without causing real CPU fault */
1134 /* now we have a real cpu fault */
1135 tb
= tb_find_pc(pc
);
1137 /* the PC is inside the translated code. It means that we have
1138 a virtual CPU fault */
1139 cpu_restore_state(tb
, env
, pc
, puc
);
1142 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1143 env
->nip
, env
->error_code
, tb
);
1145 /* we restore the process signal mask as the sigreturn should
1146 do it (XXX: use sigsetjmp) */
1147 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1149 /* never comes here */
1152 #elif defined (TARGET_CRIS)
/* Host SIGSEGV classifier for TARGET_CRIS (same contract as the i386
   variant; debug printf references env->nip -- NOTE(review): likely
   copy-pasted from the PPC variant, verify against upstream).
   Truncated chunk -- braces/control lines missing. */
1153 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1154 int is_write
, sigset_t
*old_set
,
1157 TranslationBlock
*tb
;
1161 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1162 #if defined(DEBUG_SIGNAL)
1163 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1164 pc
, address
, is_write
, *(unsigned long *)old_set
);
1166 /* XXX: locking issue */
1167 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1171 /* see if it is an MMU fault */
1172 ret
= cpu_cris_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1174 return 0; /* not an MMU fault */
1176 return 1; /* the MMU fault was handled without causing real CPU fault */
1178 /* now we have a real cpu fault */
1179 tb
= tb_find_pc(pc
);
1181 /* the PC is inside the translated code. It means that we have
1182 a virtual CPU fault */
1183 cpu_restore_state(tb
, env
, pc
, puc
);
1186 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1187 env
->nip
, env
->error_code
, tb
);
1189 /* we restore the process signal mask as the sigreturn should
1190 do it (XXX: use sigsetjmp) */
1191 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1193 /* never comes here */
1198 #error unsupported target CPU
1201 #if defined(__i386__)
1203 #if defined(__APPLE__)
1204 # include <sys/ucontext.h>
1206 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1207 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1208 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1210 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1211 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1212 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
/* Host-side SIGSEGV entry point for i386 hosts: extract the faulting PC,
   trap number and error code from the signal ucontext (via the EIP_sig/
   TRAP_sig/ERROR_sig macros above) and forward to handle_cpu_signal().
   NOTE(review): truncated -- braces and the pc assignment are missing. */
1215 int cpu_signal_handler(int host_signum
, void *pinfo
,
1218 siginfo_t
*info
= pinfo
;
1219 struct ucontext
*uc
= puc
;
1227 #define REG_TRAPNO TRAPNO
1230 trapno
= TRAP_sig(uc
);
/* is_write: for page faults (trap 0xe) bit 1 of the error code set. */
1231 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1233 (ERROR_sig(uc
) >> 1) & 1 : 0,
1234 &uc
->uc_sigmask
, puc
);
1237 #elif defined(__x86_64__)
/* Host-side SIGSEGV entry point for x86_64 hosts: read RIP, the trap
   number and error code from uc_mcontext.gregs and forward to
   handle_cpu_signal().  is_write is derived from bit 1 of the page-fault
   error code when the trap number is 0xe (page fault).
   NOTE(review): truncated -- function braces are missing in this view. */
1239 int cpu_signal_handler(int host_signum
, void *pinfo
,
1242 siginfo_t
*info
= pinfo
;
1243 struct ucontext
*uc
= puc
;
1246 pc
= uc
->uc_mcontext
.gregs
[REG_RIP
];
1247 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1248 uc
->uc_mcontext
.gregs
[REG_TRAPNO
] == 0xe ?
1249 (uc
->uc_mcontext
.gregs
[REG_ERR
] >> 1) & 1 : 0,
1250 &uc
->uc_sigmask
, puc
);
1253 #elif defined(__powerpc__)
1255 /***********************************************************************
1256 * signal context platform-specific definitions
1260 /* All Registers access - only for local access */
1261 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1262 /* Gpr Registers access */
1263 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1264 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1265 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1266 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1267 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1268 # define LR_sig(context) REG_sig(link, context) /* Link register */
1269 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1270 /* Float Registers access */
1271 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1272 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1273 /* Exception Registers access */
1274 # define DAR_sig(context) REG_sig(dar, context)
1275 # define DSISR_sig(context) REG_sig(dsisr, context)
1276 # define TRAP_sig(context) REG_sig(trap, context)
1280 # include <sys/ucontext.h>
1281 typedef struct ucontext SIGCONTEXT
;
1282 /* All Registers access - only for local access */
1283 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1284 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1285 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1286 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1287 /* Gpr Registers access */
1288 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1289 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1290 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1291 # define CTR_sig(context) REG_sig(ctr, context)
1292 # define XER_sig(context) REG_sig(xer, context) /* Link register */
1293 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1294 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1295 /* Float Registers access */
1296 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1297 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1298 /* Exception Registers access */
1299 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1300 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1301 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1302 #endif /* __APPLE__ */
1304 int cpu_signal_handler(int host_signum
, void *pinfo
,
1307 siginfo_t
*info
= pinfo
;
1308 struct ucontext
*uc
= puc
;
1316 if (DSISR_sig(uc
) & 0x00800000)
1319 if (TRAP_sig(uc
) != 0x400 && (DSISR_sig(uc
) & 0x02000000))
1322 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1323 is_write
, &uc
->uc_sigmask
, puc
);
1326 #elif defined(__alpha__)
1328 int cpu_signal_handler(int host_signum
, void *pinfo
,
1331 siginfo_t
*info
= pinfo
;
1332 struct ucontext
*uc
= puc
;
1333 uint32_t *pc
= uc
->uc_mcontext
.sc_pc
;
1334 uint32_t insn
= *pc
;
1337 /* XXX: need kernel patch to get write flag faster */
1338 switch (insn
>> 26) {
1353 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1354 is_write
, &uc
->uc_sigmask
, puc
);
1356 #elif defined(__sparc__)
1358 int cpu_signal_handler(int host_signum
, void *pinfo
,
1361 siginfo_t
*info
= pinfo
;
1362 uint32_t *regs
= (uint32_t *)(info
+ 1);
1363 void *sigmask
= (regs
+ 20);
1368 /* XXX: is there a standard glibc define ? */
1370 /* XXX: need kernel patch to get write flag faster */
1372 insn
= *(uint32_t *)pc
;
1373 if ((insn
>> 30) == 3) {
1374 switch((insn
>> 19) & 0x3f) {
1386 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1387 is_write
, sigmask
, NULL
);
1390 #elif defined(__arm__)
1392 int cpu_signal_handler(int host_signum
, void *pinfo
,
1395 siginfo_t
*info
= pinfo
;
1396 struct ucontext
*uc
= puc
;
1400 pc
= uc
->uc_mcontext
.gregs
[R15
];
1401 /* XXX: compute is_write */
1403 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1405 &uc
->uc_sigmask
, puc
);
1408 #elif defined(__mc68000)
1410 int cpu_signal_handler(int host_signum
, void *pinfo
,
1413 siginfo_t
*info
= pinfo
;
1414 struct ucontext
*uc
= puc
;
1418 pc
= uc
->uc_mcontext
.gregs
[16];
1419 /* XXX: compute is_write */
1421 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1423 &uc
->uc_sigmask
, puc
);
1426 #elif defined(__ia64)
1429 /* This ought to be in <bits/siginfo.h>... */
1430 # define __ISR_VALID 1
1433 int cpu_signal_handler(int host_signum
, void *pinfo
, void *puc
)
1435 siginfo_t
*info
= pinfo
;
1436 struct ucontext
*uc
= puc
;
1440 ip
= uc
->uc_mcontext
.sc_ip
;
1441 switch (host_signum
) {
1447 if (info
->si_code
&& (info
->si_segvflags
& __ISR_VALID
))
1448 /* ISR.W (write-access) is bit 33: */
1449 is_write
= (info
->si_isr
>> 33) & 1;
1455 return handle_cpu_signal(ip
, (unsigned long)info
->si_addr
,
1457 &uc
->uc_sigmask
, puc
);
1460 #elif defined(__s390__)
1462 int cpu_signal_handler(int host_signum
, void *pinfo
,
1465 siginfo_t
*info
= pinfo
;
1466 struct ucontext
*uc
= puc
;
1470 pc
= uc
->uc_mcontext
.psw
.addr
;
1471 /* XXX: compute is_write */
1473 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1474 is_write
, &uc
->uc_sigmask
, puc
);
1477 #elif defined(__mips__)
1479 int cpu_signal_handler(int host_signum
, void *pinfo
,
1482 siginfo_t
*info
= pinfo
;
1483 struct ucontext
*uc
= puc
;
1484 greg_t pc
= uc
->uc_mcontext
.pc
;
1487 /* XXX: compute is_write */
1489 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1490 is_write
, &uc
->uc_sigmask
, puc
);
1495 #error host CPU specific signal handler needed
1499 #endif /* !defined(CONFIG_SOFTMMU) */