2 * i386 emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #if !defined(CONFIG_SOFTMMU)
35 #include <sys/ucontext.h>
/* Global flag: set to 1 by tb_find_slow() when a translation block may
   have been invalidated while generating code; tb_find_fast() checks it
   to recompute its jump-cache entry.  Cleared at the start of each
   tb_find_slow() call (see below). */
38 int tb_invalidated_flag
;
41 //#define DEBUG_SIGNAL
/* Abort execution of the current translated block and return to the
   setjmp() point established in cpu_exec().
   NOTE(review): this chunk lost lines in extraction (the embedded
   original numbering jumps: 44, 47, 49 are missing) — presumably the
   braces and a regs_to_env() call; left byte-identical, comments only. */
43 void cpu_loop_exit(void)
45 /* NOTE: the register at this point must be saved by hand because
46 longjmp restore them */
48 longjmp(env
->jmp_env
, 1);
51 #if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
/* Re-enter the CPU emulation loop from a host signal handler: restores
   the caller's signal mask (when not using the soft MMU) and longjmp()s
   back to the setjmp() in cpu_exec().  'puc' is the host ucontext
   passed to the signal handler.
   NOTE(review): extraction dropped lines here (original numbering
   jumps); code left byte-identical, comments only added. */
55 /* exit the current TB from a signal handler. The host registers are
56 restored in a state compatible with the CPU emulator
58 void cpu_resume_from_signal(CPUState
*env1
, void *puc
)
60 #if !defined(CONFIG_SOFTMMU)
61 struct ucontext
*uc
= puc
;
66 /* XXX: restore cpu registers saved in host registers */
68 #if !defined(CONFIG_SOFTMMU)
/* Restore the signal mask by hand since we leave the handler via
   longjmp() rather than sigreturn(). */
70 /* XXX: use siglongjmp ? */
71 sigprocmask(SIG_SETMASK
, &uc
->uc_sigmask
, NULL
);
74 longjmp(env
->jmp_env
, 1);
/* Slow-path TB lookup: search the physical-address hash table for a
   translation block matching (pc, cs_base, flags); if none is found,
   translate the code at 'pc' now, link the new TB into the physical
   hash, and record it in the virtual-pc jump cache.
   NOTE(review): extraction dropped lines throughout this function
   (loop header around original line 100-104, labels, braces); code is
   left byte-identical, comments only added. */
78 static TranslationBlock
*tb_find_slow(target_ulong pc
,
82 TranslationBlock
*tb
, **ptb1
;
85 target_ulong phys_pc
, phys_page1
, phys_page2
, virt_page2
;
90 tb_invalidated_flag
= 0;
92 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
94 /* find translated block using physical mappings */
95 phys_pc
= get_phys_addr_code(env
, pc
);
96 phys_page1
= phys_pc
& TARGET_PAGE_MASK
;
98 h
= tb_phys_hash_func(phys_pc
);
99 ptb1
= &tb_phys_hash
[h
];
/* Walk the hash chain; a TB matches only if pc, first-page physical
   address, cs_base and flags all agree. */
105 tb
->page_addr
[0] == phys_page1
&&
106 tb
->cs_base
== cs_base
&&
107 tb
->flags
== flags
) {
108 /* check next page if needed */
109 if (tb
->page_addr
[1] != -1) {
110 virt_page2
= (pc
& TARGET_PAGE_MASK
) +
112 phys_page2
= get_phys_addr_code(env
, virt_page2
);
113 if (tb
->page_addr
[1] == phys_page2
)
119 ptb1
= &tb
->phys_hash_next
;
122 /* if no translated code available, then translate it now */
125 /* flush must be done */
127 /* cannot fail at this point */
129 /* don't forget to invalidate previous TB info */
130 tb_invalidated_flag
= 1;
132 tc_ptr
= code_gen_ptr
;
134 tb
->cs_base
= cs_base
;
136 cpu_gen_code(env
, tb
, CODE_GEN_MAX_SIZE
, &code_gen_size
);
/* Advance the code-generation pointer, rounded up to CODE_GEN_ALIGN. */
137 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
139 /* check next page if needed */
140 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
142 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
143 phys_page2
= get_phys_addr_code(env
, virt_page2
);
145 tb_link_phys(tb
, phys_pc
, phys_page2
);
148 /* we add the TB in the virtual pc hash table */
149 env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)] = tb
;
150 spin_unlock(&tb_lock
);
/* Fast-path TB lookup: compute the (pc, cs_base, flags) triple that
   identifies a translation block for the current target CPU state and
   probe the per-CPU virtual-pc jump cache; fall back to tb_find_slow()
   on a miss.  Each target architecture encodes a different subset of
   its state into 'flags'.
   NOTE(review): extraction dropped lines (several per-target branches
   are visibly truncated); code left byte-identical, comments only. */
154 static inline TranslationBlock
*tb_find_fast(void)
156 TranslationBlock
*tb
;
157 target_ulong cs_base
, pc
;
160 /* we record a subset of the CPU state. It will
161 always be the same before a given translated block
163 #if defined(TARGET_I386)
165 flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
166 flags
|= env
->intercept
;
167 cs_base
= env
->segs
[R_CS
].base
;
168 pc
= cs_base
+ env
->eip
;
169 #elif defined(TARGET_ARM)
170 flags
= env
->thumb
| (env
->vfp
.vec_len
<< 1)
171 | (env
->vfp
.vec_stride
<< 4);
172 if ((env
->uncached_cpsr
& CPSR_M
) != ARM_CPU_MODE_USR
)
174 if (env
->vfp
.xregs
[ARM_VFP_FPEXC
] & (1 << 30))
176 flags
|= (env
->condexec_bits
<< 8);
179 #elif defined(TARGET_SPARC)
180 #ifdef TARGET_SPARC64
181 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
182 flags
= (((env
->pstate
& PS_PEF
) >> 1) | ((env
->fprs
& FPRS_FEF
) << 2))
183 | (env
->pstate
& PS_PRIV
) | ((env
->lsu
& (DMMU_E
| IMMU_E
)) >> 2);
185 // FPU enable . Supervisor
186 flags
= (env
->psref
<< 4) | env
->psrs
;
190 #elif defined(TARGET_PPC)
194 #elif defined(TARGET_MIPS)
195 flags
= env
->hflags
& (MIPS_HFLAG_TMASK
| MIPS_HFLAG_BMASK
);
197 pc
= env
->PC
[env
->current_tc
];
198 #elif defined(TARGET_M68K)
199 flags
= (env
->fpcr
& M68K_FPCR_PREC
) /* Bit 6 */
200 | (env
->sr
& SR_S
) /* Bit 13 */
201 | ((env
->macsr
>> 4) & 0xf); /* Bits 0-3 */
204 #elif defined(TARGET_SH4)
205 flags
= env
->sr
& (SR_MD
| SR_RB
);
206 cs_base
= 0; /* XXXXX */
208 #elif defined(TARGET_ALPHA)
212 #elif defined(TARGET_CRIS)
217 #error unsupported CPU
/* Probe the jump cache; __builtin_expect marks a miss as unlikely. */
219 tb
= env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)];
220 if (__builtin_expect(!tb
|| tb
->pc
!= pc
|| tb
->cs_base
!= cs_base
||
221 tb
->flags
!= flags
, 0)) {
222 tb
= tb_find_slow(pc
, cs_base
, flags
);
223 /* Note: we do it here to avoid a gcc bug on Mac OS X when
224 doing it in tb_find_slow */
225 if (tb_invalidated_flag
) {
226 /* as some TB could have been invalidated because
227 of memory exceptions while generating the code, we
228 must recompute the hash index here */
236 /* main execution loop */
/* Main execution loop: saves host registers, converts target flags to
   the internal "CPU temporary" format, establishes the setjmp() exception
   re-entry point, then repeatedly (a) delivers pending exceptions and
   hardware interrupts for the current target, (b) finds/translates the
   next TB via tb_find_fast(), optionally chains it to the previous TB,
   and (c) jumps into the generated host code.  Returns the exception
   index that terminated the loop.
   NOTE(review): this extraction dropped many lines (the embedded
   original numbering jumps repeatedly: loop headers, braces, #endif
   lines, whole statements).  Code is left byte-identical; only comments
   were added, each grounded in the visible lines. */
238 int cpu_exec(CPUState
*env1
)
240 #define DECLARE_HOST_REGS 1
241 #include "hostregs_helper.h"
242 #if defined(TARGET_SPARC)
243 #if defined(reg_REGWPTR)
244 uint32_t *saved_regwptr
;
247 #if defined(__sparc__) && !defined(HOST_SOLARIS)
251 int ret
, interrupt_request
;
252 void (*gen_func
)(void);
253 TranslationBlock
*tb
;
/* A halted CPU with nothing pending returns immediately. */
256 if (cpu_halted(env1
) == EXCP_HALTED
)
259 cpu_single_env
= env1
;
261 /* first we save global registers */
262 #define SAVE_HOST_REGS 1
263 #include "hostregs_helper.h"
265 #if defined(__sparc__) && !defined(HOST_SOLARIS)
266 /* we also save i7 because longjmp may not restore it */
267 asm volatile ("mov %%i7, %0" : "=r" (saved_i7
));
/* Per-target entry conversion into the internal flag representation. */
271 #if defined(TARGET_I386)
272 /* put eflags in CPU temporary format */
273 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
274 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
275 CC_OP
= CC_OP_EFLAGS
;
276 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
277 #elif defined(TARGET_SPARC)
278 #if defined(reg_REGWPTR)
279 saved_regwptr
= REGWPTR
;
281 #elif defined(TARGET_M68K)
282 env
->cc_op
= CC_OP_FLAGS
;
283 env
->cc_dest
= env
->sr
& 0xf;
284 env
->cc_x
= (env
->sr
>> 4) & 1;
285 #elif defined(TARGET_ALPHA)
286 #elif defined(TARGET_ARM)
287 #elif defined(TARGET_PPC)
288 #elif defined(TARGET_MIPS)
289 #elif defined(TARGET_SH4)
290 #elif defined(TARGET_CRIS)
293 #error unsupported target CPU
295 env
->exception_index
= -1;
297 /* prepare setjmp context for exception handling */
/* cpu_loop_exit()/cpu_resume_from_signal() longjmp back here. */
299 if (setjmp(env
->jmp_env
) == 0) {
300 env
->current_tb
= NULL
;
301 /* if an exception is pending, we execute it here */
302 if (env
->exception_index
>= 0) {
303 if (env
->exception_index
>= EXCP_INTERRUPT
) {
304 /* exit request from the cpu execution loop */
305 ret
= env
->exception_index
;
307 } else if (env
->user_mode_only
) {
308 /* if user mode only, we simulate a fake exception
309 which will be handled outside the cpu execution
311 #if defined(TARGET_I386)
312 do_interrupt_user(env
->exception_index
,
313 env
->exception_is_int
,
315 env
->exception_next_eip
);
317 ret
= env
->exception_index
;
320 #if defined(TARGET_I386)
321 /* simulate a real cpu exception. On i386, it can
322 trigger new exceptions, but we do not handle
323 double or triple faults yet. */
324 do_interrupt(env
->exception_index
,
325 env
->exception_is_int
,
327 env
->exception_next_eip
, 0);
328 /* successfully delivered */
329 env
->old_exception
= -1;
330 #elif defined(TARGET_PPC)
332 #elif defined(TARGET_MIPS)
334 #elif defined(TARGET_SPARC)
335 do_interrupt(env
->exception_index
);
336 #elif defined(TARGET_ARM)
338 #elif defined(TARGET_SH4)
340 #elif defined(TARGET_ALPHA)
342 #elif defined(TARGET_CRIS)
344 #elif defined(TARGET_M68K)
348 env
->exception_index
= -1;
/* kqemu accelerator path: flags are converted back to the standard
   eflags layout before handing control to kqemu, then re-split. */
351 if (kqemu_is_ok(env
) && env
->interrupt_request
== 0) {
353 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
354 ret
= kqemu_cpu_exec(env
);
355 /* put eflags in CPU temporary format */
356 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
357 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
358 CC_OP
= CC_OP_EFLAGS
;
359 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
362 longjmp(env
->jmp_env
, 1);
363 } else if (ret
== 2) {
364 /* softmmu execution needed */
366 if (env
->interrupt_request
!= 0) {
367 /* hardware interrupt will be executed just after */
369 /* otherwise, we restart */
370 longjmp(env
->jmp_env
, 1);
376 T0
= 0; /* force lookup of first TB */
378 #if defined(__sparc__) && !defined(HOST_SOLARIS)
379 /* g1 can be modified by some libc? functions */
/* Inner loop: snapshot interrupt_request and dispatch per-target
   interrupt delivery before running the next TB. */
382 interrupt_request
= env
->interrupt_request
;
383 if (__builtin_expect(interrupt_request
, 0)
384 #if defined(TARGET_I386)
385 && env
->hflags
& HF_GIF_MASK
388 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
389 env
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
390 env
->exception_index
= EXCP_DEBUG
;
393 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
394 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
395 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
396 env
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
398 env
->exception_index
= EXCP_HLT
;
402 #if defined(TARGET_I386)
403 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
404 !(env
->hflags
& HF_SMM_MASK
)) {
405 svm_check_intercept(SVM_EXIT_SMI
);
406 env
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
408 #if defined(__sparc__) && !defined(HOST_SOLARIS)
413 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
414 (env
->eflags
& IF_MASK
|| env
->hflags
& HF_HIF_MASK
) &&
415 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
417 svm_check_intercept(SVM_EXIT_INTR
);
418 env
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VIRQ
);
419 intno
= cpu_get_pic_interrupt(env
);
420 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
421 fprintf(logfile
, "Servicing hardware INT=0x%02x\n", intno
);
423 do_interrupt(intno
, 0, 0, 0, 1);
424 /* ensure that no TB jump will be modified as
425 the program flow was changed */
426 #if defined(__sparc__) && !defined(HOST_SOLARIS)
431 #if !defined(CONFIG_USER_ONLY)
432 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
433 (env
->eflags
& IF_MASK
) && !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
435 /* FIXME: this should respect TPR */
436 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
437 svm_check_intercept(SVM_EXIT_VINTR
);
/* SVM virtual interrupt: vector is fetched from, and V_IRQ cleared
   in, the guest's VMCB in physical memory. */
438 intno
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_vector
));
439 if (loglevel
& CPU_LOG_TB_IN_ASM
)
440 fprintf(logfile
, "Servicing virtual hardware INT=0x%02x\n", intno
);
441 do_interrupt(intno
, 0, 0, -1, 1);
442 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
),
443 ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
)) & ~V_IRQ_MASK
);
444 #if defined(__sparc__) && !defined(HOST_SOLARIS)
451 #elif defined(TARGET_PPC)
453 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
457 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
458 ppc_hw_interrupt(env
);
459 if (env
->pending_interrupts
== 0)
460 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
461 #if defined(__sparc__) && !defined(HOST_SOLARIS)
467 #elif defined(TARGET_MIPS)
468 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
469 (env
->CP0_Status
& env
->CP0_Cause
& CP0Ca_IP_mask
) &&
470 (env
->CP0_Status
& (1 << CP0St_IE
)) &&
471 !(env
->CP0_Status
& (1 << CP0St_EXL
)) &&
472 !(env
->CP0_Status
& (1 << CP0St_ERL
)) &&
473 !(env
->hflags
& MIPS_HFLAG_DM
)) {
475 env
->exception_index
= EXCP_EXT_INTERRUPT
;
478 #if defined(__sparc__) && !defined(HOST_SOLARIS)
484 #elif defined(TARGET_SPARC)
485 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
487 int pil
= env
->interrupt_index
& 15;
488 int type
= env
->interrupt_index
& 0xf0;
490 if (((type
== TT_EXTINT
) &&
491 (pil
== 15 || pil
> env
->psrpil
)) ||
493 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
494 do_interrupt(env
->interrupt_index
);
495 env
->interrupt_index
= 0;
496 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
499 #if defined(__sparc__) && !defined(HOST_SOLARIS)
505 } else if (interrupt_request
& CPU_INTERRUPT_TIMER
) {
506 //do_interrupt(0, 0, 0, 0, 0);
507 env
->interrupt_request
&= ~CPU_INTERRUPT_TIMER
;
509 #elif defined(TARGET_ARM)
510 if (interrupt_request
& CPU_INTERRUPT_FIQ
511 && !(env
->uncached_cpsr
& CPSR_F
)) {
512 env
->exception_index
= EXCP_FIQ
;
515 /* ARMv7-M interrupt return works by loading a magic value
516 into the PC. On real hardware the load causes the
517 return to occur. The qemu implementation performs the
518 jump normally, then does the exception return when the
519 CPU tries to execute code at the magic address.
520 This will cause the magic PC value to be pushed to
521 the stack if an interrupt occured at the wrong time.
522 We avoid this by disabling interrupts when
523 pc contains a magic address. */
524 if (interrupt_request
& CPU_INTERRUPT_HARD
525 && ((IS_M(env
) && env
->regs
[15] < 0xfffffff0)
526 || !(env
->uncached_cpsr
& CPSR_I
))) {
527 env
->exception_index
= EXCP_IRQ
;
530 #elif defined(TARGET_SH4)
532 #elif defined(TARGET_ALPHA)
533 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
536 #elif defined(TARGET_CRIS)
537 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
539 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
541 #elif defined(TARGET_M68K)
542 if (interrupt_request
& CPU_INTERRUPT_HARD
543 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
544 < env
->pending_level
) {
545 /* Real hardware gets the interrupt vector via an
546 IACK cycle at this point. Current emulated
547 hardware doesn't rely on this, so we
548 provide/save the vector when the interrupt is
550 env
->exception_index
= env
->pending_vector
;
554 /* Don't use the cached interupt_request value,
555 do_interrupt may have updated the EXITTB flag. */
556 if (env
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
557 env
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
558 /* ensure that no TB jump will be modified as
559 the program flow was changed */
560 #if defined(__sparc__) && !defined(HOST_SOLARIS)
566 if (interrupt_request
& CPU_INTERRUPT_EXIT
) {
567 env
->interrupt_request
&= ~CPU_INTERRUPT_EXIT
;
568 env
->exception_index
= EXCP_INTERRUPT
;
/* Optional CPU-state trace, per target, gated on loglevel. */
573 if ((loglevel
& CPU_LOG_TB_CPU
)) {
574 /* restore flags in standard format */
576 #if defined(TARGET_I386)
577 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
578 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
579 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
580 #elif defined(TARGET_ARM)
581 cpu_dump_state(env
, logfile
, fprintf
, 0);
582 #elif defined(TARGET_SPARC)
583 REGWPTR
= env
->regbase
+ (env
->cwp
* 16);
584 env
->regwptr
= REGWPTR
;
585 cpu_dump_state(env
, logfile
, fprintf
, 0);
586 #elif defined(TARGET_PPC)
587 cpu_dump_state(env
, logfile
, fprintf
, 0);
588 #elif defined(TARGET_M68K)
589 cpu_m68k_flush_flags(env
, env
->cc_op
);
590 env
->cc_op
= CC_OP_FLAGS
;
591 env
->sr
= (env
->sr
& 0xffe0)
592 | env
->cc_dest
| (env
->cc_x
<< 4);
593 cpu_dump_state(env
, logfile
, fprintf
, 0);
594 #elif defined(TARGET_MIPS)
595 cpu_dump_state(env
, logfile
, fprintf
, 0);
596 #elif defined(TARGET_SH4)
597 cpu_dump_state(env
, logfile
, fprintf
, 0);
598 #elif defined(TARGET_ALPHA)
599 cpu_dump_state(env
, logfile
, fprintf
, 0);
600 #elif defined(TARGET_CRIS)
601 cpu_dump_state(env
, logfile
, fprintf
, 0);
603 #error unsupported target CPU
609 if ((loglevel
& CPU_LOG_EXEC
)) {
610 fprintf(logfile
, "Trace 0x%08lx [" TARGET_FMT_lx
"] %s\n",
611 (long)tb
->tc_ptr
, tb
->pc
,
612 lookup_symbol(tb
->pc
));
615 #if defined(__sparc__) && !defined(HOST_SOLARIS)
618 /* see if we can patch the calling TB. When the TB
619 spans two pages, we cannot safely do a direct
624 (env
->kqemu_enabled
!= 2) &&
626 tb
->page_addr
[1] == -1) {
628 tb_add_jump((TranslationBlock
*)(long)(T0
& ~3), T0
& 3, tb
);
629 spin_unlock(&tb_lock
);
633 env
->current_tb
= tb
;
634 /* execute the generated code */
635 gen_func
= (void *)tc_ptr
;
/* Host-specific dispatch into the translated code. */
636 #if defined(__sparc__)
637 __asm__
__volatile__("call %0\n\t"
641 : "i0", "i1", "i2", "i3", "i4", "i5",
642 "o0", "o1", "o2", "o3", "o4", "o5",
643 "l0", "l1", "l2", "l3", "l4", "l5",
645 #elif defined(__arm__)
646 asm volatile ("mov pc, %0\n\t"
647 ".global exec_loop\n\t"
651 : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
652 #elif defined(__ia64)
659 fp
.gp
= code_gen_buffer
+ 2 * (1 << 20);
660 (*(void (*)(void)) &fp
)();
664 env
->current_tb
= NULL
;
665 /* reset soft MMU for next block (it can currently
666 only be set by a memory fault) */
667 #if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
668 if (env
->hflags
& HF_SOFTMMU_MASK
) {
669 env
->hflags
&= ~HF_SOFTMMU_MASK
;
670 /* do not allow linking to another block */
674 #if defined(USE_KQEMU)
675 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
676 if (kqemu_is_ok(env
) &&
677 (cpu_get_time_fast() - env
->last_io_time
) >= MIN_CYCLE_BEFORE_SWITCH
) {
/* Loop exit: convert internal flag state back to the architectural
   format before returning to the caller. */
688 #if defined(TARGET_I386)
689 /* restore flags in standard format */
690 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
691 #elif defined(TARGET_ARM)
692 /* XXX: Save/restore host fpu exception state?. */
693 #elif defined(TARGET_SPARC)
694 #if defined(reg_REGWPTR)
695 REGWPTR
= saved_regwptr
;
697 #elif defined(TARGET_PPC)
698 #elif defined(TARGET_M68K)
699 cpu_m68k_flush_flags(env
, env
->cc_op
);
700 env
->cc_op
= CC_OP_FLAGS
;
701 env
->sr
= (env
->sr
& 0xffe0)
702 | env
->cc_dest
| (env
->cc_x
<< 4);
703 #elif defined(TARGET_MIPS)
704 #elif defined(TARGET_SH4)
705 #elif defined(TARGET_ALPHA)
706 #elif defined(TARGET_CRIS)
709 #error unsupported target CPU
712 /* restore global registers */
713 #if defined(__sparc__) && !defined(HOST_SOLARIS)
714 asm volatile ("mov %0, %%i7" : : "r" (saved_i7
));
716 #include "hostregs_helper.h"
718 /* fail safe : never use cpu_single_env outside cpu_exec() */
719 cpu_single_env
= NULL
;
/* Invalidate any translated blocks covering [start, end): translates the
   virtual range to a physical code address and delegates to
   tb_invalidate_phys_page_range().
   NOTE(review): extraction dropped lines here (braces, an #if guard per
   the XXX comment); code left byte-identical, comments only. */
723 /* must only be called from the generated code as an exception can be
725 void tb_invalidate_page_range(target_ulong start
, target_ulong end
)
727 /* XXX: cannot enable it yet because it yields to MMU exception
728 where NIP != read address on PowerPC */
730 target_ulong phys_addr
;
731 phys_addr
= get_phys_addr_code(env
, start
);
732 tb_invalidate_phys_page_range(phys_addr
, phys_addr
+ end
- start
, 0);
736 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
/* User-mode helper: load segment register 'seg_reg' with 'selector'.
   In real/VM86 mode (CR0.PE clear or EFLAGS.VM set) the segment cache is
   filled directly with base = selector << 4; in protected mode the full
   load_seg() descriptor load is used.
   NOTE(review): extraction dropped lines (saved_env save/restore body);
   code left byte-identical, comments only. */
738 void cpu_x86_load_seg(CPUX86State
*s
, int seg_reg
, int selector
)
740 CPUX86State
*saved_env
;
744 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
746 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
747 (selector
<< 4), 0xffff, 0);
749 load_seg(seg_reg
, selector
);
/* User-mode helper: execute an FSAVE to guest memory at 'ptr'
   ('data32' selects the 32-bit operand-size layout), via helper_fsave().
   NOTE(review): extraction dropped the saved_env save/restore lines;
   code left byte-identical, comments only. */
754 void cpu_x86_fsave(CPUX86State
*s
, uint8_t *ptr
, int data32
)
756 CPUX86State
*saved_env
;
761 helper_fsave((target_ulong
)ptr
, data32
);
/* User-mode helper: execute an FRSTOR from guest memory at 'ptr'
   ('data32' selects the 32-bit operand-size layout), via helper_frstor().
   NOTE(review): extraction dropped the saved_env save/restore lines;
   code left byte-identical, comments only. */
766 void cpu_x86_frstor(CPUX86State
*s
, uint8_t *ptr
, int data32
)
768 CPUX86State
*saved_env
;
773 helper_frstor((target_ulong
)ptr
, data32
);
778 #endif /* TARGET_I386 */
780 #if !defined(CONFIG_SOFTMMU)
782 #if defined(TARGET_I386)
/* i386-target SIGSEGV handler core: returns 0 if the fault is not an
   MMU fault (caller re-raises), 1 if it was handled transparently;
   otherwise delivers a guest page fault via raise_exception_err() or
   re-enters the loop through cpu_resume_from_signal() (never returns).
   NOTE(review): extraction dropped lines (braces, tb_find_pc() call,
   #endif lines); code left byte-identical, comments only. */
784 /* 'pc' is the host PC at which the exception was raised. 'address' is
785 the effective address of the memory exception. 'is_write' is 1 if a
786 write caused the exception and otherwise 0'. 'old_set' is the
787 signal set which should be restored */
788 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
789 int is_write
, sigset_t
*old_set
,
792 TranslationBlock
*tb
;
796 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
797 #if defined(DEBUG_SIGNAL)
798 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
799 pc
, address
, is_write
, *(unsigned long *)old_set
);
801 /* XXX: locking issue */
802 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
806 /* see if it is an MMU fault */
807 ret
= cpu_x86_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
809 return 0; /* not an MMU fault */
811 return 1; /* the MMU fault was handled without causing real CPU fault */
812 /* now we have a real cpu fault */
815 /* the PC is inside the translated code. It means that we have
816 a virtual CPU fault */
817 cpu_restore_state(tb
, env
, pc
, puc
);
821 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
822 env
->eip
, env
->cr
[2], env
->error_code
);
824 /* we restore the process signal mask as the sigreturn should
825 do it (XXX: use sigsetjmp) */
826 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
827 raise_exception_err(env
->exception_index
, env
->error_code
);
829 /* activate soft MMU for this block */
830 env
->hflags
|= HF_SOFTMMU_MASK
;
831 cpu_resume_from_signal(env
, puc
);
833 /* never comes here */
837 #elif defined(TARGET_ARM)
/* ARM-target SIGSEGV handler core: same contract as the i386 variant
   above but using cpu_arm_handle_mmu_fault().
   NOTE(review): extraction dropped lines (braces, tb_find_pc() call);
   code left byte-identical, comments only. */
838 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
839 int is_write
, sigset_t
*old_set
,
842 TranslationBlock
*tb
;
846 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
847 #if defined(DEBUG_SIGNAL)
848 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
849 pc
, address
, is_write
, *(unsigned long *)old_set
);
851 /* XXX: locking issue */
852 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
855 /* see if it is an MMU fault */
856 ret
= cpu_arm_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
858 return 0; /* not an MMU fault */
860 return 1; /* the MMU fault was handled without causing real CPU fault */
861 /* now we have a real cpu fault */
864 /* the PC is inside the translated code. It means that we have
865 a virtual CPU fault */
866 cpu_restore_state(tb
, env
, pc
, puc
);
868 /* we restore the process signal mask as the sigreturn should
869 do it (XXX: use sigsetjmp) */
870 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
873 #elif defined(TARGET_SPARC)
/* SPARC-target SIGSEGV handler core: same contract as the i386 variant
   above but using cpu_sparc_handle_mmu_fault().
   NOTE(review): extraction dropped lines (braces, tb_find_pc() call);
   code left byte-identical, comments only. */
874 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
875 int is_write
, sigset_t
*old_set
,
878 TranslationBlock
*tb
;
882 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
883 #if defined(DEBUG_SIGNAL)
884 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
885 pc
, address
, is_write
, *(unsigned long *)old_set
);
887 /* XXX: locking issue */
888 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
891 /* see if it is an MMU fault */
892 ret
= cpu_sparc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
894 return 0; /* not an MMU fault */
896 return 1; /* the MMU fault was handled without causing real CPU fault */
897 /* now we have a real cpu fault */
900 /* the PC is inside the translated code. It means that we have
901 a virtual CPU fault */
902 cpu_restore_state(tb
, env
, pc
, puc
);
904 /* we restore the process signal mask as the sigreturn should
905 do it (XXX: use sigsetjmp) */
906 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
909 #elif defined (TARGET_PPC)
/* PowerPC-target SIGSEGV handler core: same contract as the i386
   variant; unhandled faults are delivered via do_raise_exception_err()
   or cpu_resume_from_signal() (never returns).
   NOTE(review): extraction dropped lines (braces, tb_find_pc() call);
   code left byte-identical, comments only. */
910 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
911 int is_write
, sigset_t
*old_set
,
914 TranslationBlock
*tb
;
918 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
919 #if defined(DEBUG_SIGNAL)
920 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
921 pc
, address
, is_write
, *(unsigned long *)old_set
);
923 /* XXX: locking issue */
924 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
928 /* see if it is an MMU fault */
929 ret
= cpu_ppc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
931 return 0; /* not an MMU fault */
933 return 1; /* the MMU fault was handled without causing real CPU fault */
935 /* now we have a real cpu fault */
938 /* the PC is inside the translated code. It means that we have
939 a virtual CPU fault */
940 cpu_restore_state(tb
, env
, pc
, puc
);
944 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
945 env
->nip
, env
->error_code
, tb
);
947 /* we restore the process signal mask as the sigreturn should
948 do it (XXX: use sigsetjmp) */
949 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
950 do_raise_exception_err(env
->exception_index
, env
->error_code
);
952 /* activate soft MMU for this block */
953 cpu_resume_from_signal(env
, puc
);
955 /* never comes here */
959 #elif defined(TARGET_M68K)
/* M68K-target SIGSEGV handler core: same contract as the i386 variant
   above but using cpu_m68k_handle_mmu_fault().  Note this variant calls
   page_unprotect() on 'address' directly (no h2g() translation) —
   NOTE(review): possibly an upstream inconsistency; left as-is.
   Extraction dropped lines (braces, tb_find_pc() call); code left
   byte-identical, comments only. */
960 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
961 int is_write
, sigset_t
*old_set
,
964 TranslationBlock
*tb
;
968 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
969 #if defined(DEBUG_SIGNAL)
970 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
971 pc
, address
, is_write
, *(unsigned long *)old_set
);
973 /* XXX: locking issue */
974 if (is_write
&& page_unprotect(address
, pc
, puc
)) {
977 /* see if it is an MMU fault */
978 ret
= cpu_m68k_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
980 return 0; /* not an MMU fault */
982 return 1; /* the MMU fault was handled without causing real CPU fault */
983 /* now we have a real cpu fault */
986 /* the PC is inside the translated code. It means that we have
987 a virtual CPU fault */
988 cpu_restore_state(tb
, env
, pc
, puc
);
990 /* we restore the process signal mask as the sigreturn should
991 do it (XXX: use sigsetjmp) */
992 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
994 /* never comes here */
998 #elif defined (TARGET_MIPS)
/* MIPS-target SIGSEGV handler core: same contract as the i386 variant;
   unhandled faults are delivered via do_raise_exception_err() or
   cpu_resume_from_signal() (never returns).
   NOTE(review): extraction dropped lines (braces and surrounding
   statements); code left byte-identical, comments only. */
999 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1000 int is_write
, sigset_t
*old_set
,
1003 TranslationBlock
*tb
;
1007 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1008 #if defined(DEBUG_SIGNAL)
1009 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1010 pc
, address
, is_write
, *(unsigned long *)old_set
);
1012 /* XXX: locking issue */
1013 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1017 /* see if it is an MMU fault */
1018 ret
= cpu_mips_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1020 return 0; /* not an MMU fault */
1022 return 1; /* the MMU fault was handled without causing real CPU fault */
1024 /* now we have a real cpu fault */
1025 tb
= tb_find_pc(pc
);
1027 /* the PC is inside the translated code. It means that we have
1028 a virtual CPU fault */
1029 cpu_restore_state(tb
, env
, pc
, puc
);
1033 printf("PF exception: PC=0x" TARGET_FMT_lx
" error=0x%x %p\n",
1034 env
->PC
, env
->error_code
, tb
);
1036 /* we restore the process signal mask as the sigreturn should
1037 do it (XXX: use sigsetjmp) */
1038 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1039 do_raise_exception_err(env
->exception_index
, env
->error_code
);
1041 /* activate soft MMU for this block */
1042 cpu_resume_from_signal(env
, puc
);
1044 /* never comes here */
1048 #elif defined (TARGET_SH4)
/* SH4-target SIGSEGV handler core: same contract as the i386 variant
   above but using cpu_sh4_handle_mmu_fault().
   NOTE(review): the error printf references env->nip, which looks
   copied from the PPC variant — confirm against upstream.  Extraction
   dropped lines; code left byte-identical, comments only. */
1049 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1050 int is_write
, sigset_t
*old_set
,
1053 TranslationBlock
*tb
;
1057 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1058 #if defined(DEBUG_SIGNAL)
1059 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1060 pc
, address
, is_write
, *(unsigned long *)old_set
);
1062 /* XXX: locking issue */
1063 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1067 /* see if it is an MMU fault */
1068 ret
= cpu_sh4_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1070 return 0; /* not an MMU fault */
1072 return 1; /* the MMU fault was handled without causing real CPU fault */
1074 /* now we have a real cpu fault */
1075 tb
= tb_find_pc(pc
);
1077 /* the PC is inside the translated code. It means that we have
1078 a virtual CPU fault */
1079 cpu_restore_state(tb
, env
, pc
, puc
);
1082 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1083 env
->nip
, env
->error_code
, tb
);
1085 /* we restore the process signal mask as the sigreturn should
1086 do it (XXX: use sigsetjmp) */
1087 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1089 /* never comes here */
1093 #elif defined (TARGET_ALPHA)
/* Alpha-target SIGSEGV handler core: same contract as the i386 variant
   above but using cpu_alpha_handle_mmu_fault().
   NOTE(review): the error printf references env->nip (PPC field name) —
   confirm against upstream.  Extraction dropped lines; code left
   byte-identical, comments only. */
1094 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1095 int is_write
, sigset_t
*old_set
,
1098 TranslationBlock
*tb
;
1102 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1103 #if defined(DEBUG_SIGNAL)
1104 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1105 pc
, address
, is_write
, *(unsigned long *)old_set
);
1107 /* XXX: locking issue */
1108 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1112 /* see if it is an MMU fault */
1113 ret
= cpu_alpha_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1115 return 0; /* not an MMU fault */
1117 return 1; /* the MMU fault was handled without causing real CPU fault */
1119 /* now we have a real cpu fault */
1120 tb
= tb_find_pc(pc
);
1122 /* the PC is inside the translated code. It means that we have
1123 a virtual CPU fault */
1124 cpu_restore_state(tb
, env
, pc
, puc
);
1127 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1128 env
->nip
, env
->error_code
, tb
);
1130 /* we restore the process signal mask as the sigreturn should
1131 do it (XXX: use sigsetjmp) */
1132 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1134 /* never comes here */
1137 #elif defined (TARGET_CRIS)
/* CRIS-target SIGSEGV handler core: same contract as the i386 variant
   above but using cpu_cris_handle_mmu_fault().
   NOTE(review): the error printf references env->nip (PPC field name) —
   confirm against upstream.  Extraction dropped lines; code left
   byte-identical, comments only. */
1138 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1139 int is_write
, sigset_t
*old_set
,
1142 TranslationBlock
*tb
;
1146 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1147 #if defined(DEBUG_SIGNAL)
1148 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1149 pc
, address
, is_write
, *(unsigned long *)old_set
);
1151 /* XXX: locking issue */
1152 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1156 /* see if it is an MMU fault */
1157 ret
= cpu_cris_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1159 return 0; /* not an MMU fault */
1161 return 1; /* the MMU fault was handled without causing real CPU fault */
1163 /* now we have a real cpu fault */
1164 tb
= tb_find_pc(pc
);
1166 /* the PC is inside the translated code. It means that we have
1167 a virtual CPU fault */
1168 cpu_restore_state(tb
, env
, pc
, puc
);
1171 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1172 env
->nip
, env
->error_code
, tb
);
1174 /* we restore the process signal mask as the sigreturn should
1175 do it (XXX: use sigsetjmp) */
1176 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1178 /* never comes here */
1183 #error unsupported target CPU
1186 #if defined(__i386__)
1188 #if defined(__APPLE__)
1189 # include <sys/ucontext.h>
1191 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1192 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1193 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1195 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1196 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1197 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
/* i386-host SIGSEGV entry point: extracts the faulting EIP, trap number
   and error code from the host ucontext (via the EIP_sig/TRAP_sig/
   ERROR_sig macros above) and forwards to handle_cpu_signal().  The
   write bit is taken from bit 1 of the page-fault error code.
   NOTE(review): extraction dropped lines (pc assignment, part of the
   ternary); code left byte-identical, comments only. */
1200 int cpu_signal_handler(int host_signum
, void *pinfo
,
1203 siginfo_t
*info
= pinfo
;
1204 struct ucontext
*uc
= puc
;
1212 #define REG_TRAPNO TRAPNO
1215 trapno
= TRAP_sig(uc
);
1216 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1218 (ERROR_sig(uc
) >> 1) & 1 : 0,
1219 &uc
->uc_sigmask
, puc
);
1222 #elif defined(__x86_64__)
/* x86_64-host SIGSEGV entry point: reads RIP, TRAPNO and ERR from
   uc_mcontext.gregs; a trap number of 0xe (page fault) selects bit 1 of
   the error code as the is_write flag, then forwards to
   handle_cpu_signal().
   NOTE(review): extraction dropped lines (braces, local declarations);
   code left byte-identical, comments only. */
1224 int cpu_signal_handler(int host_signum
, void *pinfo
,
1227 siginfo_t
*info
= pinfo
;
1228 struct ucontext
*uc
= puc
;
1231 pc
= uc
->uc_mcontext
.gregs
[REG_RIP
];
1232 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1233 uc
->uc_mcontext
.gregs
[REG_TRAPNO
] == 0xe ?
1234 (uc
->uc_mcontext
.gregs
[REG_ERR
] >> 1) & 1 : 0,
1235 &uc
->uc_sigmask
, puc
);
1238 #elif defined(__powerpc__)
1240 /***********************************************************************
1241 * signal context platform-specific definitions
1245 /* All Registers access - only for local access */
1246 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1247 /* Gpr Registers access */
1248 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1249 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1250 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1251 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1252 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1253 # define LR_sig(context) REG_sig(link, context) /* Link register */
1254 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1255 /* Float Registers access */
1256 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1257 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1258 /* Exception Registers access */
1259 # define DAR_sig(context) REG_sig(dar, context)
1260 # define DSISR_sig(context) REG_sig(dsisr, context)
1261 # define TRAP_sig(context) REG_sig(trap, context)
1265 # include <sys/ucontext.h>
1266 typedef struct ucontext SIGCONTEXT
;
1267 /* All Registers access - only for local access */
1268 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1269 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1270 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1271 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1272 /* Gpr Registers access */
1273 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1274 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1275 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1276 # define CTR_sig(context) REG_sig(ctr, context)
1277 # define XER_sig(context) REG_sig(xer, context) /* Link register */
1278 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1279 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1280 /* Float Registers access */
1281 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1282 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1283 /* Exception Registers access */
1284 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1285 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1286 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1287 #endif /* __APPLE__ */
1289 int cpu_signal_handler(int host_signum
, void *pinfo
,
1292 siginfo_t
*info
= pinfo
;
1293 struct ucontext
*uc
= puc
;
1301 if (DSISR_sig(uc
) & 0x00800000)
1304 if (TRAP_sig(uc
) != 0x400 && (DSISR_sig(uc
) & 0x02000000))
1307 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1308 is_write
, &uc
->uc_sigmask
, puc
);
1311 #elif defined(__alpha__)
1313 int cpu_signal_handler(int host_signum
, void *pinfo
,
1316 siginfo_t
*info
= pinfo
;
1317 struct ucontext
*uc
= puc
;
1318 uint32_t *pc
= uc
->uc_mcontext
.sc_pc
;
1319 uint32_t insn
= *pc
;
1322 /* XXX: need kernel patch to get write flag faster */
1323 switch (insn
>> 26) {
1338 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1339 is_write
, &uc
->uc_sigmask
, puc
);
1341 #elif defined(__sparc__)
1343 int cpu_signal_handler(int host_signum
, void *pinfo
,
1346 siginfo_t
*info
= pinfo
;
1347 uint32_t *regs
= (uint32_t *)(info
+ 1);
1348 void *sigmask
= (regs
+ 20);
1353 /* XXX: is there a standard glibc define ? */
1355 /* XXX: need kernel patch to get write flag faster */
1357 insn
= *(uint32_t *)pc
;
1358 if ((insn
>> 30) == 3) {
1359 switch((insn
>> 19) & 0x3f) {
1371 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1372 is_write
, sigmask
, NULL
);
1375 #elif defined(__arm__)
1377 int cpu_signal_handler(int host_signum
, void *pinfo
,
1380 siginfo_t
*info
= pinfo
;
1381 struct ucontext
*uc
= puc
;
1385 pc
= uc
->uc_mcontext
.gregs
[R15
];
1386 /* XXX: compute is_write */
1388 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1390 &uc
->uc_sigmask
, puc
);
1393 #elif defined(__mc68000)
1395 int cpu_signal_handler(int host_signum
, void *pinfo
,
1398 siginfo_t
*info
= pinfo
;
1399 struct ucontext
*uc
= puc
;
1403 pc
= uc
->uc_mcontext
.gregs
[16];
1404 /* XXX: compute is_write */
1406 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1408 &uc
->uc_sigmask
, puc
);
1411 #elif defined(__ia64)
1414 /* This ought to be in <bits/siginfo.h>... */
1415 # define __ISR_VALID 1
1418 int cpu_signal_handler(int host_signum
, void *pinfo
, void *puc
)
1420 siginfo_t
*info
= pinfo
;
1421 struct ucontext
*uc
= puc
;
1425 ip
= uc
->uc_mcontext
.sc_ip
;
1426 switch (host_signum
) {
1432 if (info
->si_code
&& (info
->si_segvflags
& __ISR_VALID
))
1433 /* ISR.W (write-access) is bit 33: */
1434 is_write
= (info
->si_isr
>> 33) & 1;
1440 return handle_cpu_signal(ip
, (unsigned long)info
->si_addr
,
1442 &uc
->uc_sigmask
, puc
);
1445 #elif defined(__s390__)
1447 int cpu_signal_handler(int host_signum
, void *pinfo
,
1450 siginfo_t
*info
= pinfo
;
1451 struct ucontext
*uc
= puc
;
1455 pc
= uc
->uc_mcontext
.psw
.addr
;
1456 /* XXX: compute is_write */
1458 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1459 is_write
, &uc
->uc_sigmask
, puc
);
1462 #elif defined(__mips__)
1464 int cpu_signal_handler(int host_signum
, void *pinfo
,
1467 siginfo_t
*info
= pinfo
;
1468 struct ucontext
*uc
= puc
;
1469 greg_t pc
= uc
->uc_mcontext
.pc
;
1472 /* XXX: compute is_write */
1474 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1475 is_write
, &uc
->uc_sigmask
, puc
);
1480 #error host CPU specific signal handler needed
1484 #endif /* !defined(CONFIG_SOFTMMU) */