/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
int tb_invalidated_flag;

//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
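/* Generated code and helpers never return here normally: cpu_exec()
   arms env->jmp_env with setjmp() before entering translated code, and
   cpu_loop_exit()/cpu_resume_from_signal() unwind back to it with
   longjmp() whenever an exception or an exit request must be handled. */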
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif
    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
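    /* the low two bits of next_tb are a tag: 0 and 1 name the jump slot
       of the TB that chained to the returned one (see the tb_add_jump()
       call in cpu_exec()), while 2 means the block stopped before
       executing any target instruction, e.g. because the instruction
       count expired. */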
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
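    /* the hash is keyed by the code's physical address, so a cached TB
       stays valid across changes of the virtual mapping; pc, cs_base and
       flags are still compared below because several execution contexts
       can map the same physical page. */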
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
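/* env->tb_jmp_cache is a small direct-mapped cache indexed by a hash of
   the virtual PC; tb_find_fast() services the common case from it and
   only falls back to the physical hash walk in tb_find_slow() above on
   a miss. */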
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
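    /* hostregs_helper.h is written to be included several times: with
       DECLARE_HOST_REGS it declares storage for the host registers that
       TCG reserves globally, with SAVE_HOST_REGS it saves them, and the
       bare include at the end of this function restores them. */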
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
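    /* the i386 condition codes are evaluated lazily: CC_SRC and CC_OP
       record the operands and kind of the last flag-setting operation,
       and DF is kept as +1/-1, so those bits are stripped from
       env->eflags while translated code runs and folded back on exit. */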
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
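            /* next_tb remembers the TB that ran last so the loop below can
               chain it directly to its successor with tb_add_jump(); it is
               cleared whenever control flow changes behind the translator's
               back (e.g. interrupt delivery) so no stale jump is patched. */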
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                           < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
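                /* tb_add_jump() patches jump slot (next_tb & 3) of the
                   previously executed TB so its generated code branches
                   straight to this TB, bypassing the lookup above on
                   future executions. */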
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
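                        /* icount bookkeeping: icount_decr.u16.low is the
                           16-bit budget decremented by translated code and
                           icount_extra holds the instructions that did not
                           fit into it; together they bound how long a TB
                           chain may run before returning here. */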
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* inner for(;;) */
        } else {
            env_to_regs();
        }
    } /* outer for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
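/* The handler returns 1 when the fault was serviced by the emulator
   (the page was unprotected, or the fault was delivered to the guest
   CPU) and 0 when it is not an emulated-MMU fault and must be forwarded
   to the application's own signal handler. */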
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    /* trap 0xe is a page fault; bit 1 of the error code is set on a
       write access */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define REG_ERR    _REG_ERR
#define REG_TRAPNO _REG_TRAPNO

#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          _UC_MACHINE_PC(uc)
#else
#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = QEMU_UC_MACHINE_PC(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)  ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)   REG_sig(gpr[reg_num], context)
# define IAR_sig(context)            REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)            REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)            REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)            REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)             REG_sig(link, context) /* Link register */
# define CR_sig(context)             REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)          (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)            REG_sig(dar, context)
# define DSISR_sig(context)          REG_sig(dsisr, context)
# define TRAP_sig(context)           REG_sig(trap, context)
#endif /* linux */
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: case 0x15: // stb, stba
        case 0x06: case 0x16: // sth, stha
        case 0x04: case 0x14: // st, sta
        case 0x07: case 0x17: // std, stda
        case 0x0e: case 0x1e: // stx, stxa
        case 0x24: case 0x34: // stf, stfa
        case 0x27: case 0x37: // stdf, stdfa
        case 0x26: case 0x36: // stqf, stqfa
        case 0x25:            // stfsr
        case 0x3c: case 0x3e: // casa, casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */