/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#define CPU_NO_GLOBAL_REGS

#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
int tb_invalidated_flag;

//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    longjmp(env->jmp_env, 1);
}
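
/* Illustrative sketch (hypothetical model_* names, guarded out of the
   build): cpu_loop_exit() pairs with the setjmp() in cpu_exec() below, so
   any helper or signal path can abandon the current TB and land back in
   the main loop. */
#if 0
#include <setjmp.h>

static jmp_buf model_jmp_env;

static void model_loop_exit(void)
{
    longjmp(model_jmp_env, 1);
}

static void model_exec(void)
{
    if (setjmp(model_jmp_env) == 0) {
        /* run translated code; a fault calls model_loop_exit() */
    }
    /* after a longjmp, control resumes here with an exception pending */
}
#endif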
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif
    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;
    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    if ((next_tb & 3) == 2) {
        /* Restore PC. This may happen if an async event occurs before
           the TB starts executing. */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
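
/* Illustrative sketch (hypothetical helpers, guarded out of the build):
   the value returned by tcg_qemu_tb_exec() packs a TranslationBlock
   pointer with a 2-bit tag in its low bits, which works because TBs are
   at least 4-byte aligned; tag 2 means the instruction count expired or
   an async event fired before the TB ran. */
#if 0
static unsigned long model_tag_tb(void *tb, int tag)
{
    return (unsigned long)tb | (tag & 3);   /* tag lives in alignment bits */
}

static void *model_untag_tb(unsigned long next_tb)
{
    return (void *)(next_tb & ~3UL);        /* strip the tag back off */
}
#endif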
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;
    regs_to_env(); /* XXX: do it just before cpu_gen_code() */
    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
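
/* Illustrative sketch (hypothetical model_* names, guarded out of the
   build): tb_phys_hash is a chained hash table keyed on the physical PC,
   so a cached TB stays valid even when the virtual mapping changes. */
#if 0
struct model_tb {
    unsigned long phys_pc;
    struct model_tb *phys_hash_next;
};

static struct model_tb *model_find(struct model_tb **bucket,
                                   unsigned long phys_pc)
{
    struct model_tb *tb;

    for (tb = *bucket; tb != NULL; tb = tb->phys_hash_next)
        if (tb->phys_pc == phys_pc)
            return tb;
    return NULL;   /* caller translates and inserts a fresh TB */
}
#endif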
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
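
/* Illustrative sketch (hypothetical names and size, guarded out of the
   build): tb_jmp_cache is a direct-mapped cache indexed by the virtual
   PC, sitting in front of the slower physical hash walk in
   tb_find_slow(). */
#if 0
#define MODEL_JMP_CACHE_BITS 12

static void *model_jmp_cache[1 << MODEL_JMP_CACHE_BITS];

static unsigned int model_jmp_cache_hash(unsigned long pc)
{
    return pc & ((1 << MODEL_JMP_CACHE_BITS) - 1);
}
#endif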
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
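
/* Illustrative usage sketch (hypothetical handler, guarded out of the
   build): a debugger front end can hook EXCP_DEBUG delivery through
   cpu_set_debug_excp_handler(). */
#if 0
static void model_debug_hook(CPUState *env)
{
    /* inspect env here, e.g. report which watchpoint fired */
}

/* at initialization time:
   CPUDebugExcpHandler *old = cpu_set_debug_excp_handler(model_debug_hook);
*/
#endif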
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
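    /* Worked example: bit 10 of EFLAGS is the x86 direction flag, so
       DF = 1 - (2 * ((eflags >> 10) & 1)) yields the string-op stride:
       bit clear gives 1 - 0 = +1, bit set gives 1 - 2 = -1. */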
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
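                    /* Note: the ARMv7-M EXC_RETURN magic values
                       (0xfffffff1, 0xfffffff9, 0xfffffffd) all lie at or
                       above 0xfffffff0, which is why regs[15] < 0xfffffff0
                       above means "not currently mid exception return". */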
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
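                /* Note: next_tb carries the calling TB in its upper bits
                   and the jump slot (0 or 1) in its low two bits, so
                   chaining is a single patch via tb_add_jump().  Chaining
                   is restricted to TBs in one guest page because a
                   two-page TB can be invalidated by a write to either
                   page, making a patched direct jump unsafe. */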
                spin_unlock(&tb_lock);
                env->current_tb = tb;
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC. */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
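                /* Illustrative sketch (hypothetical helper, guarded out of
                   the build): the icount decrementer is a 16-bit
                   down-counter, so budgets above 0xffff are parked in
                   icount_extra and handed out in 16-bit slices. */
#if 0
static void model_refill(unsigned short *low16, long long *extra)
{
    long long slice = (*extra > 0xffff) ? 0xffff : *extra;

    *extra -= slice;     /* park the rest for later */
    *low16 = slice;      /* one 16-bit slice to run now */
}
#endif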
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* inner for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) forever */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}
*s
, target_ulong ptr
, int data32
)
733 CPUX86State
*saved_env
;
738 helper_frstor(ptr
, data32
);
743 #endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
*tb
;
761 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
767 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
771 /* see if it is an MMU fault */
772 ret
= cpu_x86_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
774 return 0; /* not an MMU fault */
776 return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
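
/* Illustrative usage sketch (hypothetical wrapper, guarded out of the
   build): in user-mode emulation the host SIGSEGV handler is routed into
   cpu_signal_handler() (defined later in this file), so guest memory
   faults become guest CPU exceptions. */
#if 0
#include <signal.h>
#include <stdlib.h>
#include <string.h>

static void model_host_segv(int sig, siginfo_t *info, void *puc)
{
    if (!cpu_signal_handler(sig, info, puc))
        abort();   /* a genuine fault inside qemu itself */
}

static void model_install(void)
{
    struct sigaction act;

    memset(&act, 0, sizeof(act));
    act.sa_sigaction = model_host_segv;
    act.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &act, NULL);
}
#endif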
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;
#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define REG_ERR    _REG_ERR
#define REG_TRAPNO _REG_TRAPNO

#define QEMU_UC_MCONTEXT_GREGS(uc, reg)  (uc)->uc_mcontext.__gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)           _UC_MACHINE_PC(uc)
#else
#define QEMU_UC_MCONTEXT_GREGS(uc, reg)  (uc)->uc_mcontext.gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)           QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
= QEMU_UC_MACHINE_PC(uc
);
1212 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1213 QEMU_UC_MCONTEXT_GREGS(uc
, REG_TRAPNO
) == 0xe ?
1214 (QEMU_UC_MCONTEXT_GREGS(uc
, REG_ERR
) >> 1) & 1 : 0,
1215 &uc
->uc_sigmask
, puc
);
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context) /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context) /* Count register */
# define XER_sig(context)              REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(r##reg_num, context)
# define IAR_sig(context)              REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)              REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(lr, context)   /* Link register */
# define CR_sig(context)               REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)            ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)              EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)            EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)             EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: /* stw */
    case 0x0e: /* stb */
    case 0x0f: /* stq_u */
    case 0x24: /* stf */
    case 0x25: /* stg */
    case 0x26: /* sts */
    case 0x27: /* stt */
    case 0x2c: /* stl */
    case 0x2d: /* stq */
    case 0x2e: /* stl_c */
    case 0x2f: /* stq_c */
        is_write = 1;
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: /* stb */
        case 0x06: /* sth */
        case 0x04: /* st */
        case 0x07: /* std */
        case 0x24: /* stf */
        case 0x27: /* stdf */
        case 0x25: /* stfsr */
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;
    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */