/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC. This may happen if async event occurs before
           the TB starts executing. */
        CPU_PC_FROM_TB(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
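/* Note on the value returned by tcg_qemu_tb_exec(): it is the address of
   the last executed TranslationBlock with a tag in its two low bits.
   Tags 0 and 1 name the jump slot of the calling TB that led here (used
   further down to patch direct TB-to-TB jumps), while 2 means the
   instruction counter expired before the TB body ran. */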
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
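/* Note: TB lookup is two-level. tb_find_fast() below first probes the
   per-CPU virtual-pc hash (env->tb_jmp_cache); on a miss, tb_find_slow()
   walks the physically indexed hash chain above, so that translations
   survive changes in the virtual mapping, and finally falls back to
   generating a fresh block with tb_gen_code(). */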
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = ((env->pstate & PS_AM) << 2)
        | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (P_FLAG | U_FLAG | X_FLAG);
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
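/* Note: the (pc, cs_base, flags) triple computed above must capture every
   piece of CPU state that influenced code generation; two states that
   agree on the triple can safely share one translation, which is what
   makes the tb_jmp_cache probe sufficient on the fast path. */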
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
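    /* Note on the i386 setup above: condition codes are kept lazily. The
       arithmetic flag bits are stripped out of env->eflags and carried in
       (CC_SRC, CC_OP); cc_table[CC_OP].compute_all() rebuilds the real
       EFLAGS bits on demand, as done on exit from this function below. */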
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
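            /* Note: this setjmp is the unwind point for the whole loop.
               cpu_loop_exit() and the fault handlers longjmp() back here
               with env->exception_index set; the branch above then either
               delivers the guest exception or, for indices >= EXCP_INTERRUPT,
               returns the code to the caller of cpu_exec(). */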
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
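            /* Note (assumption drawn from the branches above): kqemu runs
               guest code natively and its return value selects the follow-up
               action here: an exception to deliver, a stretch of code that
               needs softmmu execution in this loop, or a plain restart. */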
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
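                    /* Note: every delivered interrupt above resets next_tb
                       to 0 so that the chained-jump patching further down
                       is skipped; the previous TB may no longer be the
                       right predecessor once the program flow has changed. */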
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
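                /* Note: CPU_INTERRUPT_EXIT is how the rest of the emulator
                   asks cpu_exec() to return: it is converted into
                   EXCP_INTERRUPT and cpu_loop_exit() longjmps back to the
                   setjmp point, which hands the code up to the caller. */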
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
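                /* Note: tb_add_jump() patches the given jump slot of the
                   previous TB so that it branches straight to this one;
                   once chained, the pair executes back to back without
                   coming through the lookup code above again. */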
                env->current_tb = tb;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
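                /* Note on the refill above: the decrementer counts at most
                   0xffff instructions per slice, with env->icount_extra
                   holding the remainder of the budget. E.g. a budget of
                   0x2801e instructions runs as slices of 0xffff, 0xffff
                   and 0x8020, icount_extra shrinking by each slice. */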
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* inner for(;;) */
        } else {
            env_to_regs();
        }
    } /* outer for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
#endif
    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
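/* Note on the wrappers above: the helpers operate on the global `env`,
   so each wrapper swaps the caller-supplied CPUX86State in and restores
   the previous value before returning, making them safe to call from
   outside the execution loop. */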
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
811 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
812 int is_write
, sigset_t
*old_set
,
815 TranslationBlock
*tb
;
819 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
820 #if defined(DEBUG_SIGNAL)
821 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
822 pc
, address
, is_write
, *(unsigned long *)old_set
);
824 /* XXX: locking issue */
825 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
828 /* see if it is an MMU fault */
829 ret
= cpu_arm_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
831 return 0; /* not an MMU fault */
833 return 1; /* the MMU fault was handled without causing real CPU fault */
834 /* now we have a real cpu fault */
837 /* the PC is inside the translated code. It means that we have
838 a virtual CPU fault */
839 cpu_restore_state(tb
, env
, pc
, puc
);
841 /* we restore the process signal mask as the sigreturn should
842 do it (XXX: use sigsetjmp) */
843 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
845 /* never comes here */
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
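/* Note for both x86 handlers above: trap number 0xe is the page-fault
   vector, and bit 1 of the page-fault error code is the W bit, set when
   the faulting access was a write; hence the (ERROR >> 1) & 1 test. */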
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 */

#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context) /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)              (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#elif defined(__APPLE__)
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */