2 * i386 emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #if !defined(CONFIG_SOFTMMU)
35 #include <sys/ucontext.h>
40 extern int kvm_allowed
;
43 int tb_invalidated_flag
;
46 //#define DEBUG_SIGNAL
48 #define SAVE_GLOBALS()
49 #define RESTORE_GLOBALS()
51 #if defined(__sparc__) && !defined(HOST_SOLARIS)
53 #if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
54 ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
55 // Work around ugly bugs in glibc that mangle global register contents
57 static volatile void *saved_env
;
58 static volatile unsigned long saved_t0
, saved_i7
;
60 #define SAVE_GLOBALS() do { \
63 asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7)); \
66 #undef RESTORE_GLOBALS
67 #define RESTORE_GLOBALS() do { \
68 env = (void *)saved_env; \
70 asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7)); \
/* setjmp wrapper used on sparc hosts to work around the glibc bug noted
   above (global register clobbering); the function bodies are not visible
   in this extraction fragment. */
73 static int sparc_setjmp(jmp_buf buf
)
83 #define setjmp(jmp_buf) sparc_setjmp(jmp_buf)
/* matching longjmp wrapper; body likewise not visible in this fragment. */
85 static void sparc_longjmp(jmp_buf buf
, int val
)
90 #define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
/* Abort execution of the current translated block and return to the
   setjmp point set up inside cpu_exec() (exception/interrupt dispatch).
   NOTE(review): interior lines of this function are missing from this
   extraction fragment. */
94 void cpu_loop_exit(void)
96 /* NOTE: the registers at this point must be saved by hand because
97 longjmp restores them */
99 longjmp(env
->jmp_env
, 1);
102 #if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
106 /* exit the current TB from a signal handler. The host registers are
107 restored in a state compatible with the CPU emulator
109 void cpu_resume_from_signal(CPUState
*env1
, void *puc
)
111 #if !defined(CONFIG_SOFTMMU)
112 struct ucontext
*uc
= puc
;
117 /* XXX: restore cpu registers saved in host registers */
119 #if !defined(CONFIG_SOFTMMU)
121 /* XXX: use siglongjmp ? */
/* Restore the pre-signal mask by hand: we longjmp out of the signal
   handler, so the normal sigreturn path never runs. */
122 sigprocmask(SIG_SETMASK
, &uc
->uc_sigmask
, NULL
);
125 longjmp(env
->jmp_env
, 1);
/* Slow-path TB lookup: search the physical-address hash table for a
   translated block matching (pc, cs_base, flags); if none is found,
   translate the block now with cpu_gen_code() and link it.
   NOTE(review): interior lines are missing from this extraction fragment;
   declarations of 'h', 'flags' and 'tc_ptr' used below are not visible
   here — confirm against the full file. */
129 static TranslationBlock
*tb_find_slow(target_ulong pc
,
130 target_ulong cs_base
,
133 TranslationBlock
*tb
, **ptb1
;
136 target_ulong phys_pc
, phys_page1
, phys_page2
, virt_page2
;
141 tb_invalidated_flag
= 0;
143 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
145 /* find translated block using physical mappings */
146 phys_pc
= get_phys_addr_code(env
, pc
);
147 phys_page1
= phys_pc
& TARGET_PAGE_MASK
;
149 h
= tb_phys_hash_func(phys_pc
);
150 ptb1
= &tb_phys_hash
[h
];
/* Walk the hash chain; a TB matches only if page, cs_base and flags all
   agree. */
156 tb
->page_addr
[0] == phys_page1
&&
157 tb
->cs_base
== cs_base
&&
158 tb
->flags
== flags
) {
159 /* check next page if needed */
160 if (tb
->page_addr
[1] != -1) {
161 virt_page2
= (pc
& TARGET_PAGE_MASK
) +
163 phys_page2
= get_phys_addr_code(env
, virt_page2
);
164 if (tb
->page_addr
[1] == phys_page2
)
170 ptb1
= &tb
->phys_hash_next
;
173 /* if no translated code available, then translate it now */
176 /* flush must be done */
178 /* cannot fail at this point */
180 /* don't forget to invalidate previous TB info */
181 tb_invalidated_flag
= 1;
183 tc_ptr
= code_gen_ptr
;
185 tb
->cs_base
= cs_base
;
188 cpu_gen_code(env
, tb
, &code_gen_size
);
/* Advance the generated-code pointer, rounded up to CODE_GEN_ALIGN. */
190 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
192 /* check next page if needed */
193 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
195 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
196 phys_page2
= get_phys_addr_code(env
, virt_page2
);
198 tb_link_phys(tb
, phys_pc
, phys_page2
);
201 /* we add the TB in the virtual pc hash table */
202 env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)] = tb
;
203 spin_unlock(&tb_lock
);
/* Fast-path TB lookup: compute the per-target (pc, cs_base, flags) key
   from the CPU state, probe the virtual-pc jump cache, and fall back to
   tb_find_slow() on a miss.
   NOTE(review): interior lines are missing from this extraction fragment;
   the declaration/initialization of 'flags' is not visible here. */
207 static inline TranslationBlock
*tb_find_fast(void)
209 TranslationBlock
*tb
;
210 target_ulong cs_base
, pc
;
213 /* we record a subset of the CPU state. It will
214 always be the same before a given translated block
216 #if defined(TARGET_I386)
218 flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
219 flags
|= env
->intercept
;
220 cs_base
= env
->segs
[R_CS
].base
;
221 pc
= cs_base
+ env
->eip
;
222 #elif defined(TARGET_ARM)
223 flags
= env
->thumb
| (env
->vfp
.vec_len
<< 1)
224 | (env
->vfp
.vec_stride
<< 4);
225 if ((env
->uncached_cpsr
& CPSR_M
) != ARM_CPU_MODE_USR
)
227 if (env
->vfp
.xregs
[ARM_VFP_FPEXC
] & (1 << 30))
229 flags
|= (env
->condexec_bits
<< 8);
232 #elif defined(TARGET_SPARC)
233 #ifdef TARGET_SPARC64
234 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
235 flags
= (((env
->pstate
& PS_PEF
) >> 1) | ((env
->fprs
& FPRS_FEF
) << 2))
236 | (env
->pstate
& PS_PRIV
) | ((env
->lsu
& (DMMU_E
| IMMU_E
)) >> 2);
238 // FPU enable . Supervisor
239 flags
= (env
->psref
<< 4) | env
->psrs
;
243 #elif defined(TARGET_PPC)
247 #elif defined(TARGET_MIPS)
248 flags
= env
->hflags
& (MIPS_HFLAG_TMASK
| MIPS_HFLAG_BMASK
);
250 pc
= env
->PC
[env
->current_tc
];
251 #elif defined(TARGET_M68K)
252 flags
= (env
->fpcr
& M68K_FPCR_PREC
) /* Bit 6 */
253 | (env
->sr
& SR_S
) /* Bit 13 */
254 | ((env
->macsr
>> 4) & 0xf); /* Bits 0-3 */
257 #elif defined(TARGET_SH4)
261 #elif defined(TARGET_ALPHA)
265 #elif defined(TARGET_CRIS)
269 #elif defined(TARGET_IA64)
271 cs_base
= 0; /* XXXXX */
274 #error unsupported CPU
/* Probe the virtual-pc jump cache; a hit still has to be validated
   against pc, cs_base and flags before it can be trusted. */
276 tb
= env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)];
277 if (__builtin_expect(!tb
|| tb
->pc
!= pc
|| tb
->cs_base
!= cs_base
||
278 tb
->flags
!= flags
, 0)) {
279 tb
= tb_find_slow(pc
, cs_base
, flags
);
280 /* Note: we do it here to avoid a gcc bug on Mac OS X when
281 doing it in tb_find_slow */
282 if (tb_invalidated_flag
) {
283 /* as some TB could have been invalidated because
284 of memory exceptions while generating the code, we
285 must recompute the hash index here */
292 #define BREAK_CHAIN T0 = 0
294 /* main execution loop */
/* cpu_exec: run translated code for env1 until an exception or exit
   request occurs; returns the exception index.  Structure (as visible in
   this fragment): save host registers, convert flags to the CPU-temporary
   format, set up a setjmp target for cpu_loop_exit(), deliver any pending
   exception, service interrupt_request per target, then find and execute
   TBs, and finally restore flags and host registers.
   NOTE(review): this is an extraction fragment — many interior lines
   (braces, declarations, whole statements) are missing from this view. */
296 int cpu_exec(CPUState
*env1
)
298 #define DECLARE_HOST_REGS 1
299 #include "hostregs_helper.h"
300 #if defined(TARGET_SPARC)
301 #if defined(reg_REGWPTR)
302 uint32_t *saved_regwptr
;
305 int ret
, interrupt_request
;
306 void (*gen_func
)(void);
307 TranslationBlock
*tb
;
/* A halted CPU with nothing pending has nothing to execute. */
310 if (cpu_halted(env1
) == EXCP_HALTED
)
313 cpu_single_env
= env1
;
315 /* first we save global registers */
316 #define SAVE_HOST_REGS 1
317 #include "hostregs_helper.h"
322 #if defined(TARGET_I386)
323 /* put eflags in CPU temporary format */
324 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
325 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
326 CC_OP
= CC_OP_EFLAGS
;
327 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
328 #elif defined(TARGET_SPARC)
329 #if defined(reg_REGWPTR)
330 saved_regwptr
= REGWPTR
;
332 #elif defined(TARGET_M68K)
333 env
->cc_op
= CC_OP_FLAGS
;
334 env
->cc_dest
= env
->sr
& 0xf;
335 env
->cc_x
= (env
->sr
>> 4) & 1;
336 #elif defined(TARGET_ALPHA)
337 #elif defined(TARGET_ARM)
338 #elif defined(TARGET_PPC)
339 #elif defined(TARGET_MIPS)
340 #elif defined(TARGET_SH4)
341 #elif defined(TARGET_CRIS)
342 #elif defined(TARGET_IA64)
345 #error unsupported target CPU
347 env
->exception_index
= -1;
349 /* prepare setjmp context for exception handling */
/* cpu_loop_exit() longjmps back here with value 1 to restart the loop. */
351 if (setjmp(env
->jmp_env
) == 0) {
352 env
->current_tb
= NULL
;
353 /* if an exception is pending, we execute it here */
354 if (env
->exception_index
>= 0) {
355 if (env
->exception_index
>= EXCP_INTERRUPT
) {
356 /* exit request from the cpu execution loop */
357 ret
= env
->exception_index
;
359 } else if (env
->user_mode_only
) {
360 /* if user mode only, we simulate a fake exception
361 which will be handled outside the cpu execution
363 #if defined(TARGET_I386)
364 do_interrupt_user(env
->exception_index
,
365 env
->exception_is_int
,
367 env
->exception_next_eip
);
369 ret
= env
->exception_index
;
372 #if defined(TARGET_I386)
373 /* simulate a real cpu exception. On i386, it can
374 trigger new exceptions, but we do not handle
375 double or triple faults yet. */
376 do_interrupt(env
->exception_index
,
377 env
->exception_is_int
,
379 env
->exception_next_eip
, 0);
380 /* successfully delivered */
381 env
->old_exception
= -1;
382 #elif defined(TARGET_PPC)
384 #elif defined(TARGET_MIPS)
386 #elif defined(TARGET_SPARC)
387 do_interrupt(env
->exception_index
);
388 #elif defined(TARGET_ARM)
390 #elif defined(TARGET_SH4)
392 #elif defined(TARGET_ALPHA)
394 #elif defined(TARGET_CRIS)
396 #elif defined(TARGET_M68K)
398 #elif defined(TARGET_IA64)
402 env
->exception_index
= -1;
/* kqemu hand-off: only possible when no interrupt is pending; eflags
   must first be rebuilt into the standard format. */
405 if (kqemu_is_ok(env
) && env
->interrupt_request
== 0) {
407 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
408 ret
= kqemu_cpu_exec(env
);
409 /* put eflags in CPU temporary format */
410 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
411 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
412 CC_OP
= CC_OP_EFLAGS
;
413 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
416 longjmp(env
->jmp_env
, 1);
417 } else if (ret
== 2) {
418 /* softmmu execution needed */
420 if (env
->interrupt_request
!= 0) {
421 /* hardware interrupt will be executed just after */
423 /* otherwise, we restart */
424 longjmp(env
->jmp_env
, 1);
433 longjmp(env
->jmp_env
, 1);
436 T0
= 0; /* force lookup of first TB */
/* Per-target interrupt delivery: snapshot interrupt_request once and
   dispatch the highest-priority pending source. */
439 interrupt_request
= env
->interrupt_request
;
440 if (__builtin_expect(interrupt_request
, 0)
441 #if defined(TARGET_I386)
442 && env
->hflags
& HF_GIF_MASK
445 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
446 env
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
447 env
->exception_index
= EXCP_DEBUG
;
450 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
451 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
452 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
453 env
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
455 env
->exception_index
= EXCP_HLT
;
459 #if defined(TARGET_I386)
460 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
461 !(env
->hflags
& HF_SMM_MASK
)) {
462 svm_check_intercept(SVM_EXIT_SMI
);
463 env
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
466 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
467 (env
->eflags
& IF_MASK
|| env
->hflags
& HF_HIF_MASK
) &&
468 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
470 svm_check_intercept(SVM_EXIT_INTR
);
471 env
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VIRQ
);
472 intno
= cpu_get_pic_interrupt(env
);
473 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
474 fprintf(logfile
, "Servicing hardware INT=0x%02x\n", intno
);
476 do_interrupt(intno
, 0, 0, 0, 1);
477 /* ensure that no TB jump will be modified as
478 the program flow was changed */
480 #if !defined(CONFIG_USER_ONLY)
481 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
482 (env
->eflags
& IF_MASK
) && !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
484 /* FIXME: this should respect TPR */
485 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
486 svm_check_intercept(SVM_EXIT_VINTR
);
/* SVM virtual interrupt: the vector lives in the guest VMCB. */
487 intno
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_vector
));
488 if (loglevel
& CPU_LOG_TB_IN_ASM
)
489 fprintf(logfile
, "Servicing virtual hardware INT=0x%02x\n", intno
);
490 do_interrupt(intno
, 0, 0, -1, 1);
491 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
),
492 ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
)) & ~V_IRQ_MASK
);
496 #elif defined(TARGET_PPC)
498 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
502 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
503 ppc_hw_interrupt(env
);
504 if (env
->pending_interrupts
== 0)
505 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
508 #elif defined(TARGET_MIPS)
509 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
510 (env
->CP0_Status
& env
->CP0_Cause
& CP0Ca_IP_mask
) &&
511 (env
->CP0_Status
& (1 << CP0St_IE
)) &&
512 !(env
->CP0_Status
& (1 << CP0St_EXL
)) &&
513 !(env
->CP0_Status
& (1 << CP0St_ERL
)) &&
514 !(env
->hflags
& MIPS_HFLAG_DM
)) {
516 env
->exception_index
= EXCP_EXT_INTERRUPT
;
521 #elif defined(TARGET_SPARC)
522 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
524 int pil
= env
->interrupt_index
& 15;
525 int type
= env
->interrupt_index
& 0xf0;
527 if (((type
== TT_EXTINT
) &&
528 (pil
== 15 || pil
> env
->psrpil
)) ||
530 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
531 do_interrupt(env
->interrupt_index
);
532 env
->interrupt_index
= 0;
533 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
538 } else if (interrupt_request
& CPU_INTERRUPT_TIMER
) {
539 //do_interrupt(0, 0, 0, 0, 0);
540 env
->interrupt_request
&= ~CPU_INTERRUPT_TIMER
;
542 #elif defined(TARGET_ARM)
543 if (interrupt_request
& CPU_INTERRUPT_FIQ
544 && !(env
->uncached_cpsr
& CPSR_F
)) {
545 env
->exception_index
= EXCP_FIQ
;
549 /* ARMv7-M interrupt return works by loading a magic value
550 into the PC. On real hardware the load causes the
551 return to occur. The qemu implementation performs the
552 jump normally, then does the exception return when the
553 CPU tries to execute code at the magic address.
554 This will cause the magic PC value to be pushed to
555 the stack if an interrupt occured at the wrong time.
556 We avoid this by disabling interrupts when
557 pc contains a magic address. */
558 if (interrupt_request
& CPU_INTERRUPT_HARD
559 && ((IS_M(env
) && env
->regs
[15] < 0xfffffff0)
560 || !(env
->uncached_cpsr
& CPSR_I
))) {
561 env
->exception_index
= EXCP_IRQ
;
565 #elif defined(TARGET_SH4)
566 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
570 #elif defined(TARGET_ALPHA)
571 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
575 #elif defined(TARGET_CRIS)
576 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
578 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
581 #elif defined(TARGET_M68K)
582 if (interrupt_request
& CPU_INTERRUPT_HARD
583 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
584 < env
->pending_level
) {
585 /* Real hardware gets the interrupt vector via an
586 IACK cycle at this point. Current emulated
587 hardware doesn't rely on this, so we
588 provide/save the vector when the interrupt is
590 env
->exception_index
= env
->pending_vector
;
595 /* Don't use the cached interupt_request value,
596 do_interrupt may have updated the EXITTB flag. */
597 if (env
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
598 env
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
599 /* ensure that no TB jump will be modified as
600 the program flow was changed */
603 if (interrupt_request
& CPU_INTERRUPT_EXIT
) {
604 env
->interrupt_request
&= ~CPU_INTERRUPT_EXIT
;
605 env
->exception_index
= EXCP_INTERRUPT
;
/* Optional CPU-state tracing; flags must be converted back to the
   standard format around cpu_dump_state() on i386/m68k. */
610 if ((loglevel
& CPU_LOG_TB_CPU
)) {
611 /* restore flags in standard format */
613 #if defined(TARGET_I386)
614 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
615 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
616 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
617 #elif defined(TARGET_ARM)
618 cpu_dump_state(env
, logfile
, fprintf
, 0);
619 #elif defined(TARGET_SPARC)
620 REGWPTR
= env
->regbase
+ (env
->cwp
* 16);
621 env
->regwptr
= REGWPTR
;
622 cpu_dump_state(env
, logfile
, fprintf
, 0);
623 #elif defined(TARGET_PPC)
624 cpu_dump_state(env
, logfile
, fprintf
, 0);
625 #elif defined(TARGET_M68K)
626 cpu_m68k_flush_flags(env
, env
->cc_op
);
627 env
->cc_op
= CC_OP_FLAGS
;
628 env
->sr
= (env
->sr
& 0xffe0)
629 | env
->cc_dest
| (env
->cc_x
<< 4);
630 cpu_dump_state(env
, logfile
, fprintf
, 0);
631 #elif defined(TARGET_MIPS)
632 cpu_dump_state(env
, logfile
, fprintf
, 0);
633 #elif defined(TARGET_SH4)
634 cpu_dump_state(env
, logfile
, fprintf
, 0);
635 #elif defined(TARGET_ALPHA)
636 cpu_dump_state(env
, logfile
, fprintf
, 0);
637 #elif defined(TARGET_CRIS)
638 cpu_dump_state(env
, logfile
, fprintf
, 0);
640 #error unsupported target CPU
646 if ((loglevel
& CPU_LOG_EXEC
)) {
647 fprintf(logfile
, "Trace 0x%08lx [" TARGET_FMT_lx
"] %s\n",
648 (long)tb
->tc_ptr
, tb
->pc
,
649 lookup_symbol(tb
->pc
));
653 /* see if we can patch the calling TB. When the TB
654 spans two pages, we cannot safely do a direct
659 (env
->kqemu_enabled
!= 2) &&
661 tb
->page_addr
[1] == -1) {
/* T0 encodes the previous TB pointer in its high bits and the jump
   slot index in its low 2 bits. */
663 tb_add_jump((TranslationBlock
*)(long)(T0
& ~3), T0
& 3, tb
);
664 spin_unlock(&tb_lock
);
668 env
->current_tb
= tb
;
669 /* execute the generated code */
670 gen_func
= (void *)tc_ptr
;
671 #if defined(__sparc__)
672 __asm__
__volatile__("call %0\n\t"
676 : "i0", "i1", "i2", "i3", "i4", "i5",
677 "o0", "o1", "o2", "o3", "o4", "o5",
678 "l0", "l1", "l2", "l3", "l4", "l5",
680 #elif defined(__arm__)
681 asm volatile ("mov pc, %0\n\t"
682 ".global exec_loop\n\t"
686 : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
687 #elif defined(__ia64)
694 fp
.gp
= code_gen_buffer
+ 2 * (1 << 20);
695 (*(void (*)(void)) &fp
)();
699 env
->current_tb
= NULL
;
700 /* reset soft MMU for next block (it can currently
701 only be set by a memory fault) */
702 #if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
703 if (env
->hflags
& HF_SOFTMMU_MASK
) {
704 env
->hflags
&= ~HF_SOFTMMU_MASK
;
705 /* do not allow linking to another block */
709 #if defined(USE_KQEMU)
710 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
711 if (kqemu_is_ok(env
) &&
712 (cpu_get_time_fast() - env
->last_io_time
) >= MIN_CYCLE_BEFORE_SWITCH
) {
723 #if defined(TARGET_I386)
724 /* restore flags in standard format */
725 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
726 #elif defined(TARGET_ARM)
727 /* XXX: Save/restore host fpu exception state?. */
728 #elif defined(TARGET_SPARC)
729 #if defined(reg_REGWPTR)
730 REGWPTR
= saved_regwptr
;
732 #elif defined(TARGET_PPC)
733 #elif defined(TARGET_M68K)
734 cpu_m68k_flush_flags(env
, env
->cc_op
);
735 env
->cc_op
= CC_OP_FLAGS
;
736 env
->sr
= (env
->sr
& 0xffe0)
737 | env
->cc_dest
| (env
->cc_x
<< 4);
738 #elif defined(TARGET_MIPS)
739 #elif defined(TARGET_SH4)
740 #elif defined(TARGET_IA64)
741 #elif defined(TARGET_ALPHA)
742 #elif defined(TARGET_CRIS)
745 #error unsupported target CPU
748 /* restore global registers */
750 #include "hostregs_helper.h"
752 /* fail safe : never use cpu_single_env outside cpu_exec() */
753 cpu_single_env
= NULL
;
757 /* must only be called from the generated code as an exception can be
/* Invalidate all TBs overlapping [start, end) by translating the start
   address to a physical address and delegating to
   tb_invalidate_phys_page_range().
   NOTE(review): fragment — the surrounding braces/guards are not visible
   here; the XXX below suggests the body may be compiled out upstream. */
759 void tb_invalidate_page_range(target_ulong start
, target_ulong end
)
761 /* XXX: cannot enable it yet because it yields to MMU exception
762 where NIP != read address on PowerPC */
764 target_ulong phys_addr
;
765 phys_addr
= get_phys_addr_code(env
, start
);
766 tb_invalidate_phys_page_range(phys_addr
, phys_addr
+ end
- start
, 0);
770 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
/* Load an x86 segment register: in real/VM86 mode the cache is filled
   directly from the selector (base = selector << 4, limit 0xffff);
   in protected mode the full load_seg() descriptor lookup is used.
   NOTE(review): fragment — saved_env save/restore lines are missing
   from this view. */
772 void cpu_x86_load_seg(CPUX86State
*s
, int seg_reg
, int selector
)
774 CPUX86State
*saved_env
;
778 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
780 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
781 (selector
<< 4), 0xffff, 0);
783 load_seg(seg_reg
, selector
);
/* FSAVE helper entry point for user-only mode; delegates to
   helper_fsave().  NOTE(review): fragment — the env/saved_env swap is
   missing from this view. */
788 void cpu_x86_fsave(CPUX86State
*s
, target_ulong ptr
, int data32
)
790 CPUX86State
*saved_env
;
795 helper_fsave(ptr
, data32
);
/* FRSTOR helper entry point for user-only mode; delegates to
   helper_frstor().  NOTE(review): fragment — the env/saved_env swap is
   missing from this view. */
800 void cpu_x86_frstor(CPUX86State
*s
, target_ulong ptr
, int data32
)
802 CPUX86State
*saved_env
;
807 helper_frstor(ptr
, data32
);
812 #endif /* TARGET_I386 */
814 #if !defined(CONFIG_SOFTMMU)
816 #if defined(TARGET_I386)
818 /* 'pc' is the host PC at which the exception was raised. 'address' is
819 the effective address of the memory exception. 'is_write' is 1 if a
820 write caused the exception and otherwise 0'. 'old_set' is the
821 signal set which should be restored */
/* i386-target SIGSEGV handler: try page_unprotect for self-modifying
   code, then the target MMU fault handler; a real fault re-enters the
   guest via raise_exception_err()/cpu_resume_from_signal().
   NOTE(review): fragment — interior lines are missing from this view. */
822 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
823 int is_write
, sigset_t
*old_set
,
826 TranslationBlock
*tb
;
830 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
831 #if defined(DEBUG_SIGNAL)
832 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
833 pc
, address
, is_write
, *(unsigned long *)old_set
);
835 /* XXX: locking issue */
836 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
840 /* see if it is an MMU fault */
841 ret
= cpu_x86_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
843 return 0; /* not an MMU fault */
845 return 1; /* the MMU fault was handled without causing real CPU fault */
846 /* now we have a real cpu fault */
849 /* the PC is inside the translated code. It means that we have
850 a virtual CPU fault */
851 cpu_restore_state(tb
, env
, pc
, puc
);
855 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
856 env
->eip
, env
->cr
[2], env
->error_code
);
858 /* we restore the process signal mask as the sigreturn should
859 do it (XXX: use sigsetjmp) */
860 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
861 raise_exception_err(env
->exception_index
, env
->error_code
);
863 /* activate soft MMU for this block */
864 env
->hflags
|= HF_SOFTMMU_MASK
;
865 cpu_resume_from_signal(env
, puc
);
867 /* never comes here */
871 #elif defined(TARGET_ARM)
/* ARM-target variant of handle_cpu_signal (see the i386 version above
   for parameter semantics).  NOTE(review): fragment — interior lines
   are missing from this view. */
872 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
873 int is_write
, sigset_t
*old_set
,
876 TranslationBlock
*tb
;
880 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
881 #if defined(DEBUG_SIGNAL)
882 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
883 pc
, address
, is_write
, *(unsigned long *)old_set
);
885 /* XXX: locking issue */
886 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
889 /* see if it is an MMU fault */
890 ret
= cpu_arm_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
892 return 0; /* not an MMU fault */
894 return 1; /* the MMU fault was handled without causing real CPU fault */
895 /* now we have a real cpu fault */
898 /* the PC is inside the translated code. It means that we have
899 a virtual CPU fault */
900 cpu_restore_state(tb
, env
, pc
, puc
);
902 /* we restore the process signal mask as the sigreturn should
903 do it (XXX: use sigsetjmp) */
904 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
907 #elif defined(TARGET_SPARC)
/* SPARC-target variant of handle_cpu_signal (see the i386 version above
   for parameter semantics).  NOTE(review): fragment — interior lines
   are missing from this view. */
908 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
909 int is_write
, sigset_t
*old_set
,
912 TranslationBlock
*tb
;
916 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
917 #if defined(DEBUG_SIGNAL)
918 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
919 pc
, address
, is_write
, *(unsigned long *)old_set
);
921 /* XXX: locking issue */
922 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
925 /* see if it is an MMU fault */
926 ret
= cpu_sparc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
928 return 0; /* not an MMU fault */
930 return 1; /* the MMU fault was handled without causing real CPU fault */
931 /* now we have a real cpu fault */
934 /* the PC is inside the translated code. It means that we have
935 a virtual CPU fault */
936 cpu_restore_state(tb
, env
, pc
, puc
);
938 /* we restore the process signal mask as the sigreturn should
939 do it (XXX: use sigsetjmp) */
940 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
943 #elif defined (TARGET_PPC)
/* PPC-target variant of handle_cpu_signal (see the i386 version above
   for parameter semantics).  NOTE(review): fragment — interior lines
   are missing from this view. */
944 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
945 int is_write
, sigset_t
*old_set
,
948 TranslationBlock
*tb
;
952 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
953 #if defined(DEBUG_SIGNAL)
954 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
955 pc
, address
, is_write
, *(unsigned long *)old_set
);
957 /* XXX: locking issue */
958 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
962 /* see if it is an MMU fault */
963 ret
= cpu_ppc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
965 return 0; /* not an MMU fault */
967 return 1; /* the MMU fault was handled without causing real CPU fault */
969 /* now we have a real cpu fault */
972 /* the PC is inside the translated code. It means that we have
973 a virtual CPU fault */
974 cpu_restore_state(tb
, env
, pc
, puc
);
978 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
979 env
->nip
, env
->error_code
, tb
);
981 /* we restore the process signal mask as the sigreturn should
982 do it (XXX: use sigsetjmp) */
983 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
984 do_raise_exception_err(env
->exception_index
, env
->error_code
);
986 /* activate soft MMU for this block */
987 cpu_resume_from_signal(env
, puc
);
989 /* never comes here */
993 #elif defined(TARGET_M68K)
/* M68K-target variant of handle_cpu_signal (see the i386 version above
   for parameter semantics).
   NOTE(review): unlike every sibling variant this one calls
   page_unprotect(address, ...) without the h2g() host-to-guest
   translation — verify against upstream whether this is intentional.
   Fragment — interior lines are missing from this view. */
994 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
995 int is_write
, sigset_t
*old_set
,
998 TranslationBlock
*tb
;
1002 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1003 #if defined(DEBUG_SIGNAL)
1004 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1005 pc
, address
, is_write
, *(unsigned long *)old_set
);
1007 /* XXX: locking issue */
1008 if (is_write
&& page_unprotect(address
, pc
, puc
)) {
1011 /* see if it is an MMU fault */
1012 ret
= cpu_m68k_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1014 return 0; /* not an MMU fault */
1016 return 1; /* the MMU fault was handled without causing real CPU fault */
1017 /* now we have a real cpu fault */
1018 tb
= tb_find_pc(pc
);
1020 /* the PC is inside the translated code. It means that we have
1021 a virtual CPU fault */
1022 cpu_restore_state(tb
, env
, pc
, puc
);
1024 /* we restore the process signal mask as the sigreturn should
1025 do it (XXX: use sigsetjmp) */
1026 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1028 /* never comes here */
1032 #elif defined (TARGET_MIPS)
/* MIPS-target variant of handle_cpu_signal (see the i386 version above
   for parameter semantics).  NOTE(review): fragment — interior lines
   are missing from this view. */
1033 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1034 int is_write
, sigset_t
*old_set
,
1037 TranslationBlock
*tb
;
1041 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1042 #if defined(DEBUG_SIGNAL)
1043 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1044 pc
, address
, is_write
, *(unsigned long *)old_set
);
1046 /* XXX: locking issue */
1047 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1051 /* see if it is an MMU fault */
1052 ret
= cpu_mips_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1054 return 0; /* not an MMU fault */
1056 return 1; /* the MMU fault was handled without causing real CPU fault */
1058 /* now we have a real cpu fault */
1059 tb
= tb_find_pc(pc
);
1061 /* the PC is inside the translated code. It means that we have
1062 a virtual CPU fault */
1063 cpu_restore_state(tb
, env
, pc
, puc
);
1067 printf("PF exception: PC=0x" TARGET_FMT_lx
" error=0x%x %p\n",
1068 env
->PC
, env
->error_code
, tb
);
1070 /* we restore the process signal mask as the sigreturn should
1071 do it (XXX: use sigsetjmp) */
1072 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1073 do_raise_exception_err(env
->exception_index
, env
->error_code
);
1075 /* activate soft MMU for this block */
1076 cpu_resume_from_signal(env
, puc
);
1078 /* never comes here */
1082 #elif defined (TARGET_SH4)
/* SH4-target variant of handle_cpu_signal (see the i386 version above
   for parameter semantics).
   NOTE(review): the debug printf below reads env->nip, which is a PPC
   field name — this looks copy-pasted from the PPC variant; verify the
   field exists for SH4 or fix upstream.
   Fragment — interior lines are missing from this view. */
1083 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1084 int is_write
, sigset_t
*old_set
,
1087 TranslationBlock
*tb
;
1091 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1092 #if defined(DEBUG_SIGNAL)
1093 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1094 pc
, address
, is_write
, *(unsigned long *)old_set
);
1096 /* XXX: locking issue */
1097 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1101 /* see if it is an MMU fault */
1102 ret
= cpu_sh4_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1104 return 0; /* not an MMU fault */
1106 return 1; /* the MMU fault was handled without causing real CPU fault */
1108 /* now we have a real cpu fault */
1109 tb
= tb_find_pc(pc
);
1111 /* the PC is inside the translated code. It means that we have
1112 a virtual CPU fault */
1113 cpu_restore_state(tb
, env
, pc
, puc
);
1116 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1117 env
->nip
, env
->error_code
, tb
);
1119 /* we restore the process signal mask as the sigreturn should
1120 do it (XXX: use sigsetjmp) */
1121 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1123 /* never comes here */
1127 #elif defined (TARGET_ALPHA)
/* Alpha-target variant of handle_cpu_signal (see the i386 version above
   for parameter semantics).
   NOTE(review): the debug printf below reads env->nip, a PPC field name —
   apparent copy-paste from the PPC variant; verify for Alpha.
   Fragment — interior lines are missing from this view. */
1128 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1129 int is_write
, sigset_t
*old_set
,
1132 TranslationBlock
*tb
;
1136 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1137 #if defined(DEBUG_SIGNAL)
1138 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1139 pc
, address
, is_write
, *(unsigned long *)old_set
);
1141 /* XXX: locking issue */
1142 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1146 /* see if it is an MMU fault */
1147 ret
= cpu_alpha_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1149 return 0; /* not an MMU fault */
1151 return 1; /* the MMU fault was handled without causing real CPU fault */
1153 /* now we have a real cpu fault */
1154 tb
= tb_find_pc(pc
);
1156 /* the PC is inside the translated code. It means that we have
1157 a virtual CPU fault */
1158 cpu_restore_state(tb
, env
, pc
, puc
);
1161 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1162 env
->nip
, env
->error_code
, tb
);
1164 /* we restore the process signal mask as the sigreturn should
1165 do it (XXX: use sigsetjmp) */
1166 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1168 /* never comes here */
1171 #elif defined (TARGET_CRIS)
/* CRIS-target variant of handle_cpu_signal (see the i386 version above
   for parameter semantics).
   NOTE(review): the debug printf below reads env->nip, a PPC field name —
   apparent copy-paste from the PPC variant; verify for CRIS.
   Fragment — interior lines are missing from this view. */
1172 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1173 int is_write
, sigset_t
*old_set
,
1176 TranslationBlock
*tb
;
1180 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1181 #if defined(DEBUG_SIGNAL)
1182 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1183 pc
, address
, is_write
, *(unsigned long *)old_set
);
1185 /* XXX: locking issue */
1186 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1190 /* see if it is an MMU fault */
1191 ret
= cpu_cris_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1193 return 0; /* not an MMU fault */
1195 return 1; /* the MMU fault was handled without causing real CPU fault */
1197 /* now we have a real cpu fault */
1198 tb
= tb_find_pc(pc
);
1200 /* the PC is inside the translated code. It means that we have
1201 a virtual CPU fault */
1202 cpu_restore_state(tb
, env
, pc
, puc
);
1205 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1206 env
->nip
, env
->error_code
, tb
);
1208 /* we restore the process signal mask as the sigreturn should
1209 do it (XXX: use sigsetjmp) */
1210 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1212 /* never comes here */
1217 #error unsupported target CPU
1220 #if defined(__i386__)
1222 #if defined(__APPLE__)
1223 # include <sys/ucontext.h>
1225 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1226 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1227 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1229 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1230 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1231 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
/* Host-side (i386 host) SIGSEGV handler: extract the faulting host PC,
   trap number and error code from the ucontext via the EIP_sig/TRAP_sig/
   ERROR_sig macros above, then delegate to handle_cpu_signal().
   NOTE(review): fragment — declarations of 'pc'/'trapno' and the pc
   assignment are missing from this view. */
1234 int cpu_signal_handler(int host_signum
, void *pinfo
,
1237 siginfo_t
*info
= pinfo
;
1238 struct ucontext
*uc
= puc
;
1246 #define REG_TRAPNO TRAPNO
1249 trapno
= TRAP_sig(uc
);
1250 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1252 (ERROR_sig(uc
) >> 1) & 1 : 0,
1253 &uc
->uc_sigmask
, puc
);
1256 #elif defined(__x86_64__)
1258 int cpu_signal_handler(int host_signum
, void *pinfo
,
1261 siginfo_t
*info
= pinfo
;
1262 struct ucontext
*uc
= puc
;
1265 pc
= uc
->uc_mcontext
.gregs
[REG_RIP
];
1266 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1267 uc
->uc_mcontext
.gregs
[REG_TRAPNO
] == 0xe ?
1268 (uc
->uc_mcontext
.gregs
[REG_ERR
] >> 1) & 1 : 0,
1269 &uc
->uc_sigmask
, puc
);
1272 #elif defined(__powerpc__)
1274 /***********************************************************************
1275 * signal context platform-specific definitions
1279 /* All Registers access - only for local access */
1280 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1281 /* Gpr Registers access */
1282 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1283 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1284 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1285 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1286 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1287 # define LR_sig(context) REG_sig(link, context) /* Link register */
1288 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1289 /* Float Registers access */
1290 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1291 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1292 /* Exception Registers access */
1293 # define DAR_sig(context) REG_sig(dar, context)
1294 # define DSISR_sig(context) REG_sig(dsisr, context)
1295 # define TRAP_sig(context) REG_sig(trap, context)
1299 # include <sys/ucontext.h>
1300 typedef struct ucontext SIGCONTEXT
;
1301 /* All Registers access - only for local access */
1302 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1303 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1304 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1305 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1306 /* Gpr Registers access */
1307 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1308 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1309 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1310 # define CTR_sig(context) REG_sig(ctr, context)
1311 # define XER_sig(context) REG_sig(xer, context) /* Link register */
1312 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1313 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1314 /* Float Registers access */
1315 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1316 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1317 /* Exception Registers access */
1318 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1319 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1320 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1321 #endif /* __APPLE__ */
1323 int cpu_signal_handler(int host_signum
, void *pinfo
,
1326 siginfo_t
*info
= pinfo
;
1327 struct ucontext
*uc
= puc
;
1335 if (DSISR_sig(uc
) & 0x00800000)
1338 if (TRAP_sig(uc
) != 0x400 && (DSISR_sig(uc
) & 0x02000000))
1341 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1342 is_write
, &uc
->uc_sigmask
, puc
);
1345 #elif defined(__alpha__)
1347 int cpu_signal_handler(int host_signum
, void *pinfo
,
1350 siginfo_t
*info
= pinfo
;
1351 struct ucontext
*uc
= puc
;
1352 uint32_t *pc
= uc
->uc_mcontext
.sc_pc
;
1353 uint32_t insn
= *pc
;
1356 /* XXX: need kernel patch to get write flag faster */
1357 switch (insn
>> 26) {
1372 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1373 is_write
, &uc
->uc_sigmask
, puc
);
1375 #elif defined(__sparc__)
1377 int cpu_signal_handler(int host_signum
, void *pinfo
,
1380 siginfo_t
*info
= pinfo
;
1381 uint32_t *regs
= (uint32_t *)(info
+ 1);
1382 void *sigmask
= (regs
+ 20);
1387 /* XXX: is there a standard glibc define ? */
1389 /* XXX: need kernel patch to get write flag faster */
1391 insn
= *(uint32_t *)pc
;
1392 if ((insn
>> 30) == 3) {
1393 switch((insn
>> 19) & 0x3f) {
1405 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1406 is_write
, sigmask
, NULL
);
1409 #elif defined(__arm__)
1411 int cpu_signal_handler(int host_signum
, void *pinfo
,
1414 siginfo_t
*info
= pinfo
;
1415 struct ucontext
*uc
= puc
;
1419 pc
= uc
->uc_mcontext
.gregs
[R15
];
1420 /* XXX: compute is_write */
1422 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1424 &uc
->uc_sigmask
, puc
);
1427 #elif defined(__mc68000)
1429 int cpu_signal_handler(int host_signum
, void *pinfo
,
1432 siginfo_t
*info
= pinfo
;
1433 struct ucontext
*uc
= puc
;
1437 pc
= uc
->uc_mcontext
.gregs
[16];
1438 /* XXX: compute is_write */
1440 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1442 &uc
->uc_sigmask
, puc
);
1445 #elif defined(__ia64)
1448 /* This ought to be in <bits/siginfo.h>... */
1449 # define __ISR_VALID 1
1452 int cpu_signal_handler(int host_signum
, void *pinfo
, void *puc
)
1454 siginfo_t
*info
= pinfo
;
1455 struct ucontext
*uc
= puc
;
1459 ip
= uc
->uc_mcontext
.sc_ip
;
1460 switch (host_signum
) {
1466 if (info
->si_code
&& (info
->si_segvflags
& __ISR_VALID
))
1467 /* ISR.W (write-access) is bit 33: */
1468 is_write
= (info
->si_isr
>> 33) & 1;
1474 return handle_cpu_signal(ip
, (unsigned long)info
->si_addr
,
1476 &uc
->uc_sigmask
, puc
);
1479 #elif defined(__s390__)
1481 int cpu_signal_handler(int host_signum
, void *pinfo
,
1484 siginfo_t
*info
= pinfo
;
1485 struct ucontext
*uc
= puc
;
1489 pc
= uc
->uc_mcontext
.psw
.addr
;
1490 /* XXX: compute is_write */
1492 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1493 is_write
, &uc
->uc_sigmask
, puc
);
1496 #elif defined(__mips__)
1498 int cpu_signal_handler(int host_signum
, void *pinfo
,
1501 siginfo_t
*info
= pinfo
;
1502 struct ucontext
*uc
= puc
;
1503 greg_t pc
= uc
->uc_mcontext
.pc
;
1506 /* XXX: compute is_write */
1508 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1509 is_write
, &uc
->uc_sigmask
, puc
);
1514 #error host CPU specific signal handler needed
1518 #endif /* !defined(CONFIG_SOFTMMU) */