/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
21 #define CPU_NO_GLOBAL_REGS
26 #if !defined(CONFIG_SOFTMMU)
37 #include <sys/ucontext.h>
40 #if defined(__sparc__) && !defined(HOST_SOLARIS)
41 // Work around ugly bugs in glibc that mangle global register contents
43 #define env cpu_single_env
/* Flag set when a TranslationBlock may have been invalidated while new code
   was being generated; cpu_exec() tests it to recompute its TB hash entry.
   NOTE(review): this file is a garbled extraction — statements are split
   across lines and the leading numerals are original source line numbers. */
46 int tb_invalidated_flag
;
49 //#define DEBUG_SIGNAL
/* Abort execution of the current translated block and longjmp back to the
   setjmp() point established in cpu_exec() (env->jmp_env).
   NOTE(review): mangled extraction — the function braces and at least one
   interior line (per the fused line numbering, original line 55) are
   missing from this view; do not assume the visible text is the whole body. */
51 void cpu_loop_exit(void)
53 /* NOTE: the register at this point must be saved by hand because
54 longjmp restore them */
56 longjmp(env
->jmp_env
, 1);
/* Exit the current TB from a host signal handler: restore the process signal
   mask saved in the ucontext (user-mode / !CONFIG_SOFTMMU builds only), then
   longjmp back into cpu_exec() via env->jmp_env.
   NOTE(review): garbled extraction — braces, the `#else`/`#endif` pairs and
   several interior lines are absent from this view (fused numbering skips
   e.g. original lines 60-62, 65, 67, 70-73, 75, 77, 80-84). */
59 #if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
63 /* exit the current TB from a signal handler. The host registers are
64 restored in a state compatible with the CPU emulator
66 void cpu_resume_from_signal(CPUState
*env1
, void *puc
)
68 #if !defined(CONFIG_SOFTMMU)
69 struct ucontext
*uc
= puc
;
74 /* XXX: restore cpu registers saved in host registers */
76 #if !defined(CONFIG_SOFTMMU)
/* Restore the signal mask ourselves, since we longjmp out of the handler
   instead of returning through sigreturn(). */
78 /* XXX: use siglongjmp ? */
79 sigprocmask(SIG_SETMASK
, &uc
->uc_sigmask
, NULL
);
82 longjmp(env
->jmp_env
, 1);
/* Generate and run a one-shot (uncached) TB covering at most max_cycles
   instructions of orig_tb, then invalidate it.  Used when an existing TB is
   too long for the remaining instruction-count budget.
   NOTE(review): garbled extraction — the tb_gen_code() call is cut off
   mid-argument-list (original line 98 missing) and the TranslationBlock
   declaration / braces are absent from this view. */
85 /* Execute the code without caching the generated code. An interpreter
86 could be used if available. */
87 static void cpu_exec_nocache(int max_cycles
, TranslationBlock
*orig_tb
)
89 unsigned long next_tb
;
/* Clamp the cycle budget to what the TB cflags field can encode. */
92 /* Should never happen.
93 We only end up here when an existing TB is too long. */
94 if (max_cycles
> CF_COUNT_MASK
)
95 max_cycles
= CF_COUNT_MASK
;
97 tb
= tb_gen_code(env
, orig_tb
->pc
, orig_tb
->cs_base
, orig_tb
->flags
,
100 /* execute the generated code */
101 next_tb
= tcg_qemu_tb_exec(tb
->tc_ptr
);
/* (next_tb & 3) == 2 encodes "TB exited before executing" — see cpu_exec(). */
103 if ((next_tb
& 3) == 2) {
104 /* Restore PC. This may happen if async event occurs before
105 the TB starts executing. */
106 CPU_PC_FROM_TB(env
, tb
);
/* One-shot TB: throw it away immediately. */
108 tb_phys_invalidate(tb
, -1);
/* Slow-path TB lookup: search the physical-address hash table for a TB
   matching (pc, cs_base, flags); if none is found, translate the code now
   with tb_gen_code() and insert the result into the per-CPU virtual-pc
   jump cache.
   NOTE(review): garbled extraction — the for(;;) loop header, several
   comparisons, and the `found:` handling (original lines 130-134, 141,
   144-148, 150-151, 154-155) are missing from this view. */
112 static TranslationBlock
*tb_find_slow(target_ulong pc
,
113 target_ulong cs_base
,
116 TranslationBlock
*tb
, **ptb1
;
118 target_ulong phys_pc
, phys_page1
, phys_page2
, virt_page2
;
120 tb_invalidated_flag
= 0;
122 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
/* TBs are keyed by the physical address of the guest code, so aliased
   virtual mappings share translations. */
124 /* find translated block using physical mappings */
125 phys_pc
= get_phys_addr_code(env
, pc
);
126 phys_page1
= phys_pc
& TARGET_PAGE_MASK
;
128 h
= tb_phys_hash_func(phys_pc
);
129 ptb1
= &tb_phys_hash
[h
];
/* Hash-chain walk: a candidate must match pc, cs_base and flags... */
135 tb
->page_addr
[0] == phys_page1
&&
136 tb
->cs_base
== cs_base
&&
137 tb
->flags
== flags
) {
/* ...and, for a TB spanning two pages, the second page as well. */
138 /* check next page if needed */
139 if (tb
->page_addr
[1] != -1) {
140 virt_page2
= (pc
& TARGET_PAGE_MASK
) +
142 phys_page2
= get_phys_addr_code(env
, virt_page2
);
143 if (tb
->page_addr
[1] == phys_page2
)
149 ptb1
= &tb
->phys_hash_next
;
152 /* if no translated code available, then translate it now */
153 tb
= tb_gen_code(env
, pc
, cs_base
, flags
, 0);
/* Publish the result in the fast virtual-pc cache consulted by
   tb_find_fast(). */
156 /* we add the TB in the virtual pc hash table */
157 env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)] = tb
;
/* Fast-path TB lookup: compute the per-target (flags, cs_base, pc) triple
   that identifies a translation, probe the per-CPU tb_jmp_cache, and fall
   back to tb_find_slow() on a miss or mismatch.
   NOTE(review): garbled extraction — for most targets the initial `flags =`
   / `pc =` assignments and the `#endif`s are missing from this view (the
   fused numbering skips e.g. original lines 165-166, 169, 171, 179, 181,
   183-184, 191, 194-196, 198-200, 203, 209-210, 216-217, 219-221, 224-229);
   treat each #elif arm as a fragment, not the full per-target code. */
161 static inline TranslationBlock
*tb_find_fast(void)
163 TranslationBlock
*tb
;
164 target_ulong cs_base
, pc
;
167 /* we record a subset of the CPU state. It will
168 always be the same before a given translated block
170 #if defined(TARGET_I386)
172 flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
173 cs_base
= env
->segs
[R_CS
].base
;
174 pc
= cs_base
+ env
->eip
;
175 #elif defined(TARGET_ARM)
176 flags
= env
->thumb
| (env
->vfp
.vec_len
<< 1)
177 | (env
->vfp
.vec_stride
<< 4);
178 if ((env
->uncached_cpsr
& CPSR_M
) != ARM_CPU_MODE_USR
)
180 if (env
->vfp
.xregs
[ARM_VFP_FPEXC
] & (1 << 30))
182 flags
|= (env
->condexec_bits
<< 8);
185 #elif defined(TARGET_SPARC)
186 #ifdef TARGET_SPARC64
187 // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
188 flags
= ((env
->pstate
& PS_AM
) << 2)
189 | (((env
->pstate
& PS_PEF
) >> 1) | ((env
->fprs
& FPRS_FEF
) << 2))
190 | (env
->pstate
& PS_PRIV
) | ((env
->lsu
& (DMMU_E
| IMMU_E
)) >> 2);
192 // FPU enable . Supervisor
193 flags
= (env
->psref
<< 4) | env
->psrs
;
194 #elif defined(TARGET_PPC)
/* (fused number "197" above belongs to the original source, not an edit) */
201 #elif defined(TARGET_MIPS)
202 flags
= env
->hflags
& (MIPS_HFLAG_TMASK
| MIPS_HFLAG_BMASK
);
204 pc
= env
->active_tc
.PC
;
205 #elif defined(TARGET_M68K)
206 flags
= (env
->fpcr
& M68K_FPCR_PREC
) /* Bit 6 */
207 | (env
->sr
& SR_S
) /* Bit 13 */
208 | ((env
->macsr
>> 4) & 0xf); /* Bits 0-3 */
211 #elif defined(TARGET_SH4)
212 flags
= (env
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
213 | DELAY_SLOT_TRUE
| DELAY_SLOT_CLEARME
)) /* Bits 0- 3 */
214 | (env
->fpscr
& (FPSCR_FR
| FPSCR_SZ
| FPSCR_PR
)) /* Bits 19-21 */
215 | (env
->sr
& (SR_MD
| SR_RB
)); /* Bits 29-30 */
218 #elif defined(TARGET_ALPHA)
222 #elif defined(TARGET_CRIS)
223 flags
= env
->pregs
[PR_CCS
] & (P_FLAG
| U_FLAG
| X_FLAG
);
228 #error unsupported CPU
/* Probe the direct-mapped virtual-pc cache; on any mismatch take the
   slow path, which also handles translation of new code. */
230 tb
= env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)];
231 if (unlikely(!tb
|| tb
->pc
!= pc
|| tb
->cs_base
!= cs_base
||
232 tb
->flags
!= flags
)) {
233 tb
= tb_find_slow(pc
, cs_base
, flags
);
238 /* main execution loop */
240 int cpu_exec(CPUState
*env1
)
242 #define DECLARE_HOST_REGS 1
243 #include "hostregs_helper.h"
244 int ret
, interrupt_request
;
245 TranslationBlock
*tb
;
247 unsigned long next_tb
;
249 if (cpu_halted(env1
) == EXCP_HALTED
)
252 cpu_single_env
= env1
;
254 /* first we save global registers */
255 #define SAVE_HOST_REGS 1
256 #include "hostregs_helper.h"
260 #if defined(TARGET_I386)
261 /* put eflags in CPU temporary format */
262 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
263 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
264 CC_OP
= CC_OP_EFLAGS
;
265 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
266 #elif defined(TARGET_SPARC)
267 #elif defined(TARGET_M68K)
268 env
->cc_op
= CC_OP_FLAGS
;
269 env
->cc_dest
= env
->sr
& 0xf;
270 env
->cc_x
= (env
->sr
>> 4) & 1;
271 #elif defined(TARGET_ALPHA)
272 #elif defined(TARGET_ARM)
273 #elif defined(TARGET_PPC)
274 #elif defined(TARGET_MIPS)
275 #elif defined(TARGET_SH4)
276 #elif defined(TARGET_CRIS)
279 #error unsupported target CPU
281 env
->exception_index
= -1;
283 /* prepare setjmp context for exception handling */
285 if (setjmp(env
->jmp_env
) == 0) {
286 env
->current_tb
= NULL
;
287 /* if an exception is pending, we execute it here */
288 if (env
->exception_index
>= 0) {
289 if (env
->exception_index
>= EXCP_INTERRUPT
) {
290 /* exit request from the cpu execution loop */
291 ret
= env
->exception_index
;
293 } else if (env
->user_mode_only
) {
294 /* if user mode only, we simulate a fake exception
295 which will be handled outside the cpu execution
297 #if defined(TARGET_I386)
298 do_interrupt_user(env
->exception_index
,
299 env
->exception_is_int
,
301 env
->exception_next_eip
);
302 /* successfully delivered */
303 env
->old_exception
= -1;
305 ret
= env
->exception_index
;
308 #if defined(TARGET_I386)
309 /* simulate a real cpu exception. On i386, it can
310 trigger new exceptions, but we do not handle
311 double or triple faults yet. */
312 do_interrupt(env
->exception_index
,
313 env
->exception_is_int
,
315 env
->exception_next_eip
, 0);
316 /* successfully delivered */
317 env
->old_exception
= -1;
318 #elif defined(TARGET_PPC)
320 #elif defined(TARGET_MIPS)
322 #elif defined(TARGET_SPARC)
324 #elif defined(TARGET_ARM)
326 #elif defined(TARGET_SH4)
328 #elif defined(TARGET_ALPHA)
330 #elif defined(TARGET_CRIS)
332 #elif defined(TARGET_M68K)
336 env
->exception_index
= -1;
339 if (kqemu_is_ok(env
) && env
->interrupt_request
== 0) {
341 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
342 ret
= kqemu_cpu_exec(env
);
343 /* put eflags in CPU temporary format */
344 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
345 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
346 CC_OP
= CC_OP_EFLAGS
;
347 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
350 longjmp(env
->jmp_env
, 1);
351 } else if (ret
== 2) {
352 /* softmmu execution needed */
354 if (env
->interrupt_request
!= 0) {
355 /* hardware interrupt will be executed just after */
357 /* otherwise, we restart */
358 longjmp(env
->jmp_env
, 1);
364 next_tb
= 0; /* force lookup of first TB */
366 interrupt_request
= env
->interrupt_request
;
367 if (unlikely(interrupt_request
) &&
368 likely(!(env
->singlestep_enabled
& SSTEP_NOIRQ
))) {
369 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
370 env
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
371 env
->exception_index
= EXCP_DEBUG
;
374 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
375 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
376 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
377 env
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
379 env
->exception_index
= EXCP_HLT
;
383 #if defined(TARGET_I386)
384 if (env
->hflags2
& HF2_GIF_MASK
) {
385 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
386 !(env
->hflags
& HF_SMM_MASK
)) {
387 svm_check_intercept(SVM_EXIT_SMI
);
388 env
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
391 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
392 !(env
->hflags2
& HF2_NMI_MASK
)) {
393 env
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
394 env
->hflags2
|= HF2_NMI_MASK
;
395 do_interrupt(EXCP02_NMI
, 0, 0, 0, 1);
397 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
398 (((env
->hflags2
& HF2_VINTR_MASK
) &&
399 (env
->hflags2
& HF2_HIF_MASK
)) ||
400 (!(env
->hflags2
& HF2_VINTR_MASK
) &&
401 (env
->eflags
& IF_MASK
&&
402 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
))))) {
404 svm_check_intercept(SVM_EXIT_INTR
);
405 env
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VIRQ
);
406 intno
= cpu_get_pic_interrupt(env
);
407 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
408 fprintf(logfile
, "Servicing hardware INT=0x%02x\n", intno
);
410 do_interrupt(intno
, 0, 0, 0, 1);
411 /* ensure that no TB jump will be modified as
412 the program flow was changed */
414 #if !defined(CONFIG_USER_ONLY)
415 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
416 (env
->eflags
& IF_MASK
) &&
417 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
419 /* FIXME: this should respect TPR */
420 svm_check_intercept(SVM_EXIT_VINTR
);
421 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
422 intno
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_vector
));
423 if (loglevel
& CPU_LOG_TB_IN_ASM
)
424 fprintf(logfile
, "Servicing virtual hardware INT=0x%02x\n", intno
);
425 do_interrupt(intno
, 0, 0, 0, 1);
430 #elif defined(TARGET_PPC)
432 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
436 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
437 ppc_hw_interrupt(env
);
438 if (env
->pending_interrupts
== 0)
439 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
442 #elif defined(TARGET_MIPS)
443 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
444 (env
->CP0_Status
& env
->CP0_Cause
& CP0Ca_IP_mask
) &&
445 (env
->CP0_Status
& (1 << CP0St_IE
)) &&
446 !(env
->CP0_Status
& (1 << CP0St_EXL
)) &&
447 !(env
->CP0_Status
& (1 << CP0St_ERL
)) &&
448 !(env
->hflags
& MIPS_HFLAG_DM
)) {
450 env
->exception_index
= EXCP_EXT_INTERRUPT
;
455 #elif defined(TARGET_SPARC)
456 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
458 int pil
= env
->interrupt_index
& 15;
459 int type
= env
->interrupt_index
& 0xf0;
461 if (((type
== TT_EXTINT
) &&
462 (pil
== 15 || pil
> env
->psrpil
)) ||
464 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
465 env
->exception_index
= env
->interrupt_index
;
467 env
->interrupt_index
= 0;
468 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
473 } else if (interrupt_request
& CPU_INTERRUPT_TIMER
) {
474 //do_interrupt(0, 0, 0, 0, 0);
475 env
->interrupt_request
&= ~CPU_INTERRUPT_TIMER
;
477 #elif defined(TARGET_ARM)
478 if (interrupt_request
& CPU_INTERRUPT_FIQ
479 && !(env
->uncached_cpsr
& CPSR_F
)) {
480 env
->exception_index
= EXCP_FIQ
;
484 /* ARMv7-M interrupt return works by loading a magic value
485 into the PC. On real hardware the load causes the
486 return to occur. The qemu implementation performs the
487 jump normally, then does the exception return when the
488 CPU tries to execute code at the magic address.
489 This will cause the magic PC value to be pushed to
490 the stack if an interrupt occured at the wrong time.
491 We avoid this by disabling interrupts when
492 pc contains a magic address. */
493 if (interrupt_request
& CPU_INTERRUPT_HARD
494 && ((IS_M(env
) && env
->regs
[15] < 0xfffffff0)
495 || !(env
->uncached_cpsr
& CPSR_I
))) {
496 env
->exception_index
= EXCP_IRQ
;
500 #elif defined(TARGET_SH4)
501 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
505 #elif defined(TARGET_ALPHA)
506 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
510 #elif defined(TARGET_CRIS)
511 if (interrupt_request
& CPU_INTERRUPT_HARD
512 && (env
->pregs
[PR_CCS
] & I_FLAG
)) {
513 env
->exception_index
= EXCP_IRQ
;
517 if (interrupt_request
& CPU_INTERRUPT_NMI
518 && (env
->pregs
[PR_CCS
] & M_FLAG
)) {
519 env
->exception_index
= EXCP_NMI
;
523 #elif defined(TARGET_M68K)
524 if (interrupt_request
& CPU_INTERRUPT_HARD
525 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
526 < env
->pending_level
) {
527 /* Real hardware gets the interrupt vector via an
528 IACK cycle at this point. Current emulated
529 hardware doesn't rely on this, so we
530 provide/save the vector when the interrupt is
532 env
->exception_index
= env
->pending_vector
;
537 /* Don't use the cached interupt_request value,
538 do_interrupt may have updated the EXITTB flag. */
539 if (env
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
540 env
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
541 /* ensure that no TB jump will be modified as
542 the program flow was changed */
545 if (interrupt_request
& CPU_INTERRUPT_EXIT
) {
546 env
->interrupt_request
&= ~CPU_INTERRUPT_EXIT
;
547 env
->exception_index
= EXCP_INTERRUPT
;
552 if ((loglevel
& CPU_LOG_TB_CPU
)) {
553 /* restore flags in standard format */
555 #if defined(TARGET_I386)
556 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
557 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
558 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
559 #elif defined(TARGET_ARM)
560 cpu_dump_state(env
, logfile
, fprintf
, 0);
561 #elif defined(TARGET_SPARC)
562 cpu_dump_state(env
, logfile
, fprintf
, 0);
563 #elif defined(TARGET_PPC)
564 cpu_dump_state(env
, logfile
, fprintf
, 0);
565 #elif defined(TARGET_M68K)
566 cpu_m68k_flush_flags(env
, env
->cc_op
);
567 env
->cc_op
= CC_OP_FLAGS
;
568 env
->sr
= (env
->sr
& 0xffe0)
569 | env
->cc_dest
| (env
->cc_x
<< 4);
570 cpu_dump_state(env
, logfile
, fprintf
, 0);
571 #elif defined(TARGET_MIPS)
572 cpu_dump_state(env
, logfile
, fprintf
, 0);
573 #elif defined(TARGET_SH4)
574 cpu_dump_state(env
, logfile
, fprintf
, 0);
575 #elif defined(TARGET_ALPHA)
576 cpu_dump_state(env
, logfile
, fprintf
, 0);
577 #elif defined(TARGET_CRIS)
578 cpu_dump_state(env
, logfile
, fprintf
, 0);
580 #error unsupported target CPU
586 /* Note: we do it here to avoid a gcc bug on Mac OS X when
587 doing it in tb_find_slow */
588 if (tb_invalidated_flag
) {
589 /* as some TB could have been invalidated because
590 of memory exceptions while generating the code, we
591 must recompute the hash index here */
593 tb_invalidated_flag
= 0;
596 if ((loglevel
& CPU_LOG_EXEC
)) {
597 fprintf(logfile
, "Trace 0x%08lx [" TARGET_FMT_lx
"] %s\n",
598 (long)tb
->tc_ptr
, tb
->pc
,
599 lookup_symbol(tb
->pc
));
602 /* see if we can patch the calling TB. When the TB
603 spans two pages, we cannot safely do a direct
608 (env
->kqemu_enabled
!= 2) &&
610 tb
->page_addr
[1] == -1) {
611 tb_add_jump((TranslationBlock
*)(next_tb
& ~3), next_tb
& 3, tb
);
614 spin_unlock(&tb_lock
);
615 env
->current_tb
= tb
;
616 while (env
->current_tb
) {
618 /* execute the generated code */
619 #if defined(__sparc__) && !defined(HOST_SOLARIS)
621 env
= cpu_single_env
;
622 #define env cpu_single_env
624 next_tb
= tcg_qemu_tb_exec(tc_ptr
);
625 env
->current_tb
= NULL
;
626 if ((next_tb
& 3) == 2) {
627 /* Instruction counter expired. */
629 tb
= (TranslationBlock
*)(long)(next_tb
& ~3);
631 CPU_PC_FROM_TB(env
, tb
);
632 insns_left
= env
->icount_decr
.u32
;
633 if (env
->icount_extra
&& insns_left
>= 0) {
634 /* Refill decrementer and continue execution. */
635 env
->icount_extra
+= insns_left
;
636 if (env
->icount_extra
> 0xffff) {
639 insns_left
= env
->icount_extra
;
641 env
->icount_extra
-= insns_left
;
642 env
->icount_decr
.u16
.low
= insns_left
;
644 if (insns_left
> 0) {
645 /* Execute remaining instructions. */
646 cpu_exec_nocache(insns_left
, tb
);
648 env
->exception_index
= EXCP_INTERRUPT
;
654 /* reset soft MMU for next block (it can currently
655 only be set by a memory fault) */
656 #if defined(USE_KQEMU)
657 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
658 if (kqemu_is_ok(env
) &&
659 (cpu_get_time_fast() - env
->last_io_time
) >= MIN_CYCLE_BEFORE_SWITCH
) {
670 #if defined(TARGET_I386)
671 /* restore flags in standard format */
672 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
673 #elif defined(TARGET_ARM)
674 /* XXX: Save/restore host fpu exception state?. */
675 #elif defined(TARGET_SPARC)
676 #elif defined(TARGET_PPC)
677 #elif defined(TARGET_M68K)
678 cpu_m68k_flush_flags(env
, env
->cc_op
);
679 env
->cc_op
= CC_OP_FLAGS
;
680 env
->sr
= (env
->sr
& 0xffe0)
681 | env
->cc_dest
| (env
->cc_x
<< 4);
682 #elif defined(TARGET_MIPS)
683 #elif defined(TARGET_SH4)
684 #elif defined(TARGET_ALPHA)
685 #elif defined(TARGET_CRIS)
688 #error unsupported target CPU
691 /* restore global registers */
692 #include "hostregs_helper.h"
694 /* fail safe : never use cpu_single_env outside cpu_exec() */
695 cpu_single_env
= NULL
;
/* Invalidate all TBs overlapping the guest virtual range [start, end):
   translate `start` to a code physical address and delegate to
   tb_invalidate_phys_page_range().
   NOTE(review): garbled extraction — the function braces and the `#if`
   that appears to gate the body (original lines 702, 705, 709-710) are
   missing; the XXX comment suggests the body may be compiled out. */
699 /* must only be called from the generated code as an exception can be
701 void tb_invalidate_page_range(target_ulong start
, target_ulong end
)
703 /* XXX: cannot enable it yet because it yields to MMU exception
704 where NIP != read address on PowerPC */
706 target_ulong phys_addr
;
707 phys_addr
= get_phys_addr_code(env
, start
);
708 tb_invalidate_phys_page_range(phys_addr
, phys_addr
+ end
- start
, 0);
/* User-mode-only helper: load an x86 segment register.  In real mode or
   vm86 mode (CR0.PE clear or EFLAGS.VM set) the descriptor cache is filled
   directly from the selector; otherwise the protected-mode helper is used.
   NOTE(review): garbled extraction — the saved_env save/restore assignments
   (original lines 717-719, 726-728) and the else/braces are missing from
   this view; `env` is presumably swapped to `s` around the body — confirm
   against the upstream source. */
712 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
714 void cpu_x86_load_seg(CPUX86State
*s
, int seg_reg
, int selector
)
716 CPUX86State
*saved_env
;
720 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
/* Real/vm86 mode: base = selector << 4, limit = 0xffff, no attributes. */
722 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
723 (selector
<< 4), 0xffff, 0);
725 helper_load_seg(seg_reg
, selector
);
/* User-mode helper: store the FPU state to guest memory at `ptr` via
   helper_fsave() (data32 selects 16- vs 32-bit layout per the helper).
   NOTE(review): garbled extraction — the `saved_env = env; env = s;` and
   `env = saved_env;` lines (original 733-736, 738-739) are missing here. */
730 void cpu_x86_fsave(CPUX86State
*s
, target_ulong ptr
, int data32
)
732 CPUX86State
*saved_env
;
737 helper_fsave(ptr
, data32
);
/* User-mode helper: restore the FPU state from guest memory at `ptr` via
   helper_frstor().  Mirror image of cpu_x86_fsave() above.
   NOTE(review): garbled extraction — the env save/restore lines
   (original 745-748, 750-751) are missing from this view. */
742 void cpu_x86_frstor(CPUX86State
*s
, target_ulong ptr
, int data32
)
744 CPUX86State
*saved_env
;
749 helper_frstor(ptr
, data32
);
754 #endif /* TARGET_I386 */
/* TARGET_I386 variant of the user-mode SIGSEGV handler core: first try
   page_unprotect() (self-modifying-code write fault), then the guest MMU
   fault path; if a real guest fault results, restore CPU state from the
   faulting TB, restore the signal mask, and raise the guest exception.
   Returns 0 for "not ours", 1 for "handled".
   NOTE(review): garbled extraction — the `if (ret ...)` tests, `tb =
   tb_find_pc(pc)` and several braces/returns (original lines 757, 759,
   766-771, 776, 779-781, 784, 786, 789-790, 794-796, 799, 804, 808,
   810-812) are missing from this view. */
756 #if !defined(CONFIG_SOFTMMU)
758 #if defined(TARGET_I386)
760 /* 'pc' is the host PC at which the exception was raised. 'address' is
761 the effective address of the memory exception. 'is_write' is 1 if a
762 write caused the exception and otherwise 0'. 'old_set' is the
763 signal set which should be restored */
764 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
765 int is_write
, sigset_t
*old_set
,
768 TranslationBlock
*tb
;
772 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
773 #if defined(DEBUG_SIGNAL)
774 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
775 pc
, address
, is_write
, *(unsigned long *)old_set
);
/* Write fault on a page we write-protected to detect self-modifying
   code: unprotect it and retry. */
777 /* XXX: locking issue */
778 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
782 /* see if it is an MMU fault */
783 ret
= cpu_x86_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
785 return 0; /* not an MMU fault */
787 return 1; /* the MMU fault was handled without causing real CPU fault */
788 /* now we have a real cpu fault */
791 /* the PC is inside the translated code. It means that we have
792 a virtual CPU fault */
793 cpu_restore_state(tb
, env
, pc
, puc
);
750 /* (debug dump of the fault follows) */
797 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
798 env
->eip
, env
->cr
[2], env
->error_code
);
800 /* we restore the process signal mask as the sigreturn should
801 do it (XXX: use sigsetjmp) */
802 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
803 raise_exception_err(env
->exception_index
, env
->error_code
);
/* Fault outside translated code: fall back to soft MMU and re-enter
   the execution loop via the signal-resume path. */
805 /* activate soft MMU for this block */
806 env
->hflags
|= HF_SOFTMMU_MASK
;
807 cpu_resume_from_signal(env
, puc
);
809 /* never comes here */
813 #elif defined(TARGET_ARM)
814 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
815 int is_write
, sigset_t
*old_set
,
818 TranslationBlock
*tb
;
822 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
823 #if defined(DEBUG_SIGNAL)
824 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
825 pc
, address
, is_write
, *(unsigned long *)old_set
);
827 /* XXX: locking issue */
828 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
831 /* see if it is an MMU fault */
832 ret
= cpu_arm_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
834 return 0; /* not an MMU fault */
836 return 1; /* the MMU fault was handled without causing real CPU fault */
837 /* now we have a real cpu fault */
840 /* the PC is inside the translated code. It means that we have
841 a virtual CPU fault */
842 cpu_restore_state(tb
, env
, pc
, puc
);
844 /* we restore the process signal mask as the sigreturn should
845 do it (XXX: use sigsetjmp) */
846 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
848 /* never comes here */
851 #elif defined(TARGET_SPARC)
852 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
853 int is_write
, sigset_t
*old_set
,
856 TranslationBlock
*tb
;
860 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
861 #if defined(DEBUG_SIGNAL)
862 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
863 pc
, address
, is_write
, *(unsigned long *)old_set
);
865 /* XXX: locking issue */
866 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
869 /* see if it is an MMU fault */
870 ret
= cpu_sparc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
872 return 0; /* not an MMU fault */
874 return 1; /* the MMU fault was handled without causing real CPU fault */
875 /* now we have a real cpu fault */
878 /* the PC is inside the translated code. It means that we have
879 a virtual CPU fault */
880 cpu_restore_state(tb
, env
, pc
, puc
);
882 /* we restore the process signal mask as the sigreturn should
883 do it (XXX: use sigsetjmp) */
884 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
886 /* never comes here */
889 #elif defined (TARGET_PPC)
890 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
891 int is_write
, sigset_t
*old_set
,
894 TranslationBlock
*tb
;
898 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
899 #if defined(DEBUG_SIGNAL)
900 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
901 pc
, address
, is_write
, *(unsigned long *)old_set
);
903 /* XXX: locking issue */
904 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
908 /* see if it is an MMU fault */
909 ret
= cpu_ppc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
911 return 0; /* not an MMU fault */
913 return 1; /* the MMU fault was handled without causing real CPU fault */
915 /* now we have a real cpu fault */
918 /* the PC is inside the translated code. It means that we have
919 a virtual CPU fault */
920 cpu_restore_state(tb
, env
, pc
, puc
);
924 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
925 env
->nip
, env
->error_code
, tb
);
927 /* we restore the process signal mask as the sigreturn should
928 do it (XXX: use sigsetjmp) */
929 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
930 do_raise_exception_err(env
->exception_index
, env
->error_code
);
932 /* activate soft MMU for this block */
933 cpu_resume_from_signal(env
, puc
);
935 /* never comes here */
939 #elif defined(TARGET_M68K)
940 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
941 int is_write
, sigset_t
*old_set
,
944 TranslationBlock
*tb
;
948 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
949 #if defined(DEBUG_SIGNAL)
950 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
951 pc
, address
, is_write
, *(unsigned long *)old_set
);
953 /* XXX: locking issue */
954 if (is_write
&& page_unprotect(address
, pc
, puc
)) {
957 /* see if it is an MMU fault */
958 ret
= cpu_m68k_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
960 return 0; /* not an MMU fault */
962 return 1; /* the MMU fault was handled without causing real CPU fault */
963 /* now we have a real cpu fault */
966 /* the PC is inside the translated code. It means that we have
967 a virtual CPU fault */
968 cpu_restore_state(tb
, env
, pc
, puc
);
970 /* we restore the process signal mask as the sigreturn should
971 do it (XXX: use sigsetjmp) */
972 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
974 /* never comes here */
978 #elif defined (TARGET_MIPS)
979 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
980 int is_write
, sigset_t
*old_set
,
983 TranslationBlock
*tb
;
987 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
988 #if defined(DEBUG_SIGNAL)
989 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
990 pc
, address
, is_write
, *(unsigned long *)old_set
);
992 /* XXX: locking issue */
993 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
997 /* see if it is an MMU fault */
998 ret
= cpu_mips_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1000 return 0; /* not an MMU fault */
1002 return 1; /* the MMU fault was handled without causing real CPU fault */
1004 /* now we have a real cpu fault */
1005 tb
= tb_find_pc(pc
);
1007 /* the PC is inside the translated code. It means that we have
1008 a virtual CPU fault */
1009 cpu_restore_state(tb
, env
, pc
, puc
);
1013 printf("PF exception: PC=0x" TARGET_FMT_lx
" error=0x%x %p\n",
1014 env
->PC
, env
->error_code
, tb
);
1016 /* we restore the process signal mask as the sigreturn should
1017 do it (XXX: use sigsetjmp) */
1018 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1019 do_raise_exception_err(env
->exception_index
, env
->error_code
);
1021 /* activate soft MMU for this block */
1022 cpu_resume_from_signal(env
, puc
);
1024 /* never comes here */
1028 #elif defined (TARGET_SH4)
1029 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1030 int is_write
, sigset_t
*old_set
,
1033 TranslationBlock
*tb
;
1037 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1038 #if defined(DEBUG_SIGNAL)
1039 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1040 pc
, address
, is_write
, *(unsigned long *)old_set
);
1042 /* XXX: locking issue */
1043 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1047 /* see if it is an MMU fault */
1048 ret
= cpu_sh4_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1050 return 0; /* not an MMU fault */
1052 return 1; /* the MMU fault was handled without causing real CPU fault */
1054 /* now we have a real cpu fault */
1055 tb
= tb_find_pc(pc
);
1057 /* the PC is inside the translated code. It means that we have
1058 a virtual CPU fault */
1059 cpu_restore_state(tb
, env
, pc
, puc
);
1062 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1063 env
->nip
, env
->error_code
, tb
);
1065 /* we restore the process signal mask as the sigreturn should
1066 do it (XXX: use sigsetjmp) */
1067 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1069 /* never comes here */
1073 #elif defined (TARGET_ALPHA)
1074 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1075 int is_write
, sigset_t
*old_set
,
1078 TranslationBlock
*tb
;
1082 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1083 #if defined(DEBUG_SIGNAL)
1084 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1085 pc
, address
, is_write
, *(unsigned long *)old_set
);
1087 /* XXX: locking issue */
1088 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1092 /* see if it is an MMU fault */
1093 ret
= cpu_alpha_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1095 return 0; /* not an MMU fault */
1097 return 1; /* the MMU fault was handled without causing real CPU fault */
1099 /* now we have a real cpu fault */
1100 tb
= tb_find_pc(pc
);
1102 /* the PC is inside the translated code. It means that we have
1103 a virtual CPU fault */
1104 cpu_restore_state(tb
, env
, pc
, puc
);
1107 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1108 env
->nip
, env
->error_code
, tb
);
1110 /* we restore the process signal mask as the sigreturn should
1111 do it (XXX: use sigsetjmp) */
1112 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1114 /* never comes here */
1117 #elif defined (TARGET_CRIS)
1118 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1119 int is_write
, sigset_t
*old_set
,
1122 TranslationBlock
*tb
;
1126 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1127 #if defined(DEBUG_SIGNAL)
1128 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1129 pc
, address
, is_write
, *(unsigned long *)old_set
);
1131 /* XXX: locking issue */
1132 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1136 /* see if it is an MMU fault */
1137 ret
= cpu_cris_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1139 return 0; /* not an MMU fault */
1141 return 1; /* the MMU fault was handled without causing real CPU fault */
1143 /* now we have a real cpu fault */
1144 tb
= tb_find_pc(pc
);
1146 /* the PC is inside the translated code. It means that we have
1147 a virtual CPU fault */
1148 cpu_restore_state(tb
, env
, pc
, puc
);
1150 /* we restore the process signal mask as the sigreturn should
1151 do it (XXX: use sigsetjmp) */
1152 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1154 /* never comes here */
1159 #error unsupported target CPU
/* Host __i386__ signal handler entry: extract the faulting EIP, trap number
   and error code from the ucontext (Apple vs. Linux mcontext layouts differ,
   hence the two EIP_sig/TRAP_sig/ERROR_sig macro sets) and forward to
   handle_cpu_signal().  Only trap 0xe (page fault) carries a meaningful
   write bit (ERROR_sig bit 1).
   NOTE(review): garbled extraction — the `#else`, `unsigned long pc;` /
   `pc = EIP_sig(uc);` lines and the `trapno == 0xe ?` condition (original
   lines 1170, 1174-1175, 1177-1187, 1189-1190, 1193) are missing here. */
1162 #if defined(__i386__)
1164 #if defined(__APPLE__)
1165 # include <sys/ucontext.h>
1167 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1168 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1169 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1171 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1172 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1173 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1176 int cpu_signal_handler(int host_signum
, void *pinfo
,
1179 siginfo_t
*info
= pinfo
;
1180 struct ucontext
*uc
= puc
;
1188 #define REG_TRAPNO TRAPNO
1191 trapno
= TRAP_sig(uc
);
1192 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1194 (ERROR_sig(uc
) >> 1) & 1 : 0,
1195 &uc
->uc_sigmask
, puc
);
/* Host __x86_64__ signal handler entry: read RIP, the trap number and the
   page-fault error code straight from uc_mcontext.gregs and forward to
   handle_cpu_signal().  As on i386, the write bit is only valid when the
   trap number is 0xe (page fault).
   NOTE(review): garbled extraction — the function's opening lines
   (original 1201-1202, 1205-1206) and closing brace are missing here. */
1198 #elif defined(__x86_64__)
1200 int cpu_signal_handler(int host_signum
, void *pinfo
,
1203 siginfo_t
*info
= pinfo
;
1204 struct ucontext
*uc
= puc
;
1207 pc
= uc
->uc_mcontext
.gregs
[REG_RIP
];
1208 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1209 uc
->uc_mcontext
.gregs
[REG_TRAPNO
] == 0xe ?
1210 (uc
->uc_mcontext
.gregs
[REG_ERR
] >> 1) & 1 : 0,
1211 &uc
->uc_sigmask
, puc
);
1214 #elif defined(__powerpc__)
1216 /***********************************************************************
1217 * signal context platform-specific definitions
1221 /* All Registers access - only for local access */
1222 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1223 /* Gpr Registers access */
1224 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1225 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1226 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1227 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1228 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1229 # define LR_sig(context) REG_sig(link, context) /* Link register */
1230 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1231 /* Float Registers access */
1232 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1233 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1234 /* Exception Registers access */
1235 # define DAR_sig(context) REG_sig(dar, context)
1236 # define DSISR_sig(context) REG_sig(dsisr, context)
1237 # define TRAP_sig(context) REG_sig(trap, context)
1241 # include <sys/ucontext.h>
1242 typedef struct ucontext SIGCONTEXT
;
1243 /* All Registers access - only for local access */
1244 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1245 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1246 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1247 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1248 /* Gpr Registers access */
1249 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1250 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1251 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1252 # define CTR_sig(context) REG_sig(ctr, context)
1253 # define XER_sig(context) REG_sig(xer, context) /* Link register */
1254 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1255 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1256 /* Float Registers access */
1257 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1258 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1259 /* Exception Registers access */
1260 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1261 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1262 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1263 #endif /* __APPLE__ */
1265 int cpu_signal_handler(int host_signum
, void *pinfo
,
1268 siginfo_t
*info
= pinfo
;
1269 struct ucontext
*uc
= puc
;
1277 if (DSISR_sig(uc
) & 0x00800000)
1280 if (TRAP_sig(uc
) != 0x400 && (DSISR_sig(uc
) & 0x02000000))
1283 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1284 is_write
, &uc
->uc_sigmask
, puc
);
1287 #elif defined(__alpha__)
1289 int cpu_signal_handler(int host_signum
, void *pinfo
,
1292 siginfo_t
*info
= pinfo
;
1293 struct ucontext
*uc
= puc
;
1294 uint32_t *pc
= uc
->uc_mcontext
.sc_pc
;
1295 uint32_t insn
= *pc
;
1298 /* XXX: need kernel patch to get write flag faster */
1299 switch (insn
>> 26) {
1314 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1315 is_write
, &uc
->uc_sigmask
, puc
);
1317 #elif defined(__sparc__)
1319 int cpu_signal_handler(int host_signum
, void *pinfo
,
1322 siginfo_t
*info
= pinfo
;
1325 #if !defined(__arch64__) || defined(HOST_SOLARIS)
1326 uint32_t *regs
= (uint32_t *)(info
+ 1);
1327 void *sigmask
= (regs
+ 20);
1328 /* XXX: is there a standard glibc define ? */
1329 unsigned long pc
= regs
[1];
1331 struct sigcontext
*sc
= puc
;
1332 unsigned long pc
= sc
->sigc_regs
.tpc
;
1333 void *sigmask
= (void *)sc
->sigc_mask
;
1336 /* XXX: need kernel patch to get write flag faster */
1338 insn
= *(uint32_t *)pc
;
1339 if ((insn
>> 30) == 3) {
1340 switch((insn
>> 19) & 0x3f) {
1352 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1353 is_write
, sigmask
, NULL
);
1356 #elif defined(__arm__)
1358 int cpu_signal_handler(int host_signum
, void *pinfo
,
1361 siginfo_t
*info
= pinfo
;
1362 struct ucontext
*uc
= puc
;
1366 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1367 pc
= uc
->uc_mcontext
.gregs
[R15
];
1369 pc
= uc
->uc_mcontext
.arm_pc
;
1371 /* XXX: compute is_write */
1373 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1375 &uc
->uc_sigmask
, puc
);
1378 #elif defined(__mc68000)
1380 int cpu_signal_handler(int host_signum
, void *pinfo
,
1383 siginfo_t
*info
= pinfo
;
1384 struct ucontext
*uc
= puc
;
1388 pc
= uc
->uc_mcontext
.gregs
[16];
1389 /* XXX: compute is_write */
1391 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1393 &uc
->uc_sigmask
, puc
);
1396 #elif defined(__ia64)
1399 /* This ought to be in <bits/siginfo.h>... */
1400 # define __ISR_VALID 1
1403 int cpu_signal_handler(int host_signum
, void *pinfo
, void *puc
)
1405 siginfo_t
*info
= pinfo
;
1406 struct ucontext
*uc
= puc
;
1410 ip
= uc
->uc_mcontext
.sc_ip
;
1411 switch (host_signum
) {
1417 if (info
->si_code
&& (info
->si_segvflags
& __ISR_VALID
))
1418 /* ISR.W (write-access) is bit 33: */
1419 is_write
= (info
->si_isr
>> 33) & 1;
1425 return handle_cpu_signal(ip
, (unsigned long)info
->si_addr
,
1427 &uc
->uc_sigmask
, puc
);
1430 #elif defined(__s390__)
1432 int cpu_signal_handler(int host_signum
, void *pinfo
,
1435 siginfo_t
*info
= pinfo
;
1436 struct ucontext
*uc
= puc
;
1440 pc
= uc
->uc_mcontext
.psw
.addr
;
1441 /* XXX: compute is_write */
1443 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1444 is_write
, &uc
->uc_sigmask
, puc
);
1447 #elif defined(__mips__)
1449 int cpu_signal_handler(int host_signum
, void *pinfo
,
1452 siginfo_t
*info
= pinfo
;
1453 struct ucontext
*uc
= puc
;
1454 greg_t pc
= uc
->uc_mcontext
.pc
;
1457 /* XXX: compute is_write */
1459 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1460 is_write
, &uc
->uc_sigmask
, puc
);
1463 #elif defined(__hppa__)
1465 int cpu_signal_handler(int host_signum
, void *pinfo
,
1468 struct siginfo
*info
= pinfo
;
1469 struct ucontext
*uc
= puc
;
1473 pc
= uc
->uc_mcontext
.sc_iaoq
[0];
1474 /* FIXME: compute is_write */
1476 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1478 &uc
->uc_sigmask
, puc
);
1483 #error host CPU specific signal handler needed
1487 #endif /* !defined(CONFIG_SOFTMMU) */