/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"

#if !defined(TARGET_IA64)
#include "tcg.h"
#endif

#if !defined(CONFIG_SOFTMMU)
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
int tb_invalidated_flag;

//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
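/* Illustrative sketch (not code from this file) of how target helpers use
   cpu_loop_exit(): they record a pending exception and unwind back to the
   setjmp in cpu_exec(), e.g.

       env->exception_index = EXCP0D_GPF;   // example i386 exception number
       env->error_code = 0;
       cpu_loop_exit();

   cpu_exec() then notices exception_index >= 0 and delivers the exception
   before resuming translated code. */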
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        CPU_PC_FROM_TB(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
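/* A note on the convention used above: tcg_qemu_tb_exec() returns the
   pointer of the TB we exited from, with status encoded in its two low
   bits.  (next_tb & 3) == 2 means the instruction counter expired inside
   the block, so only the PC is trustworthy and must be restored from the
   TB.  The temporary TB is invalidated and freed immediately: it was
   generated for one specific residual cycle count (max_cycles goes into
   its cflags) and must not be picked up by later lookups. */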
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
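/* The lookup is two-level: tb_phys_hash is keyed on the physical PC, so a
   block translated under one virtual mapping can be found again through a
   different mapping of the same physical page, while tb_jmp_cache (filled
   in just above) is a small per-CPU table keyed on the virtual PC that
   turns the common case in tb_find_fast() into a single load-and-compare. */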
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = ((env->pstate & PS_AM) << 2)
        | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_IA64)
    flags = 0;
    cs_base = 0;                    /* XXXXX */
    pc = 0;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
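/* The (pc, cs_base, flags) triple acts as the cache key: the same guest PC
   can require different translations depending on mode bits (e.g. x86
   IOPL/VM flags or the ARM Thumb bit), so those bits are folded into
   `flags` and rechecked before a cached TB is reused. */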
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }

                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
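                /* Block chaining: the low two bits of next_tb say which of
                   the two jump slots of the previously executed TB we left
                   through; tb_add_jump() patches that slot to branch
                   directly into this TB's code, so the next time around the
                   hash lookup above is skipped entirely.  A TB spanning two
                   pages is never chained to, since either page may be
                   invalidated independently. */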
                spin_unlock(&tb_lock);
                env->current_tb = tb;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
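                /* icount bookkeeping: the decrementer counts guest
                   instructions down and goes negative once the budget is
                   spent.  Only its low 16 bits are reloaded at a time, so a
                   budget of e.g. 0x12345 instructions runs as a first slice
                   of 0xffff with the remaining 0x2346 parked in
                   icount_extra.  If the budget runs out mid-TB with a few
                   instructions still owed, cpu_exec_nocache() above runs
                   exactly insns_left instructions through a throw-away TB
                   before the pending interrupt is taken. */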
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* inner for(;;) */
        } else {
            env_to_regs();
        }
    } /* outer for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
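/* The user-mode helpers below all follow the same pattern: QEMU helpers
   implicitly operate on the global `env`, so each wrapper saves the
   current pointer, installs the CPUX86State it was given, calls the
   helper, and restores the old pointer before returning. */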
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
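/* All handle_cpu_signal() variants in this file share one return protocol:
   0 means the fault is not ours and should be re-raised by the host signal
   handler, 1 means it was fully handled and execution may resume.  A fault
   that must become a guest exception never returns at all: it longjmps
   back into cpu_exec() via raise_exception_err(), cpu_loop_exit() or
   cpu_resume_from_signal(). */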
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
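/* On x86 hosts the kernel supplies a page-fault error code in which bit 0
   is the present/protection flag and bit 1 the write flag; that is why the
   handlers here and below compute is_write as (ERROR_sig(uc) >> 1) & 1,
   and only when the trap number is 0xe (page fault). */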
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)         ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)    ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)    ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)      ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)          REG_sig(r##reg_num, context)
# define IAR_sig(context)                   REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                   REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                   REG_sig(ctr, context)
# define XER_sig(context)                   REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                    REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                    REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)        FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                 ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                   EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)                 EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                  EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: /* stw */
    case 0x0e: /* stb */
    case 0x0f: /* stq_u */
    case 0x24: /* stf */
    case 0x25: /* stg */
    case 0x26: /* sts */
    case 0x27: /* stt */
    case 0x2c: /* stl */
    case 0x2d: /* stq */
    case 0x2e: /* stl_c */
    case 0x2f: /* stq_c */
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */