/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define CPU_NO_GLOBAL_REGS

#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    longjmp(env->jmp_env, 1);
}
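/* Illustrative sketch (hypothetical names, not QEMU code): cpu_loop_exit()
   relies on the standard setjmp/longjmp idiom against env->jmp_env, which
   cpu_exec() arms below.  A minimal self-contained version of the idiom,
   kept under #if 0 so it does not affect the build: */
#if 0
#include <setjmp.h>

static jmp_buf loop_env;

static void exit_loop(void)      /* plays the role of cpu_loop_exit() */
{
    longjmp(loop_env, 1);        /* unwinds straight back into run_loop() */
}

static int run_loop(void)        /* plays the role of cpu_exec() */
{
    if (setjmp(loop_env) == 0) {
        /* normal path: run guest code; any exit_loop() call below
           returns here with setjmp() evaluating to 1 */
        exit_loop();
    }
    return 0;                    /* reached after the longjmp */
}
#endif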
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif
    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    /* XXX: use siglongjmp ? */
#ifdef __linux__
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
    sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
#endif
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        CPU_PC_FROM_TB(env, tb);
    }
    tb_phys_invalidate(tb, -1);
}
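/* Note on the block above: the cycle budget is passed to tb_gen_code() in
   the TB's cflags (hence the clamp to CF_COUNT_MASK), so the generated
   block stops after at most max_cycles guest instructions; invalidating it
   right after execution keeps this one-shot TB out of the caches. */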
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
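/* Illustrative sketch (hypothetical names, not QEMU code): tb_find_slow()
   above walks a collision chain through a pointer-to-pointer, the classic
   chained hash-table lookup shown minimally below; kept under #if 0 so it
   does not affect the build. */
#if 0
#include <stddef.h>

struct node {
    unsigned long key;
    struct node *next;
};

#define NBUCKETS 1024
static struct node *buckets[NBUCKETS];

static struct node *find(unsigned long key)
{
    struct node **pnode = &buckets[key % NBUCKETS];   /* like &tb_phys_hash[h] */
    struct node *n;

    while ((n = *pnode) != NULL) {
        if (n->key == key)
            return n;                 /* hit: the "goto found" path */
        pnode = &n->next;             /* like ptb1 = &tb->phys_hash_next */
    }
    return NULL;                      /* miss: caller generates a new TB */
}
#endif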
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = ((env->pstate & PS_AM) << 2)
        | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
                    | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME))   /* Bits  0- 3 */
            | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR))  /* Bits 19-21 */
            | (env->sr & (SR_MD | SR_RB));                     /* Bits 29-30 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
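/* The fast path above is a direct-mapped cache indexed by a hash of the
   virtual PC; only on a miss, or when cs_base/flags disagree, does
   tb_find_slow() fall back to the physically indexed hash table. */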
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
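    /* Bit 10 of EFLAGS is the x86 direction flag (DF).  The expression
       above maps DF=0 to +1 and DF=1 to -1, the stride that string
       instructions apply to ESI/EDI, which is the "CPU temporary format"
       kept while translated code runs. */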
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_ARM)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_M68K)
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            longjmp(env->jmp_env, 1);
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
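                /* The two low bits of next_tb encode which jump slot of the
                   previously executed TB is to be patched; tb_add_jump()
                   above recovers the TB pointer with (next_tb & ~3) and the
                   slot index with (next_tb & 3). */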
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        int insns_left;
                        /* Instruction counter expired.  */
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
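                    /* The instruction counter is split into a 16-bit
                       decrementer (icount_decr.u16.low), counted down by the
                       translated code itself, plus an icount_extra overflow
                       bucket that the refill branch above tops up at most
                       0xffff instructions at a time. */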
                } /* while (env->current_tb) */
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* inner for(;;) */
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
#endif
    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
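/* Illustrative sketch (hypothetical setup code, not QEMU's): on POSIX
   hosts a handler of this shape receives its (siginfo_t *, ucontext) pair
   from an SA_SIGINFO registration like the one below, which is then
   forwarded as the pinfo/puc arguments of cpu_signal_handler(). Kept under
   #if 0 so it does not affect the build. */
#if 0
#include <signal.h>
#include <string.h>

extern int cpu_signal_handler(int host_signum, void *pinfo, void *puc);

static void host_segv_handler(int sig, siginfo_t *info, void *puc)
{
    /* forward the raw siginfo/ucontext pair to the CPU fault logic */
    cpu_signal_handler(sig, info, puc);
}

static void install_segv_handler(void)
{
    struct sigaction act;

    memset(&act, 0, sizeof(act));
    sigemptyset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;          /* deliver siginfo_t + ucontext */
    act.sa_sigaction = host_segv_handler;
    sigaction(SIGSEGV, &act, NULL);
}
#endif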
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)  ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)   REG_sig(gpr[reg_num], context)
# define IAR_sig(context)            REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)            REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)            REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)            REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)             REG_sig(link, context) /* Link register */
# define CR_sig(context)             REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)          (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)            REG_sig(dar, context)
# define DSISR_sig(context)          REG_sig(dsisr, context)
# define TRAP_sig(context)           REG_sig(trap, context)
#endif /* linux */
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)   /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
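/* Usage note (hypothetical snippet): with the accessors above, host code
   can read the fault state the same way on Linux and Darwin, e.g.
   "unsigned long pc = IAR_sig(uc);" for the interrupted program counter
   and "DSISR_sig(uc)" for the data-fault status, as cpu_signal_handler()
   does below. */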
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
        /* the store opcodes (integer, FP and conditional store forms)
           set is_write = 1 here */
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
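/* Alpha places the major opcode in the top six bits of every 32-bit
   instruction word, so the "insn >> 26" switch above is sufficient to
   recognize the store opcodes and set is_write. */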
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
            /* the store opcodes (integer and FP store forms)
               set is_write = 1 here */
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
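/* SPARC format-3 (load/store) instructions have their top two bits equal
   to 3 and the operation selector (op3) in bits 19-24; that is exactly
   what the "(insn >> 30) == 3" test and the "(insn >> 19) & 0x3f" switch
   above decode to recognize stores. */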
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGSEGV:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;
    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */