2 * i386 emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #define CPU_NO_GLOBAL_REGS
24 #if !defined(TARGET_IA64)
29 #if !defined(CONFIG_SOFTMMU)
41 #include <sys/ucontext.h>
47 #if defined(__sparc__) && !defined(HOST_SOLARIS)
48 // Work around ugly bugs in glibc that mangle global register contents
50 #define env cpu_single_env
53 int tb_invalidated_flag
;
56 //#define DEBUG_SIGNAL
/* Abort execution of the current translated block and return control to
   the setjmp point established in cpu_exec(), via the global env's
   jmp_env buffer.
   NOTE(review): this extract is missing interior lines of the original
   function (original lines 59, 62, 64 — its braces); the tokens below
   are preserved verbatim from the damaged extraction. */
58 void cpu_loop_exit(void)
60 /* NOTE: the register at this point must be saved by hand because
61 longjmp restore them */
63 longjmp(env
->jmp_env
, 1);
66 /* exit the current TB from a signal handler. The host registers are
67 restored in a state compatible with the CPU emulator
/* Restores the process signal mask saved in the signal-handler context
   (ucontext on most hosts, sigcontext on OpenBSD) and then long-jumps to
   cpu_exec()'s setjmp point — it never returns to the caller.
   NOTE(review): several original lines (function braces, the env1
   assignment, the trailing #endif lines) are missing from this extract;
   the 'puc' context pointer is only used in the !CONFIG_SOFTMMU /
   OpenBSD paths visible below. */
69 void cpu_resume_from_signal(CPUState
*env1
, void *puc
)
71 #if !defined(CONFIG_SOFTMMU)
73 struct ucontext
*uc
= puc
;
74 #elif defined(__OpenBSD__)
75 struct sigcontext
*uc
= puc
;
81 /* XXX: restore cpu registers saved in host registers */
83 #if !defined(CONFIG_SOFTMMU)
85 /* XXX: use siglongjmp ? */
/* Undo the blocked-signal mask the kernel installed for the handler,
   since we bypass the normal sigreturn path by longjmp-ing out. */
87 sigprocmask(SIG_SETMASK
, &uc
->uc_sigmask
, NULL
);
88 #elif defined(__OpenBSD__)
89 sigprocmask(SIG_SETMASK
, &uc
->sc_mask
, NULL
);
93 longjmp(env
->jmp_env
, 1);
96 /* Execute the code without caching the generated code. An interpreter
97 could be used if available. */
/* Generates a throw-away TB for at most max_cycles instructions from
   orig_tb's (pc, cs_base, flags), executes it once, and invalidates it
   afterwards with tb_phys_invalidate().  Used when an existing cached TB
   is too long for the remaining instruction budget.
   NOTE(review): interior lines (braces, the cflags argument of
   tb_gen_code on original line 109) are missing from this extract. */
98 static void cpu_exec_nocache(int max_cycles
, TranslationBlock
*orig_tb
)
100 unsigned long next_tb
;
101 TranslationBlock
*tb
;
103 /* Should never happen.
104 We only end up here when an existing TB is too long. */
/* Clamp the cycle budget to what the TB cflags field can encode. */
105 if (max_cycles
> CF_COUNT_MASK
)
106 max_cycles
= CF_COUNT_MASK
;
108 tb
= tb_gen_code(env
, orig_tb
->pc
, orig_tb
->cs_base
, orig_tb
->flags
,
110 env
->current_tb
= tb
;
111 /* execute the generated code */
112 next_tb
= tcg_qemu_tb_exec(tb
->tc_ptr
);
/* Low two bits of next_tb encode the exit reason; 2 means the TB was
   exited before its first instruction (async event), so roll the PC
   back to the TB's start. */
114 if ((next_tb
& 3) == 2) {
115 /* Restore PC. This may happen if async event occurs before
116 the TB starts executing. */
117 CPU_PC_FROM_TB(env
, tb
);
/* One-shot TB: remove it from the physical-page tracking again. */
119 tb_phys_invalidate(tb
, -1);
/* Slow-path TB lookup: hashes the guest PC's physical address into
   tb_phys_hash and walks the collision chain for a TB whose page
   addresses, cs_base and flags all match; if none is found, translates
   a new TB with tb_gen_code() and installs it in env->tb_jmp_cache.
   NOTE(review): this extract is missing many interior lines of the
   original (the 'flags' parameter declaration, the for(;;) loop header,
   the tb->pc comparison, the page-2 address computation, and the found:
   label/braces); tokens below are verbatim from the damaged source. */
123 static TranslationBlock
*tb_find_slow(target_ulong pc
,
124 target_ulong cs_base
,
127 TranslationBlock
*tb
, **ptb1
;
129 target_ulong phys_pc
, phys_page1
, phys_page2
, virt_page2
;
/* Cleared here; cpu_exec() re-checks this flag after lookup to decide
   whether cached jump-chain state must be recomputed. */
131 tb_invalidated_flag
= 0;
133 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
135 /* find translated block using physical mappings */
136 phys_pc
= get_phys_addr_code(env
, pc
);
137 phys_page1
= phys_pc
& TARGET_PAGE_MASK
;
139 h
= tb_phys_hash_func(phys_pc
);
140 ptb1
= &tb_phys_hash
[h
];
/* Candidate match: first page, cs_base and flags must all agree. */
146 tb
->page_addr
[0] == phys_page1
&&
147 tb
->cs_base
== cs_base
&&
148 tb
->flags
== flags
) {
149 /* check next page if needed */
/* TBs spanning two guest pages record the second page in
   page_addr[1]; -1 means single-page. */
150 if (tb
->page_addr
[1] != -1) {
151 virt_page2
= (pc
& TARGET_PAGE_MASK
) +
153 phys_page2
= get_phys_addr_code(env
, virt_page2
);
154 if (tb
->page_addr
[1] == phys_page2
)
/* No match: advance along the physical-hash collision chain. */
160 ptb1
= &tb
->phys_hash_next
;
163 /* if no translated code available, then translate it now */
164 tb
= tb_gen_code(env
, pc
, cs_base
, flags
, 0);
167 /* we add the TB in the virtual pc hash table */
168 env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)] = tb
;
172 static inline TranslationBlock
*tb_find_fast(void)
174 TranslationBlock
*tb
;
175 target_ulong cs_base
, pc
;
178 /* we record a subset of the CPU state. It will
179 always be the same before a given translated block
181 #if defined(TARGET_I386)
183 flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
184 cs_base
= env
->segs
[R_CS
].base
;
185 pc
= cs_base
+ env
->eip
;
186 #elif defined(TARGET_ARM)
187 flags
= env
->thumb
| (env
->vfp
.vec_len
<< 1)
188 | (env
->vfp
.vec_stride
<< 4);
189 if ((env
->uncached_cpsr
& CPSR_M
) != ARM_CPU_MODE_USR
)
191 if (env
->vfp
.xregs
[ARM_VFP_FPEXC
] & (1 << 30))
193 flags
|= (env
->condexec_bits
<< 8);
196 #elif defined(TARGET_SPARC)
197 #ifdef TARGET_SPARC64
198 // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
199 flags
= ((env
->pstate
& PS_AM
) << 2)
200 | (((env
->pstate
& PS_PEF
) >> 1) | ((env
->fprs
& FPRS_FEF
) << 2))
201 | (env
->pstate
& PS_PRIV
) | ((env
->lsu
& (DMMU_E
| IMMU_E
)) >> 2);
203 // FPU enable . Supervisor
204 flags
= (env
->psref
<< 4) | env
->psrs
;
208 #elif defined(TARGET_PPC)
212 #elif defined(TARGET_MIPS)
213 flags
= env
->hflags
& (MIPS_HFLAG_TMASK
| MIPS_HFLAG_BMASK
);
215 pc
= env
->active_tc
.PC
;
216 #elif defined(TARGET_M68K)
217 flags
= (env
->fpcr
& M68K_FPCR_PREC
) /* Bit 6 */
218 | (env
->sr
& SR_S
) /* Bit 13 */
219 | ((env
->macsr
>> 4) & 0xf); /* Bits 0-3 */
222 #elif defined(TARGET_SH4)
223 flags
= (env
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
224 | DELAY_SLOT_TRUE
| DELAY_SLOT_CLEARME
)) /* Bits 0- 3 */
225 | (env
->fpscr
& (FPSCR_FR
| FPSCR_SZ
| FPSCR_PR
)) /* Bits 19-21 */
226 | (env
->sr
& (SR_MD
| SR_RB
)); /* Bits 29-30 */
229 #elif defined(TARGET_ALPHA)
233 #elif defined(TARGET_CRIS)
234 flags
= env
->pregs
[PR_CCS
] & (S_FLAG
| P_FLAG
| U_FLAG
| X_FLAG
);
238 #elif defined(TARGET_IA64)
240 cs_base
= 0; /* XXXXX */
243 #error unsupported CPU
245 tb
= env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)];
246 if (unlikely(!tb
|| tb
->pc
!= pc
|| tb
->cs_base
!= cs_base
||
247 tb
->flags
!= flags
)) {
248 tb
= tb_find_slow(pc
, cs_base
, flags
);
253 /* main execution loop */
255 int cpu_exec(CPUState
*env1
)
257 #define DECLARE_HOST_REGS 1
258 #include "hostregs_helper.h"
259 int ret
, interrupt_request
;
260 TranslationBlock
*tb
;
262 unsigned long next_tb
;
264 if (cpu_halted(env1
) == EXCP_HALTED
)
267 cpu_single_env
= env1
;
269 /* first we save global registers */
270 #define SAVE_HOST_REGS 1
271 #include "hostregs_helper.h"
275 #if defined(TARGET_I386)
276 /* put eflags in CPU temporary format */
277 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
278 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
279 CC_OP
= CC_OP_EFLAGS
;
280 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
281 #elif defined(TARGET_SPARC)
282 #elif defined(TARGET_M68K)
283 env
->cc_op
= CC_OP_FLAGS
;
284 env
->cc_dest
= env
->sr
& 0xf;
285 env
->cc_x
= (env
->sr
>> 4) & 1;
286 #elif defined(TARGET_ALPHA)
287 #elif defined(TARGET_ARM)
288 #elif defined(TARGET_PPC)
289 #elif defined(TARGET_MIPS)
290 #elif defined(TARGET_SH4)
291 #elif defined(TARGET_CRIS)
292 #elif defined(TARGET_IA64)
295 #error unsupported target CPU
297 env
->exception_index
= -1;
299 /* prepare setjmp context for exception handling */
301 if (setjmp(env
->jmp_env
) == 0) {
302 env
->current_tb
= NULL
;
303 /* if an exception is pending, we execute it here */
304 if (env
->exception_index
>= 0) {
305 if (env
->exception_index
>= EXCP_INTERRUPT
) {
306 /* exit request from the cpu execution loop */
307 ret
= env
->exception_index
;
309 } else if (env
->user_mode_only
) {
310 /* if user mode only, we simulate a fake exception
311 which will be handled outside the cpu execution
313 #if defined(TARGET_I386)
314 do_interrupt_user(env
->exception_index
,
315 env
->exception_is_int
,
317 env
->exception_next_eip
);
318 /* successfully delivered */
319 env
->old_exception
= -1;
321 ret
= env
->exception_index
;
324 #if defined(TARGET_I386)
325 /* simulate a real cpu exception. On i386, it can
326 trigger new exceptions, but we do not handle
327 double or triple faults yet. */
328 do_interrupt(env
->exception_index
,
329 env
->exception_is_int
,
331 env
->exception_next_eip
, 0);
332 /* successfully delivered */
333 env
->old_exception
= -1;
334 #elif defined(TARGET_PPC)
336 #elif defined(TARGET_MIPS)
338 #elif defined(TARGET_SPARC)
340 #elif defined(TARGET_ARM)
342 #elif defined(TARGET_SH4)
344 #elif defined(TARGET_ALPHA)
346 #elif defined(TARGET_CRIS)
348 #elif defined(TARGET_M68K)
350 #elif defined(TARGET_IA64)
354 env
->exception_index
= -1;
357 if (kqemu_is_ok(env
) && env
->interrupt_request
== 0) {
359 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
360 ret
= kqemu_cpu_exec(env
);
361 /* put eflags in CPU temporary format */
362 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
363 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
364 CC_OP
= CC_OP_EFLAGS
;
365 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
368 longjmp(env
->jmp_env
, 1);
369 } else if (ret
== 2) {
370 /* softmmu execution needed */
372 if (env
->interrupt_request
!= 0) {
373 /* hardware interrupt will be executed just after */
375 /* otherwise, we restart */
376 longjmp(env
->jmp_env
, 1);
382 /* kvm vcpu threads */
385 longjmp(env
->jmp_env
, 1);
390 longjmp(env
->jmp_env
, 1);
393 next_tb
= 0; /* force lookup of first TB */
395 interrupt_request
= env
->interrupt_request
;
396 if (unlikely(interrupt_request
)) {
397 if (unlikely(env
->singlestep_enabled
& SSTEP_NOIRQ
)) {
398 /* Mask out external interrupts for this step. */
399 interrupt_request
&= ~(CPU_INTERRUPT_HARD
|
404 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
405 env
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
406 env
->exception_index
= EXCP_DEBUG
;
409 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
410 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
411 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
412 env
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
414 env
->exception_index
= EXCP_HLT
;
418 #if defined(TARGET_I386)
419 if (env
->hflags2
& HF2_GIF_MASK
) {
420 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
421 !(env
->hflags
& HF_SMM_MASK
)) {
422 svm_check_intercept(SVM_EXIT_SMI
);
423 env
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
426 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
427 !(env
->hflags2
& HF2_NMI_MASK
)) {
428 env
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
429 env
->hflags2
|= HF2_NMI_MASK
;
430 do_interrupt(EXCP02_NMI
, 0, 0, 0, 1);
432 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
433 (((env
->hflags2
& HF2_VINTR_MASK
) &&
434 (env
->hflags2
& HF2_HIF_MASK
)) ||
435 (!(env
->hflags2
& HF2_VINTR_MASK
) &&
436 (env
->eflags
& IF_MASK
&&
437 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
))))) {
439 svm_check_intercept(SVM_EXIT_INTR
);
440 env
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VIRQ
);
441 intno
= cpu_get_pic_interrupt(env
);
442 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
443 fprintf(logfile
, "Servicing hardware INT=0x%02x\n", intno
);
445 do_interrupt(intno
, 0, 0, 0, 1);
446 /* ensure that no TB jump will be modified as
447 the program flow was changed */
449 #if !defined(CONFIG_USER_ONLY)
450 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
451 (env
->eflags
& IF_MASK
) &&
452 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
454 /* FIXME: this should respect TPR */
455 svm_check_intercept(SVM_EXIT_VINTR
);
456 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
457 intno
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_vector
));
458 if (loglevel
& CPU_LOG_TB_IN_ASM
)
459 fprintf(logfile
, "Servicing virtual hardware INT=0x%02x\n", intno
);
460 do_interrupt(intno
, 0, 0, 0, 1);
465 #elif defined(TARGET_PPC)
467 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
471 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
472 ppc_hw_interrupt(env
);
473 if (env
->pending_interrupts
== 0)
474 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
477 #elif defined(TARGET_MIPS)
478 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
479 (env
->CP0_Status
& env
->CP0_Cause
& CP0Ca_IP_mask
) &&
480 (env
->CP0_Status
& (1 << CP0St_IE
)) &&
481 !(env
->CP0_Status
& (1 << CP0St_EXL
)) &&
482 !(env
->CP0_Status
& (1 << CP0St_ERL
)) &&
483 !(env
->hflags
& MIPS_HFLAG_DM
)) {
485 env
->exception_index
= EXCP_EXT_INTERRUPT
;
490 #elif defined(TARGET_SPARC)
491 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
493 int pil
= env
->interrupt_index
& 15;
494 int type
= env
->interrupt_index
& 0xf0;
496 if (((type
== TT_EXTINT
) &&
497 (pil
== 15 || pil
> env
->psrpil
)) ||
499 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
500 env
->exception_index
= env
->interrupt_index
;
502 env
->interrupt_index
= 0;
503 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
508 } else if (interrupt_request
& CPU_INTERRUPT_TIMER
) {
509 //do_interrupt(0, 0, 0, 0, 0);
510 env
->interrupt_request
&= ~CPU_INTERRUPT_TIMER
;
512 #elif defined(TARGET_ARM)
513 if (interrupt_request
& CPU_INTERRUPT_FIQ
514 && !(env
->uncached_cpsr
& CPSR_F
)) {
515 env
->exception_index
= EXCP_FIQ
;
519 /* ARMv7-M interrupt return works by loading a magic value
520 into the PC. On real hardware the load causes the
521 return to occur. The qemu implementation performs the
522 jump normally, then does the exception return when the
523 CPU tries to execute code at the magic address.
524 This will cause the magic PC value to be pushed to
525 the stack if an interrupt occurred at the wrong time.
526 We avoid this by disabling interrupts when
527 pc contains a magic address. */
528 if (interrupt_request
& CPU_INTERRUPT_HARD
529 && ((IS_M(env
) && env
->regs
[15] < 0xfffffff0)
530 || !(env
->uncached_cpsr
& CPSR_I
))) {
531 env
->exception_index
= EXCP_IRQ
;
535 #elif defined(TARGET_SH4)
536 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
540 #elif defined(TARGET_ALPHA)
541 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
545 #elif defined(TARGET_CRIS)
546 if (interrupt_request
& CPU_INTERRUPT_HARD
547 && (env
->pregs
[PR_CCS
] & I_FLAG
)) {
548 env
->exception_index
= EXCP_IRQ
;
552 if (interrupt_request
& CPU_INTERRUPT_NMI
553 && (env
->pregs
[PR_CCS
] & M_FLAG
)) {
554 env
->exception_index
= EXCP_NMI
;
558 #elif defined(TARGET_M68K)
559 if (interrupt_request
& CPU_INTERRUPT_HARD
560 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
561 < env
->pending_level
) {
562 /* Real hardware gets the interrupt vector via an
563 IACK cycle at this point. Current emulated
564 hardware doesn't rely on this, so we
565 provide/save the vector when the interrupt is
567 env
->exception_index
= env
->pending_vector
;
572 /* Don't use the cached interrupt_request value,
573 do_interrupt may have updated the EXITTB flag. */
574 if (env
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
575 env
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
576 /* ensure that no TB jump will be modified as
577 the program flow was changed */
580 if (interrupt_request
& CPU_INTERRUPT_EXIT
) {
581 env
->interrupt_request
&= ~CPU_INTERRUPT_EXIT
;
582 env
->exception_index
= EXCP_INTERRUPT
;
587 if ((loglevel
& CPU_LOG_TB_CPU
)) {
588 /* restore flags in standard format */
590 #if defined(TARGET_I386)
591 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
592 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
593 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
594 #elif defined(TARGET_ARM)
595 cpu_dump_state(env
, logfile
, fprintf
, 0);
596 #elif defined(TARGET_SPARC)
597 cpu_dump_state(env
, logfile
, fprintf
, 0);
598 #elif defined(TARGET_PPC)
599 cpu_dump_state(env
, logfile
, fprintf
, 0);
600 #elif defined(TARGET_M68K)
601 cpu_m68k_flush_flags(env
, env
->cc_op
);
602 env
->cc_op
= CC_OP_FLAGS
;
603 env
->sr
= (env
->sr
& 0xffe0)
604 | env
->cc_dest
| (env
->cc_x
<< 4);
605 cpu_dump_state(env
, logfile
, fprintf
, 0);
606 #elif defined(TARGET_MIPS)
607 cpu_dump_state(env
, logfile
, fprintf
, 0);
608 #elif defined(TARGET_SH4)
609 cpu_dump_state(env
, logfile
, fprintf
, 0);
610 #elif defined(TARGET_ALPHA)
611 cpu_dump_state(env
, logfile
, fprintf
, 0);
612 #elif defined(TARGET_CRIS)
613 cpu_dump_state(env
, logfile
, fprintf
, 0);
615 #error unsupported target CPU
621 /* Note: we do it here to avoid a gcc bug on Mac OS X when
622 doing it in tb_find_slow */
623 if (tb_invalidated_flag
) {
624 /* as some TB could have been invalidated because
625 of memory exceptions while generating the code, we
626 must recompute the hash index here */
628 tb_invalidated_flag
= 0;
631 if ((loglevel
& CPU_LOG_EXEC
)) {
632 fprintf(logfile
, "Trace 0x%08lx [" TARGET_FMT_lx
"] %s\n",
633 (long)tb
->tc_ptr
, tb
->pc
,
634 lookup_symbol(tb
->pc
));
637 /* see if we can patch the calling TB. When the TB
638 spans two pages, we cannot safely do a direct
643 (env
->kqemu_enabled
!= 2) &&
645 tb
->page_addr
[1] == -1) {
646 tb_add_jump((TranslationBlock
*)(next_tb
& ~3), next_tb
& 3, tb
);
649 spin_unlock(&tb_lock
);
650 env
->current_tb
= tb
;
652 /* cpu_interrupt might be called while translating the
653 TB, but before it is linked into a potentially
654 infinite loop and becomes env->current_tb. Avoid
655 starting execution if there is a pending interrupt. */
656 if (unlikely (env
->interrupt_request
& CPU_INTERRUPT_EXIT
))
657 env
->current_tb
= NULL
;
659 while (env
->current_tb
) {
661 /* execute the generated code */
662 #if defined(__sparc__) && !defined(HOST_SOLARIS)
664 env
= cpu_single_env
;
665 #define env cpu_single_env
667 next_tb
= tcg_qemu_tb_exec(tc_ptr
);
668 env
->current_tb
= NULL
;
669 if ((next_tb
& 3) == 2) {
670 /* Instruction counter expired. */
672 tb
= (TranslationBlock
*)(long)(next_tb
& ~3);
674 CPU_PC_FROM_TB(env
, tb
);
675 insns_left
= env
->icount_decr
.u32
;
676 if (env
->icount_extra
&& insns_left
>= 0) {
677 /* Refill decrementer and continue execution. */
678 env
->icount_extra
+= insns_left
;
679 if (env
->icount_extra
> 0xffff) {
682 insns_left
= env
->icount_extra
;
684 env
->icount_extra
-= insns_left
;
685 env
->icount_decr
.u16
.low
= insns_left
;
687 if (insns_left
> 0) {
688 /* Execute remaining instructions. */
689 cpu_exec_nocache(insns_left
, tb
);
691 env
->exception_index
= EXCP_INTERRUPT
;
697 /* reset soft MMU for next block (it can currently
698 only be set by a memory fault) */
699 #if defined(USE_KQEMU)
700 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
701 if (kqemu_is_ok(env
) &&
702 (cpu_get_time_fast() - env
->last_io_time
) >= MIN_CYCLE_BEFORE_SWITCH
) {
713 #if defined(TARGET_I386)
714 /* restore flags in standard format */
715 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
716 #elif defined(TARGET_ARM)
717 /* XXX: Save/restore host fpu exception state?. */
718 #elif defined(TARGET_SPARC)
719 #elif defined(TARGET_PPC)
720 #elif defined(TARGET_M68K)
721 cpu_m68k_flush_flags(env
, env
->cc_op
);
722 env
->cc_op
= CC_OP_FLAGS
;
723 env
->sr
= (env
->sr
& 0xffe0)
724 | env
->cc_dest
| (env
->cc_x
<< 4);
725 #elif defined(TARGET_MIPS)
726 #elif defined(TARGET_SH4)
727 #elif defined(TARGET_IA64)
728 #elif defined(TARGET_ALPHA)
729 #elif defined(TARGET_CRIS)
732 #error unsupported target CPU
735 /* restore global registers */
736 #include "hostregs_helper.h"
738 /* fail safe : never use cpu_single_env outside cpu_exec() */
739 cpu_single_env
= NULL
;
743 /* must only be called from the generated code as an exception can be
/* Invalidates all TBs overlapping the guest-virtual range
   [start, end) by translating 'start' to a physical code address and
   delegating to tb_invalidate_phys_page_range().
   NOTE(review): interior lines (braces, the #if 0/#endif guard implied
   by the comment below) are missing from this extract; the XXX comment
   suggests the body may be compiled out on some targets — verify
   against the full source. */
745 void tb_invalidate_page_range(target_ulong start
, target_ulong end
)
747 /* XXX: cannot enable it yet because it yields to MMU exception
748 where NIP != read address on PowerPC */
750 target_ulong phys_addr
;
751 phys_addr
= get_phys_addr_code(env
, start
);
752 tb_invalidate_phys_page_range(phys_addr
, phys_addr
+ end
- start
, 0);
756 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
/* User-mode-only helper (inside TARGET_I386 && CONFIG_USER_ONLY):
   loads segment register 'seg_reg' with 'selector'.  In real mode or
   VM86 mode (CR0.PE clear, or EFLAGS.VM set) the segment cache is
   filled directly with base = selector << 4 and a 0xffff limit;
   otherwise the full protected-mode load is done via helper_load_seg().
   NOTE(review): the saved_env save/restore statements around the body
   are missing from this extract — 's' is presumably swapped into the
   global 'env' before the calls; confirm against the full source. */
758 void cpu_x86_load_seg(CPUX86State
*s
, int seg_reg
, int selector
)
760 CPUX86State
*saved_env
;
764 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
766 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
767 (selector
<< 4), 0xffff, 0);
769 helper_load_seg(seg_reg
, selector
);
/* User-mode-only wrapper around helper_fsave(): stores the x87 FPU
   state to guest address 'ptr' ('data32' selects the 32-bit layout).
   NOTE(review): the saved_env swap of 's' into the global 'env' is
   missing from this extract — confirm against the full source. */
774 void cpu_x86_fsave(CPUX86State
*s
, target_ulong ptr
, int data32
)
776 CPUX86State
*saved_env
;
781 helper_fsave(ptr
, data32
);
/* User-mode-only wrapper around helper_frstor(): reloads the x87 FPU
   state from guest address 'ptr' ('data32' selects the 32-bit layout).
   Mirror image of cpu_x86_fsave() above.
   NOTE(review): the saved_env swap of 's' into the global 'env' is
   missing from this extract — confirm against the full source. */
786 void cpu_x86_frstor(CPUX86State
*s
, target_ulong ptr
, int data32
)
788 CPUX86State
*saved_env
;
793 helper_frstor(ptr
, data32
);
798 #endif /* TARGET_I386 */
800 #if !defined(CONFIG_SOFTMMU)
802 #if defined(TARGET_I386)
804 /* 'pc' is the host PC at which the exception was raised. 'address' is
805 the effective address of the memory exception. 'is_write' is 1 if a
806 write caused the exception and otherwise 0'. 'old_set' is the
807 signal set which should be restored */
/* i386-target SIGSEGV handler core.  Returns 0 if the fault is not an
   MMU fault (caller re-raises the host signal), 1 if it was handled
   transparently.  A "real" guest fault restores the signal mask and
   raises the guest exception via raise_exception_err(), which longjmps
   and never returns.
   NOTE(review): many interior lines are missing from this extract
   (the 'puc' parameter, braces, the tb_find_pc(pc) lookup before
   cpu_restore_state, the return-value checks on the MMU-fault call);
   tokens below are verbatim from the damaged source. */
808 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
809 int is_write
, sigset_t
*old_set
,
812 TranslationBlock
*tb
;
816 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
817 #if defined(DEBUG_SIGNAL)
818 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
819 pc
, address
, is_write
, *(unsigned long *)old_set
);
821 /* XXX: locking issue */
/* Write fault on a page QEMU write-protected to track self-modifying
   code: unprotect it and retry transparently. */
822 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
826 /* see if it is an MMU fault */
827 ret
= cpu_x86_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
829 return 0; /* not an MMU fault */
831 return 1; /* the MMU fault was handled without causing real CPU fault */
832 /* now we have a real cpu fault */
835 /* the PC is inside the translated code. It means that we have
836 a virtual CPU fault */
/* Re-synchronize guest CPU state (EIP etc.) from the host PC inside
   the faulting translated block. */
837 cpu_restore_state(tb
, env
, pc
, puc
);
841 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
842 env
->eip
, env
->cr
[2], env
->error_code
);
844 /* we restore the process signal mask as the sigreturn should
845 do it (XXX: use sigsetjmp) */
846 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
/* Longjmps back into cpu_exec() — does not return. */
847 raise_exception_err(env
->exception_index
, env
->error_code
);
849 /* activate soft MMU for this block */
850 env
->hflags
|= HF_SOFTMMU_MASK
;
851 cpu_resume_from_signal(env
, puc
);
853 /* never comes here */
857 #elif defined(TARGET_ARM)
858 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
859 int is_write
, sigset_t
*old_set
,
862 TranslationBlock
*tb
;
866 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
867 #if defined(DEBUG_SIGNAL)
868 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
869 pc
, address
, is_write
, *(unsigned long *)old_set
);
871 /* XXX: locking issue */
872 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
875 /* see if it is an MMU fault */
876 ret
= cpu_arm_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
878 return 0; /* not an MMU fault */
880 return 1; /* the MMU fault was handled without causing real CPU fault */
881 /* now we have a real cpu fault */
884 /* the PC is inside the translated code. It means that we have
885 a virtual CPU fault */
886 cpu_restore_state(tb
, env
, pc
, puc
);
888 /* we restore the process signal mask as the sigreturn should
889 do it (XXX: use sigsetjmp) */
890 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
892 /* never comes here */
895 #elif defined(TARGET_SPARC)
896 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
897 int is_write
, sigset_t
*old_set
,
900 TranslationBlock
*tb
;
904 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
905 #if defined(DEBUG_SIGNAL)
906 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
907 pc
, address
, is_write
, *(unsigned long *)old_set
);
909 /* XXX: locking issue */
910 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
913 /* see if it is an MMU fault */
914 ret
= cpu_sparc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
916 return 0; /* not an MMU fault */
918 return 1; /* the MMU fault was handled without causing real CPU fault */
919 /* now we have a real cpu fault */
922 /* the PC is inside the translated code. It means that we have
923 a virtual CPU fault */
924 cpu_restore_state(tb
, env
, pc
, puc
);
926 /* we restore the process signal mask as the sigreturn should
927 do it (XXX: use sigsetjmp) */
928 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
930 /* never comes here */
933 #elif defined (TARGET_PPC)
934 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
935 int is_write
, sigset_t
*old_set
,
938 TranslationBlock
*tb
;
942 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
943 #if defined(DEBUG_SIGNAL)
944 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
945 pc
, address
, is_write
, *(unsigned long *)old_set
);
947 /* XXX: locking issue */
948 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
952 /* see if it is an MMU fault */
953 ret
= cpu_ppc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
955 return 0; /* not an MMU fault */
957 return 1; /* the MMU fault was handled without causing real CPU fault */
959 /* now we have a real cpu fault */
962 /* the PC is inside the translated code. It means that we have
963 a virtual CPU fault */
964 cpu_restore_state(tb
, env
, pc
, puc
);
968 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
969 env
->nip
, env
->error_code
, tb
);
971 /* we restore the process signal mask as the sigreturn should
972 do it (XXX: use sigsetjmp) */
973 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
974 do_raise_exception_err(env
->exception_index
, env
->error_code
);
976 /* activate soft MMU for this block */
977 cpu_resume_from_signal(env
, puc
);
979 /* never comes here */
983 #elif defined(TARGET_M68K)
984 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
985 int is_write
, sigset_t
*old_set
,
988 TranslationBlock
*tb
;
992 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
993 #if defined(DEBUG_SIGNAL)
994 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
995 pc
, address
, is_write
, *(unsigned long *)old_set
);
997 /* XXX: locking issue */
998 if (is_write
&& page_unprotect(address
, pc
, puc
)) {
1001 /* see if it is an MMU fault */
1002 ret
= cpu_m68k_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1004 return 0; /* not an MMU fault */
1006 return 1; /* the MMU fault was handled without causing real CPU fault */
1007 /* now we have a real cpu fault */
1008 tb
= tb_find_pc(pc
);
1010 /* the PC is inside the translated code. It means that we have
1011 a virtual CPU fault */
1012 cpu_restore_state(tb
, env
, pc
, puc
);
1014 /* we restore the process signal mask as the sigreturn should
1015 do it (XXX: use sigsetjmp) */
1016 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1018 /* never comes here */
1022 #elif defined (TARGET_MIPS)
1023 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1024 int is_write
, sigset_t
*old_set
,
1027 TranslationBlock
*tb
;
1031 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1032 #if defined(DEBUG_SIGNAL)
1033 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1034 pc
, address
, is_write
, *(unsigned long *)old_set
);
1036 /* XXX: locking issue */
1037 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1041 /* see if it is an MMU fault */
1042 ret
= cpu_mips_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1044 return 0; /* not an MMU fault */
1046 return 1; /* the MMU fault was handled without causing real CPU fault */
1048 /* now we have a real cpu fault */
1049 tb
= tb_find_pc(pc
);
1051 /* the PC is inside the translated code. It means that we have
1052 a virtual CPU fault */
1053 cpu_restore_state(tb
, env
, pc
, puc
);
1057 printf("PF exception: PC=0x" TARGET_FMT_lx
" error=0x%x %p\n",
1058 env
->PC
, env
->error_code
, tb
);
1060 /* we restore the process signal mask as the sigreturn should
1061 do it (XXX: use sigsetjmp) */
1062 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1063 do_raise_exception_err(env
->exception_index
, env
->error_code
);
1065 /* activate soft MMU for this block */
1066 cpu_resume_from_signal(env
, puc
);
1068 /* never comes here */
1072 #elif defined (TARGET_SH4)
1073 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1074 int is_write
, sigset_t
*old_set
,
1077 TranslationBlock
*tb
;
1081 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1082 #if defined(DEBUG_SIGNAL)
1083 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1084 pc
, address
, is_write
, *(unsigned long *)old_set
);
1086 /* XXX: locking issue */
1087 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1091 /* see if it is an MMU fault */
1092 ret
= cpu_sh4_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1094 return 0; /* not an MMU fault */
1096 return 1; /* the MMU fault was handled without causing real CPU fault */
1098 /* now we have a real cpu fault */
1099 tb
= tb_find_pc(pc
);
1101 /* the PC is inside the translated code. It means that we have
1102 a virtual CPU fault */
1103 cpu_restore_state(tb
, env
, pc
, puc
);
1106 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1107 env
->nip
, env
->error_code
, tb
);
1109 /* we restore the process signal mask as the sigreturn should
1110 do it (XXX: use sigsetjmp) */
1111 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1113 /* never comes here */
1117 #elif defined (TARGET_ALPHA)
1118 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1119 int is_write
, sigset_t
*old_set
,
1122 TranslationBlock
*tb
;
1126 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1127 #if defined(DEBUG_SIGNAL)
1128 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1129 pc
, address
, is_write
, *(unsigned long *)old_set
);
1131 /* XXX: locking issue */
1132 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1136 /* see if it is an MMU fault */
1137 ret
= cpu_alpha_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1139 return 0; /* not an MMU fault */
1141 return 1; /* the MMU fault was handled without causing real CPU fault */
1143 /* now we have a real cpu fault */
1144 tb
= tb_find_pc(pc
);
1146 /* the PC is inside the translated code. It means that we have
1147 a virtual CPU fault */
1148 cpu_restore_state(tb
, env
, pc
, puc
);
1151 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1152 env
->nip
, env
->error_code
, tb
);
1154 /* we restore the process signal mask as the sigreturn should
1155 do it (XXX: use sigsetjmp) */
1156 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1158 /* never comes here */
1161 #elif defined (TARGET_CRIS)
1162 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1163 int is_write
, sigset_t
*old_set
,
1166 TranslationBlock
*tb
;
1170 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1171 #if defined(DEBUG_SIGNAL)
1172 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1173 pc
, address
, is_write
, *(unsigned long *)old_set
);
1175 /* XXX: locking issue */
1176 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1180 /* see if it is an MMU fault */
1181 ret
= cpu_cris_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1183 return 0; /* not an MMU fault */
1185 return 1; /* the MMU fault was handled without causing real CPU fault */
1187 /* now we have a real cpu fault */
1188 tb
= tb_find_pc(pc
);
1190 /* the PC is inside the translated code. It means that we have
1191 a virtual CPU fault */
1192 cpu_restore_state(tb
, env
, pc
, puc
);
1194 /* we restore the process signal mask as the sigreturn should
1195 do it (XXX: use sigsetjmp) */
1196 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1198 /* never comes here */
1203 #error unsupported target CPU
1206 #if defined(__i386__)
1208 #if defined(__APPLE__)
1209 # include <sys/ucontext.h>
1211 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1212 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1213 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1215 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1216 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1217 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1220 int cpu_signal_handler(int host_signum
, void *pinfo
,
1223 siginfo_t
*info
= pinfo
;
1224 struct ucontext
*uc
= puc
;
1232 #define REG_TRAPNO TRAPNO
1235 trapno
= TRAP_sig(uc
);
1236 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1238 (ERROR_sig(uc
) >> 1) & 1 : 0,
1239 &uc
->uc_sigmask
, puc
);
1242 #elif defined(__x86_64__)
1244 int cpu_signal_handler(int host_signum
, void *pinfo
,
1247 siginfo_t
*info
= pinfo
;
1248 struct ucontext
*uc
= puc
;
1251 pc
= uc
->uc_mcontext
.gregs
[REG_RIP
];
1252 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1253 uc
->uc_mcontext
.gregs
[REG_TRAPNO
] == 0xe ?
1254 (uc
->uc_mcontext
.gregs
[REG_ERR
] >> 1) & 1 : 0,
1255 &uc
->uc_sigmask
, puc
);
1258 #elif defined(__powerpc__)
1260 /***********************************************************************
1261 * signal context platform-specific definitions
1265 /* All Registers access - only for local access */
1266 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1267 /* Gpr Registers access */
1268 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1269 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1270 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1271 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1272 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1273 # define LR_sig(context) REG_sig(link, context) /* Link register */
1274 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1275 /* Float Registers access */
1276 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1277 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1278 /* Exception Registers access */
1279 # define DAR_sig(context) REG_sig(dar, context)
1280 # define DSISR_sig(context) REG_sig(dsisr, context)
1281 # define TRAP_sig(context) REG_sig(trap, context)
1285 # include <sys/ucontext.h>
1286 typedef struct ucontext SIGCONTEXT
;
1287 /* All Registers access - only for local access */
1288 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1289 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1290 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1291 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1292 /* Gpr Registers access */
1293 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1294 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1295 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1296 # define CTR_sig(context) REG_sig(ctr, context)
1297 # define XER_sig(context) REG_sig(xer, context) /* Link register */
1298 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1299 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1300 /* Float Registers access */
1301 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1302 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1303 /* Exception Registers access */
1304 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1305 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1306 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1307 #endif /* __APPLE__ */
1309 int cpu_signal_handler(int host_signum
, void *pinfo
,
1312 siginfo_t
*info
= pinfo
;
1313 struct ucontext
*uc
= puc
;
1321 if (DSISR_sig(uc
) & 0x00800000)
1324 if (TRAP_sig(uc
) != 0x400 && (DSISR_sig(uc
) & 0x02000000))
1327 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1328 is_write
, &uc
->uc_sigmask
, puc
);
1331 #elif defined(__alpha__)
1333 int cpu_signal_handler(int host_signum
, void *pinfo
,
1336 siginfo_t
*info
= pinfo
;
1337 struct ucontext
*uc
= puc
;
1338 uint32_t *pc
= uc
->uc_mcontext
.sc_pc
;
1339 uint32_t insn
= *pc
;
1342 /* XXX: need kernel patch to get write flag faster */
1343 switch (insn
>> 26) {
1358 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1359 is_write
, &uc
->uc_sigmask
, puc
);
1361 #elif defined(__sparc__)
1363 int cpu_signal_handler(int host_signum
, void *pinfo
,
1366 siginfo_t
*info
= pinfo
;
1369 #if !defined(__arch64__) || defined(HOST_SOLARIS)
1370 uint32_t *regs
= (uint32_t *)(info
+ 1);
1371 void *sigmask
= (regs
+ 20);
1372 /* XXX: is there a standard glibc define ? */
1373 unsigned long pc
= regs
[1];
1376 struct sigcontext
*sc
= puc
;
1377 unsigned long pc
= sc
->sigc_regs
.tpc
;
1378 void *sigmask
= (void *)sc
->sigc_mask
;
1379 #elif defined(__OpenBSD__)
1380 struct sigcontext
*uc
= puc
;
1381 unsigned long pc
= uc
->sc_pc
;
1382 void *sigmask
= (void *)(long)uc
->sc_mask
;
1386 /* XXX: need kernel patch to get write flag faster */
1388 insn
= *(uint32_t *)pc
;
1389 if ((insn
>> 30) == 3) {
1390 switch((insn
>> 19) & 0x3f) {
1402 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1403 is_write
, sigmask
, NULL
);
1406 #elif defined(__arm__)
1408 int cpu_signal_handler(int host_signum
, void *pinfo
,
1411 siginfo_t
*info
= pinfo
;
1412 struct ucontext
*uc
= puc
;
1416 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1417 pc
= uc
->uc_mcontext
.gregs
[R15
];
1419 pc
= uc
->uc_mcontext
.arm_pc
;
1421 /* XXX: compute is_write */
1423 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1425 &uc
->uc_sigmask
, puc
);
1428 #elif defined(__mc68000)
1430 int cpu_signal_handler(int host_signum
, void *pinfo
,
1433 siginfo_t
*info
= pinfo
;
1434 struct ucontext
*uc
= puc
;
1438 pc
= uc
->uc_mcontext
.gregs
[16];
1439 /* XXX: compute is_write */
1441 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1443 &uc
->uc_sigmask
, puc
);
1446 #elif defined(__ia64)
1449 /* This ought to be in <bits/siginfo.h>... */
1450 # define __ISR_VALID 1
1453 int cpu_signal_handler(int host_signum
, void *pinfo
, void *puc
)
1455 siginfo_t
*info
= pinfo
;
1456 struct ucontext
*uc
= puc
;
1460 ip
= uc
->uc_mcontext
.sc_ip
;
1461 switch (host_signum
) {
1467 if (info
->si_code
&& (info
->si_segvflags
& __ISR_VALID
))
1468 /* ISR.W (write-access) is bit 33: */
1469 is_write
= (info
->si_isr
>> 33) & 1;
1475 return handle_cpu_signal(ip
, (unsigned long)info
->si_addr
,
1477 &uc
->uc_sigmask
, puc
);
1480 #elif defined(__s390__)
1482 int cpu_signal_handler(int host_signum
, void *pinfo
,
1485 siginfo_t
*info
= pinfo
;
1486 struct ucontext
*uc
= puc
;
1490 pc
= uc
->uc_mcontext
.psw
.addr
;
1491 /* XXX: compute is_write */
1493 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1494 is_write
, &uc
->uc_sigmask
, puc
);
1497 #elif defined(__mips__)
1499 int cpu_signal_handler(int host_signum
, void *pinfo
,
1502 siginfo_t
*info
= pinfo
;
1503 struct ucontext
*uc
= puc
;
1504 greg_t pc
= uc
->uc_mcontext
.pc
;
1507 /* XXX: compute is_write */
1509 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1510 is_write
, &uc
->uc_sigmask
, puc
);
1513 #elif defined(__hppa__)
1515 int cpu_signal_handler(int host_signum
, void *pinfo
,
1518 struct siginfo
*info
= pinfo
;
1519 struct ucontext
*uc
= puc
;
1523 pc
= uc
->uc_mcontext
.sc_iaoq
[0];
1524 /* FIXME: compute is_write */
1526 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1528 &uc
->uc_sigmask
, puc
);
1533 #error host CPU specific signal handler needed
1537 #endif /* !defined(CONFIG_SOFTMMU) */