2 * i386 emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #if !defined(CONFIG_SOFTMMU)
35 #include <sys/ucontext.h>
40 extern int kvm_allowed
;
43 int tb_invalidated_flag
;
46 //#define DEBUG_SIGNAL
48 void cpu_loop_exit(void)
50 /* NOTE: the register at this point must be saved by hand because
51 longjmp restore them */
53 longjmp(env
->jmp_env
, 1);
56 #if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
60 /* exit the current TB from a signal handler. The host registers are
61 restored in a state compatible with the CPU emulator
63 void cpu_resume_from_signal(CPUState
*env1
, void *puc
)
65 #if !defined(CONFIG_SOFTMMU)
66 struct ucontext
*uc
= puc
;
71 /* XXX: restore cpu registers saved in host registers */
73 #if !defined(CONFIG_SOFTMMU)
75 /* XXX: use siglongjmp ? */
76 sigprocmask(SIG_SETMASK
, &uc
->uc_sigmask
, NULL
);
79 longjmp(env
->jmp_env
, 1);
83 static TranslationBlock
*tb_find_slow(target_ulong pc
,
87 TranslationBlock
*tb
, **ptb1
;
90 target_ulong phys_pc
, phys_page1
, phys_page2
, virt_page2
;
95 tb_invalidated_flag
= 0;
97 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
99 /* find translated block using physical mappings */
100 phys_pc
= get_phys_addr_code(env
, pc
);
101 phys_page1
= phys_pc
& TARGET_PAGE_MASK
;
103 h
= tb_phys_hash_func(phys_pc
);
104 ptb1
= &tb_phys_hash
[h
];
110 tb
->page_addr
[0] == phys_page1
&&
111 tb
->cs_base
== cs_base
&&
112 tb
->flags
== flags
) {
113 /* check next page if needed */
114 if (tb
->page_addr
[1] != -1) {
115 virt_page2
= (pc
& TARGET_PAGE_MASK
) +
117 phys_page2
= get_phys_addr_code(env
, virt_page2
);
118 if (tb
->page_addr
[1] == phys_page2
)
124 ptb1
= &tb
->phys_hash_next
;
127 /* if no translated code available, then translate it now */
130 /* flush must be done */
132 /* cannot fail at this point */
134 /* don't forget to invalidate previous TB info */
135 tb_invalidated_flag
= 1;
137 tc_ptr
= code_gen_ptr
;
139 tb
->cs_base
= cs_base
;
141 cpu_gen_code(env
, tb
, CODE_GEN_MAX_SIZE
, &code_gen_size
);
142 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
144 /* check next page if needed */
145 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
147 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
148 phys_page2
= get_phys_addr_code(env
, virt_page2
);
150 tb_link_phys(tb
, phys_pc
, phys_page2
);
153 /* we add the TB in the virtual pc hash table */
154 env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)] = tb
;
155 spin_unlock(&tb_lock
);
159 static inline TranslationBlock
*tb_find_fast(void)
161 TranslationBlock
*tb
;
162 target_ulong cs_base
, pc
;
165 /* we record a subset of the CPU state. It will
166 always be the same before a given translated block
168 #if defined(TARGET_I386)
170 flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
171 flags
|= env
->intercept
;
172 cs_base
= env
->segs
[R_CS
].base
;
173 pc
= cs_base
+ env
->eip
;
174 #elif defined(TARGET_ARM)
175 flags
= env
->thumb
| (env
->vfp
.vec_len
<< 1)
176 | (env
->vfp
.vec_stride
<< 4);
177 if ((env
->uncached_cpsr
& CPSR_M
) != ARM_CPU_MODE_USR
)
179 if (env
->vfp
.xregs
[ARM_VFP_FPEXC
] & (1 << 30))
183 #elif defined(TARGET_SPARC)
184 #ifdef TARGET_SPARC64
185 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
186 flags
= (((env
->pstate
& PS_PEF
) >> 1) | ((env
->fprs
& FPRS_FEF
) << 2))
187 | (env
->pstate
& PS_PRIV
) | ((env
->lsu
& (DMMU_E
| IMMU_E
)) >> 2);
189 // FPU enable . MMU Boot . MMU enabled . MMU no-fault . Supervisor
190 flags
= (env
->psref
<< 4) | (((env
->mmuregs
[0] & MMU_BM
) >> 14) << 3)
191 | ((env
->mmuregs
[0] & (MMU_E
| MMU_NF
)) << 1)
196 #elif defined(TARGET_PPC)
200 #elif defined(TARGET_MIPS)
201 flags
= env
->hflags
& (MIPS_HFLAG_TMASK
| MIPS_HFLAG_BMASK
);
203 pc
= env
->PC
[env
->current_tc
];
204 #elif defined(TARGET_M68K)
205 flags
= (env
->fpcr
& M68K_FPCR_PREC
) /* Bit 6 */
206 | (env
->sr
& SR_S
) /* Bit 13 */
207 | ((env
->macsr
>> 4) & 0xf); /* Bits 0-3 */
210 #elif defined(TARGET_SH4)
211 flags
= env
->sr
& (SR_MD
| SR_RB
);
212 cs_base
= 0; /* XXXXX */
214 #elif defined(TARGET_ALPHA)
219 #error unsupported CPU
221 tb
= env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)];
222 if (__builtin_expect(!tb
|| tb
->pc
!= pc
|| tb
->cs_base
!= cs_base
||
223 tb
->flags
!= flags
, 0)) {
224 tb
= tb_find_slow(pc
, cs_base
, flags
);
225 /* Note: we do it here to avoid a gcc bug on Mac OS X when
226 doing it in tb_find_slow */
227 if (tb_invalidated_flag
) {
228 /* as some TB could have been invalidated because
229 of memory exceptions while generating the code, we
230 must recompute the hash index here */
238 /* main execution loop */
240 int cpu_exec(CPUState
*env1
)
242 #define DECLARE_HOST_REGS 1
243 #include "hostregs_helper.h"
244 #if defined(TARGET_SPARC)
245 #if defined(reg_REGWPTR)
246 uint32_t *saved_regwptr
;
249 #if defined(__sparc__) && !defined(HOST_SOLARIS)
253 int ret
, interrupt_request
;
254 void (*gen_func
)(void);
255 TranslationBlock
*tb
;
258 if (cpu_halted(env1
) == EXCP_HALTED
)
261 cpu_single_env
= env1
;
263 /* first we save global registers */
264 #define SAVE_HOST_REGS 1
265 #include "hostregs_helper.h"
267 #if defined(__sparc__) && !defined(HOST_SOLARIS)
268 /* we also save i7 because longjmp may not restore it */
269 asm volatile ("mov %%i7, %0" : "=r" (saved_i7
));
273 #if defined(TARGET_I386)
274 /* put eflags in CPU temporary format */
275 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
276 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
277 CC_OP
= CC_OP_EFLAGS
;
278 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
279 #elif defined(TARGET_SPARC)
280 #if defined(reg_REGWPTR)
281 saved_regwptr
= REGWPTR
;
283 #elif defined(TARGET_M68K)
284 env
->cc_op
= CC_OP_FLAGS
;
285 env
->cc_dest
= env
->sr
& 0xf;
286 env
->cc_x
= (env
->sr
>> 4) & 1;
287 #elif defined(TARGET_ALPHA)
288 #elif defined(TARGET_ARM)
289 #elif defined(TARGET_PPC)
290 #elif defined(TARGET_MIPS)
291 #elif defined(TARGET_SH4)
294 #error unsupported target CPU
296 env
->exception_index
= -1;
298 /* prepare setjmp context for exception handling */
300 if (setjmp(env
->jmp_env
) == 0) {
301 env
->current_tb
= NULL
;
302 /* if an exception is pending, we execute it here */
303 if (env
->exception_index
>= 0) {
304 if (env
->exception_index
>= EXCP_INTERRUPT
) {
305 /* exit request from the cpu execution loop */
306 ret
= env
->exception_index
;
308 } else if (env
->user_mode_only
) {
309 /* if user mode only, we simulate a fake exception
310 which will be handled outside the cpu execution
312 #if defined(TARGET_I386)
313 do_interrupt_user(env
->exception_index
,
314 env
->exception_is_int
,
316 env
->exception_next_eip
);
318 ret
= env
->exception_index
;
321 #if defined(TARGET_I386)
322 /* simulate a real cpu exception. On i386, it can
323 trigger new exceptions, but we do not handle
324 double or triple faults yet. */
325 do_interrupt(env
->exception_index
,
326 env
->exception_is_int
,
328 env
->exception_next_eip
, 0);
329 /* successfully delivered */
330 env
->old_exception
= -1;
331 #elif defined(TARGET_PPC)
333 #elif defined(TARGET_MIPS)
335 #elif defined(TARGET_SPARC)
336 do_interrupt(env
->exception_index
);
337 #elif defined(TARGET_ARM)
339 #elif defined(TARGET_SH4)
341 #elif defined(TARGET_ALPHA)
343 #elif defined(TARGET_M68K)
347 env
->exception_index
= -1;
350 if (kqemu_is_ok(env
) && env
->interrupt_request
== 0) {
352 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
353 ret
= kqemu_cpu_exec(env
);
354 /* put eflags in CPU temporary format */
355 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
356 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
357 CC_OP
= CC_OP_EFLAGS
;
358 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
361 longjmp(env
->jmp_env
, 1);
362 } else if (ret
== 2) {
363 /* softmmu execution needed */
365 if (env
->interrupt_request
!= 0) {
366 /* hardware interrupt will be executed just after */
368 /* otherwise, we restart */
369 longjmp(env
->jmp_env
, 1);
378 longjmp(env
->jmp_env
, 1);
381 T0
= 0; /* force lookup of first TB */
383 #if defined(__sparc__) && !defined(HOST_SOLARIS)
384 /* g1 can be modified by some libc? functions */
387 interrupt_request
= env
->interrupt_request
;
388 if (__builtin_expect(interrupt_request
, 0)
389 #if defined(TARGET_I386)
390 && env
->hflags
& HF_GIF_MASK
393 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
394 env
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
395 env
->exception_index
= EXCP_DEBUG
;
398 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
399 defined(TARGET_PPC) || defined(TARGET_ALPHA)
400 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
401 env
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
403 env
->exception_index
= EXCP_HLT
;
407 #if defined(TARGET_I386)
408 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
409 !(env
->hflags
& HF_SMM_MASK
)) {
410 svm_check_intercept(SVM_EXIT_SMI
);
411 env
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
413 #if defined(__sparc__) && !defined(HOST_SOLARIS)
418 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
419 (env
->eflags
& IF_MASK
|| env
->hflags
& HF_HIF_MASK
) &&
420 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
422 svm_check_intercept(SVM_EXIT_INTR
);
423 env
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VIRQ
);
424 intno
= cpu_get_pic_interrupt(env
);
425 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
426 fprintf(logfile
, "Servicing hardware INT=0x%02x\n", intno
);
428 do_interrupt(intno
, 0, 0, 0, 1);
429 /* ensure that no TB jump will be modified as
430 the program flow was changed */
431 #if defined(__sparc__) && !defined(HOST_SOLARIS)
436 #if !defined(CONFIG_USER_ONLY)
437 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
438 (env
->eflags
& IF_MASK
) && !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
440 /* FIXME: this should respect TPR */
441 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
442 svm_check_intercept(SVM_EXIT_VINTR
);
443 intno
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_vector
));
444 if (loglevel
& CPU_LOG_TB_IN_ASM
)
445 fprintf(logfile
, "Servicing virtual hardware INT=0x%02x\n", intno
);
446 do_interrupt(intno
, 0, 0, -1, 1);
447 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
),
448 ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
)) & ~V_IRQ_MASK
);
449 #if defined(__sparc__) && !defined(HOST_SOLARIS)
456 #elif defined(TARGET_PPC)
458 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
462 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
463 ppc_hw_interrupt(env
);
464 if (env
->pending_interrupts
== 0)
465 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
466 #if defined(__sparc__) && !defined(HOST_SOLARIS)
472 #elif defined(TARGET_MIPS)
473 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
474 (env
->CP0_Status
& env
->CP0_Cause
& CP0Ca_IP_mask
) &&
475 (env
->CP0_Status
& (1 << CP0St_IE
)) &&
476 !(env
->CP0_Status
& (1 << CP0St_EXL
)) &&
477 !(env
->CP0_Status
& (1 << CP0St_ERL
)) &&
478 !(env
->hflags
& MIPS_HFLAG_DM
)) {
480 env
->exception_index
= EXCP_EXT_INTERRUPT
;
483 #if defined(__sparc__) && !defined(HOST_SOLARIS)
489 #elif defined(TARGET_SPARC)
490 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
492 int pil
= env
->interrupt_index
& 15;
493 int type
= env
->interrupt_index
& 0xf0;
495 if (((type
== TT_EXTINT
) &&
496 (pil
== 15 || pil
> env
->psrpil
)) ||
498 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
499 do_interrupt(env
->interrupt_index
);
500 env
->interrupt_index
= 0;
501 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
504 #if defined(__sparc__) && !defined(HOST_SOLARIS)
510 } else if (interrupt_request
& CPU_INTERRUPT_TIMER
) {
511 //do_interrupt(0, 0, 0, 0, 0);
512 env
->interrupt_request
&= ~CPU_INTERRUPT_TIMER
;
514 #elif defined(TARGET_ARM)
515 if (interrupt_request
& CPU_INTERRUPT_FIQ
516 && !(env
->uncached_cpsr
& CPSR_F
)) {
517 env
->exception_index
= EXCP_FIQ
;
520 if (interrupt_request
& CPU_INTERRUPT_HARD
521 && !(env
->uncached_cpsr
& CPSR_I
)) {
522 env
->exception_index
= EXCP_IRQ
;
525 #elif defined(TARGET_SH4)
527 #elif defined(TARGET_ALPHA)
528 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
531 #elif defined(TARGET_M68K)
532 if (interrupt_request
& CPU_INTERRUPT_HARD
533 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
534 < env
->pending_level
) {
535 /* Real hardware gets the interrupt vector via an
536 IACK cycle at this point. Current emulated
537 hardware doesn't rely on this, so we
538 provide/save the vector when the interrupt is
540 env
->exception_index
= env
->pending_vector
;
544 /* Don't use the cached interupt_request value,
545 do_interrupt may have updated the EXITTB flag. */
546 if (env
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
547 env
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
548 /* ensure that no TB jump will be modified as
549 the program flow was changed */
550 #if defined(__sparc__) && !defined(HOST_SOLARIS)
556 if (interrupt_request
& CPU_INTERRUPT_EXIT
) {
557 env
->interrupt_request
&= ~CPU_INTERRUPT_EXIT
;
558 env
->exception_index
= EXCP_INTERRUPT
;
563 if ((loglevel
& CPU_LOG_TB_CPU
)) {
564 /* restore flags in standard format */
566 #if defined(TARGET_I386)
567 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
568 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
569 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
570 #elif defined(TARGET_ARM)
571 cpu_dump_state(env
, logfile
, fprintf
, 0);
572 #elif defined(TARGET_SPARC)
573 REGWPTR
= env
->regbase
+ (env
->cwp
* 16);
574 env
->regwptr
= REGWPTR
;
575 cpu_dump_state(env
, logfile
, fprintf
, 0);
576 #elif defined(TARGET_PPC)
577 cpu_dump_state(env
, logfile
, fprintf
, 0);
578 #elif defined(TARGET_M68K)
579 cpu_m68k_flush_flags(env
, env
->cc_op
);
580 env
->cc_op
= CC_OP_FLAGS
;
581 env
->sr
= (env
->sr
& 0xffe0)
582 | env
->cc_dest
| (env
->cc_x
<< 4);
583 cpu_dump_state(env
, logfile
, fprintf
, 0);
584 #elif defined(TARGET_MIPS)
585 cpu_dump_state(env
, logfile
, fprintf
, 0);
586 #elif defined(TARGET_SH4)
587 cpu_dump_state(env
, logfile
, fprintf
, 0);
588 #elif defined(TARGET_ALPHA)
589 cpu_dump_state(env
, logfile
, fprintf
, 0);
591 #error unsupported target CPU
597 if ((loglevel
& CPU_LOG_EXEC
)) {
598 fprintf(logfile
, "Trace 0x%08lx [" TARGET_FMT_lx
"] %s\n",
599 (long)tb
->tc_ptr
, tb
->pc
,
600 lookup_symbol(tb
->pc
));
603 #if defined(__sparc__) && !defined(HOST_SOLARIS)
606 /* see if we can patch the calling TB. When the TB
607 spans two pages, we cannot safely do a direct
612 (env
->kqemu_enabled
!= 2) &&
614 tb
->page_addr
[1] == -1
615 #if defined(TARGET_I386) && defined(USE_CODE_COPY)
616 && (tb
->cflags
& CF_CODE_COPY
) ==
617 (((TranslationBlock
*)(T0
& ~3))->cflags
& CF_CODE_COPY
)
621 tb_add_jump((TranslationBlock
*)(long)(T0
& ~3), T0
& 3, tb
);
622 #if defined(USE_CODE_COPY)
623 /* propagates the FP use info */
624 ((TranslationBlock
*)(T0
& ~3))->cflags
|=
625 (tb
->cflags
& CF_FP_USED
);
627 spin_unlock(&tb_lock
);
631 env
->current_tb
= tb
;
632 /* execute the generated code */
633 gen_func
= (void *)tc_ptr
;
634 #if defined(__sparc__)
635 __asm__
__volatile__("call %0\n\t"
639 : "i0", "i1", "i2", "i3", "i4", "i5",
640 "o0", "o1", "o2", "o3", "o4", "o5",
641 "l0", "l1", "l2", "l3", "l4", "l5",
643 #elif defined(__arm__)
644 asm volatile ("mov pc, %0\n\t"
645 ".global exec_loop\n\t"
649 : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
650 #elif defined(TARGET_I386) && defined(USE_CODE_COPY)
652 if (!(tb
->cflags
& CF_CODE_COPY
)) {
653 if ((tb
->cflags
& CF_FP_USED
) && env
->native_fp_regs
) {
654 save_native_fp_state(env
);
658 if ((tb
->cflags
& CF_FP_USED
) && !env
->native_fp_regs
) {
659 restore_native_fp_state(env
);
661 /* we work with native eflags */
662 CC_SRC
= cc_table
[CC_OP
].compute_all();
663 CC_OP
= CC_OP_EFLAGS
;
664 asm(".globl exec_loop\n"
669 " fs movl %11, %%eax\n"
670 " andl $0x400, %%eax\n"
671 " fs orl %8, %%eax\n"
674 " fs movl %%esp, %12\n"
675 " fs movl %0, %%eax\n"
676 " fs movl %1, %%ecx\n"
677 " fs movl %2, %%edx\n"
678 " fs movl %3, %%ebx\n"
679 " fs movl %4, %%esp\n"
680 " fs movl %5, %%ebp\n"
681 " fs movl %6, %%esi\n"
682 " fs movl %7, %%edi\n"
685 " fs movl %%esp, %4\n"
686 " fs movl %12, %%esp\n"
687 " fs movl %%eax, %0\n"
688 " fs movl %%ecx, %1\n"
689 " fs movl %%edx, %2\n"
690 " fs movl %%ebx, %3\n"
691 " fs movl %%ebp, %5\n"
692 " fs movl %%esi, %6\n"
693 " fs movl %%edi, %7\n"
696 " movl %%eax, %%ecx\n"
697 " andl $0x400, %%ecx\n"
699 " andl $0x8d5, %%eax\n"
700 " fs movl %%eax, %8\n"
702 " subl %%ecx, %%eax\n"
703 " fs movl %%eax, %11\n"
704 " fs movl %9, %%ebx\n" /* get T0 value */
707 : "m" (*(uint8_t *)offsetof(CPUState
, regs
[0])),
708 "m" (*(uint8_t *)offsetof(CPUState
, regs
[1])),
709 "m" (*(uint8_t *)offsetof(CPUState
, regs
[2])),
710 "m" (*(uint8_t *)offsetof(CPUState
, regs
[3])),
711 "m" (*(uint8_t *)offsetof(CPUState
, regs
[4])),
712 "m" (*(uint8_t *)offsetof(CPUState
, regs
[5])),
713 "m" (*(uint8_t *)offsetof(CPUState
, regs
[6])),
714 "m" (*(uint8_t *)offsetof(CPUState
, regs
[7])),
715 "m" (*(uint8_t *)offsetof(CPUState
, cc_src
)),
716 "m" (*(uint8_t *)offsetof(CPUState
, tmp0
)),
718 "m" (*(uint8_t *)offsetof(CPUState
, df
)),
719 "m" (*(uint8_t *)offsetof(CPUState
, saved_esp
))
724 #elif defined(__ia64)
731 fp
.gp
= code_gen_buffer
+ 2 * (1 << 20);
732 (*(void (*)(void)) &fp
)();
736 env
->current_tb
= NULL
;
737 /* reset soft MMU for next block (it can currently
738 only be set by a memory fault) */
739 #if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
740 if (env
->hflags
& HF_SOFTMMU_MASK
) {
741 env
->hflags
&= ~HF_SOFTMMU_MASK
;
742 /* do not allow linking to another block */
746 #if defined(USE_KQEMU)
747 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
748 if (kqemu_is_ok(env
) &&
749 (cpu_get_time_fast() - env
->last_io_time
) >= MIN_CYCLE_BEFORE_SWITCH
) {
760 #if defined(TARGET_I386)
761 #if defined(USE_CODE_COPY)
762 if (env
->native_fp_regs
) {
763 save_native_fp_state(env
);
766 /* restore flags in standard format */
767 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
768 #elif defined(TARGET_ARM)
769 /* XXX: Save/restore host fpu exception state?. */
770 #elif defined(TARGET_SPARC)
771 #if defined(reg_REGWPTR)
772 REGWPTR
= saved_regwptr
;
774 #elif defined(TARGET_PPC)
775 #elif defined(TARGET_M68K)
776 cpu_m68k_flush_flags(env
, env
->cc_op
);
777 env
->cc_op
= CC_OP_FLAGS
;
778 env
->sr
= (env
->sr
& 0xffe0)
779 | env
->cc_dest
| (env
->cc_x
<< 4);
780 #elif defined(TARGET_MIPS)
781 #elif defined(TARGET_SH4)
782 #elif defined(TARGET_ALPHA)
785 #error unsupported target CPU
788 /* restore global registers */
789 #if defined(__sparc__) && !defined(HOST_SOLARIS)
790 asm volatile ("mov %0, %%i7" : : "r" (saved_i7
));
792 #include "hostregs_helper.h"
794 /* fail safe : never use cpu_single_env outside cpu_exec() */
795 cpu_single_env
= NULL
;
799 /* must only be called from the generated code as an exception can be
801 void tb_invalidate_page_range(target_ulong start
, target_ulong end
)
803 /* XXX: cannot enable it yet because it yields to MMU exception
804 where NIP != read address on PowerPC */
806 target_ulong phys_addr
;
807 phys_addr
= get_phys_addr_code(env
, start
);
808 tb_invalidate_phys_page_range(phys_addr
, phys_addr
+ end
- start
, 0);
812 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
814 void cpu_x86_load_seg(CPUX86State
*s
, int seg_reg
, int selector
)
816 CPUX86State
*saved_env
;
820 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
822 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
823 (selector
<< 4), 0xffff, 0);
825 load_seg(seg_reg
, selector
);
830 void cpu_x86_fsave(CPUX86State
*s
, uint8_t *ptr
, int data32
)
832 CPUX86State
*saved_env
;
837 helper_fsave((target_ulong
)ptr
, data32
);
842 void cpu_x86_frstor(CPUX86State
*s
, uint8_t *ptr
, int data32
)
844 CPUX86State
*saved_env
;
849 helper_frstor((target_ulong
)ptr
, data32
);
854 #endif /* TARGET_I386 */
856 #if !defined(CONFIG_SOFTMMU)
858 #if defined(TARGET_I386)
860 /* 'pc' is the host PC at which the exception was raised. 'address' is
861 the effective address of the memory exception. 'is_write' is 1 if a
862 write caused the exception and otherwise 0'. 'old_set' is the
863 signal set which should be restored */
864 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
865 int is_write
, sigset_t
*old_set
,
868 TranslationBlock
*tb
;
872 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
873 #if defined(DEBUG_SIGNAL)
874 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
875 pc
, address
, is_write
, *(unsigned long *)old_set
);
877 /* XXX: locking issue */
878 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
882 /* see if it is an MMU fault */
883 ret
= cpu_x86_handle_mmu_fault(env
, address
, is_write
,
884 ((env
->hflags
& HF_CPL_MASK
) == 3), 0);
886 return 0; /* not an MMU fault */
888 return 1; /* the MMU fault was handled without causing real CPU fault */
889 /* now we have a real cpu fault */
892 /* the PC is inside the translated code. It means that we have
893 a virtual CPU fault */
894 cpu_restore_state(tb
, env
, pc
, puc
);
898 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
899 env
->eip
, env
->cr
[2], env
->error_code
);
901 /* we restore the process signal mask as the sigreturn should
902 do it (XXX: use sigsetjmp) */
903 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
904 raise_exception_err(env
->exception_index
, env
->error_code
);
906 /* activate soft MMU for this block */
907 env
->hflags
|= HF_SOFTMMU_MASK
;
908 cpu_resume_from_signal(env
, puc
);
910 /* never comes here */
914 #elif defined(TARGET_ARM)
915 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
916 int is_write
, sigset_t
*old_set
,
919 TranslationBlock
*tb
;
923 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
924 #if defined(DEBUG_SIGNAL)
925 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
926 pc
, address
, is_write
, *(unsigned long *)old_set
);
928 /* XXX: locking issue */
929 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
932 /* see if it is an MMU fault */
933 ret
= cpu_arm_handle_mmu_fault(env
, address
, is_write
, 1, 0);
935 return 0; /* not an MMU fault */
937 return 1; /* the MMU fault was handled without causing real CPU fault */
938 /* now we have a real cpu fault */
941 /* the PC is inside the translated code. It means that we have
942 a virtual CPU fault */
943 cpu_restore_state(tb
, env
, pc
, puc
);
945 /* we restore the process signal mask as the sigreturn should
946 do it (XXX: use sigsetjmp) */
947 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
950 #elif defined(TARGET_SPARC)
951 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
952 int is_write
, sigset_t
*old_set
,
955 TranslationBlock
*tb
;
959 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
960 #if defined(DEBUG_SIGNAL)
961 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
962 pc
, address
, is_write
, *(unsigned long *)old_set
);
964 /* XXX: locking issue */
965 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
968 /* see if it is an MMU fault */
969 ret
= cpu_sparc_handle_mmu_fault(env
, address
, is_write
, 1, 0);
971 return 0; /* not an MMU fault */
973 return 1; /* the MMU fault was handled without causing real CPU fault */
974 /* now we have a real cpu fault */
977 /* the PC is inside the translated code. It means that we have
978 a virtual CPU fault */
979 cpu_restore_state(tb
, env
, pc
, puc
);
981 /* we restore the process signal mask as the sigreturn should
982 do it (XXX: use sigsetjmp) */
983 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
986 #elif defined (TARGET_PPC)
987 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
988 int is_write
, sigset_t
*old_set
,
991 TranslationBlock
*tb
;
995 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
996 #if defined(DEBUG_SIGNAL)
997 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
998 pc
, address
, is_write
, *(unsigned long *)old_set
);
1000 /* XXX: locking issue */
1001 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1005 /* see if it is an MMU fault */
1006 ret
= cpu_ppc_handle_mmu_fault(env
, address
, is_write
, msr_pr
, 0);
1008 return 0; /* not an MMU fault */
1010 return 1; /* the MMU fault was handled without causing real CPU fault */
1012 /* now we have a real cpu fault */
1013 tb
= tb_find_pc(pc
);
1015 /* the PC is inside the translated code. It means that we have
1016 a virtual CPU fault */
1017 cpu_restore_state(tb
, env
, pc
, puc
);
1021 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1022 env
->nip
, env
->error_code
, tb
);
1024 /* we restore the process signal mask as the sigreturn should
1025 do it (XXX: use sigsetjmp) */
1026 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1027 do_raise_exception_err(env
->exception_index
, env
->error_code
);
1029 /* activate soft MMU for this block */
1030 cpu_resume_from_signal(env
, puc
);
1032 /* never comes here */
1036 #elif defined(TARGET_M68K)
1037 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1038 int is_write
, sigset_t
*old_set
,
1041 TranslationBlock
*tb
;
1045 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1046 #if defined(DEBUG_SIGNAL)
1047 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1048 pc
, address
, is_write
, *(unsigned long *)old_set
);
1050 /* XXX: locking issue */
1051 if (is_write
&& page_unprotect(address
, pc
, puc
)) {
1054 /* see if it is an MMU fault */
1055 ret
= cpu_m68k_handle_mmu_fault(env
, address
, is_write
, 1, 0);
1057 return 0; /* not an MMU fault */
1059 return 1; /* the MMU fault was handled without causing real CPU fault */
1060 /* now we have a real cpu fault */
1061 tb
= tb_find_pc(pc
);
1063 /* the PC is inside the translated code. It means that we have
1064 a virtual CPU fault */
1065 cpu_restore_state(tb
, env
, pc
, puc
);
1067 /* we restore the process signal mask as the sigreturn should
1068 do it (XXX: use sigsetjmp) */
1069 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1071 /* never comes here */
1075 #elif defined (TARGET_MIPS)
1076 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1077 int is_write
, sigset_t
*old_set
,
1080 TranslationBlock
*tb
;
1084 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1085 #if defined(DEBUG_SIGNAL)
1086 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1087 pc
, address
, is_write
, *(unsigned long *)old_set
);
1089 /* XXX: locking issue */
1090 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1094 /* see if it is an MMU fault */
1095 ret
= cpu_mips_handle_mmu_fault(env
, address
, is_write
, 1, 0);
1097 return 0; /* not an MMU fault */
1099 return 1; /* the MMU fault was handled without causing real CPU fault */
1101 /* now we have a real cpu fault */
1102 tb
= tb_find_pc(pc
);
1104 /* the PC is inside the translated code. It means that we have
1105 a virtual CPU fault */
1106 cpu_restore_state(tb
, env
, pc
, puc
);
1110 printf("PF exception: PC=0x" TARGET_FMT_lx
" error=0x%x %p\n",
1111 env
->PC
, env
->error_code
, tb
);
1113 /* we restore the process signal mask as the sigreturn should
1114 do it (XXX: use sigsetjmp) */
1115 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1116 do_raise_exception_err(env
->exception_index
, env
->error_code
);
1118 /* activate soft MMU for this block */
1119 cpu_resume_from_signal(env
, puc
);
1121 /* never comes here */
1125 #elif defined (TARGET_SH4)
1126 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1127 int is_write
, sigset_t
*old_set
,
1130 TranslationBlock
*tb
;
1134 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1135 #if defined(DEBUG_SIGNAL)
1136 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1137 pc
, address
, is_write
, *(unsigned long *)old_set
);
1139 /* XXX: locking issue */
1140 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1144 /* see if it is an MMU fault */
1145 ret
= cpu_sh4_handle_mmu_fault(env
, address
, is_write
, 1, 0);
1147 return 0; /* not an MMU fault */
1149 return 1; /* the MMU fault was handled without causing real CPU fault */
1151 /* now we have a real cpu fault */
1152 tb
= tb_find_pc(pc
);
1154 /* the PC is inside the translated code. It means that we have
1155 a virtual CPU fault */
1156 cpu_restore_state(tb
, env
, pc
, puc
);
1159 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1160 env
->nip
, env
->error_code
, tb
);
1162 /* we restore the process signal mask as the sigreturn should
1163 do it (XXX: use sigsetjmp) */
1164 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1166 /* never comes here */
1170 #elif defined (TARGET_ALPHA)
1171 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1172 int is_write
, sigset_t
*old_set
,
1175 TranslationBlock
*tb
;
1179 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1180 #if defined(DEBUG_SIGNAL)
1181 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1182 pc
, address
, is_write
, *(unsigned long *)old_set
);
1184 /* XXX: locking issue */
1185 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1189 /* see if it is an MMU fault */
1190 ret
= cpu_alpha_handle_mmu_fault(env
, address
, is_write
, 1, 0);
1192 return 0; /* not an MMU fault */
1194 return 1; /* the MMU fault was handled without causing real CPU fault */
1196 /* now we have a real cpu fault */
1197 tb
= tb_find_pc(pc
);
1199 /* the PC is inside the translated code. It means that we have
1200 a virtual CPU fault */
1201 cpu_restore_state(tb
, env
, pc
, puc
);
1204 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1205 env
->nip
, env
->error_code
, tb
);
1207 /* we restore the process signal mask as the sigreturn should
1208 do it (XXX: use sigsetjmp) */
1209 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1211 /* never comes here */
1215 #error unsupported target CPU
1218 #if defined(__i386__)
1220 #if defined(__APPLE__)
1221 # include <sys/ucontext.h>
1223 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1224 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1225 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1227 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1228 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1229 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1232 #if defined(USE_CODE_COPY)
/*
 * cpu_send_trap: forward a hardware trap taken inside code-copy
 * translated code to the guest: roll the CPU state back to the
 * faulting guest insn, restore the signal mask ourselves (we longjmp
 * out instead of returning through sigreturn), then raise the
 * corresponding guest exception.  raise_exception_err does not return.
 */
1233 static void cpu_send_trap(unsigned long pc
, int trap
,
1234 struct ucontext
*uc
)
1236 TranslationBlock
*tb
;
/* user-mode emulation runs a single CPU; fetch it from the global */
1239 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1240 /* now we have a real cpu fault */
1241 tb
= tb_find_pc(pc
);
1243 /* the PC is inside the translated code. It means that we have
1244 a virtual CPU fault */
1245 cpu_restore_state(tb
, env
, pc
, uc
);
1247 sigprocmask(SIG_SETMASK
, &uc
->uc_sigmask
, NULL
);
1248 raise_exception_err(trap
, env
->error_code
);
/*
 * i386 host SIGSEGV handler entry point: decode the signal frame and
 * dispatch to handle_cpu_signal().
 */
1252 int cpu_signal_handler(int host_signum
, void *pinfo
,
1255 siginfo_t
*info
= pinfo
;
1256 struct ucontext
*uc
= puc
;
/* older glibc only provides TRAPNO, not REG_TRAPNO */
1264 #define REG_TRAPNO TRAPNO
1267 trapno
= TRAP_sig(uc
);
1268 #if defined(TARGET_I386) && defined(USE_CODE_COPY)
/* traps 0x00 (#DE) and 0x05 (#BR) are forwarded straight to the
   guest when running code-copy translated i386 code */
1269 if (trapno
== 0x00 || trapno
== 0x05) {
1270 /* send division by zero or bound exception */
1271 cpu_send_trap(pc
, trapno
, uc
);
/* is_write = bit 1 (the W/R bit) of the page-fault error code.
   NOTE(review): the "trapno == 0xe ?" condition line of this ternary
   appears to have been dropped by the extraction. */
1275 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1277 (ERROR_sig(uc
) >> 1) & 1 : 0,
1278 &uc
->uc_sigmask
, puc
);
1281 #elif defined(__x86_64__)
/*
 * x86_64 host handler: faulting RIP comes from gregs[REG_RIP];
 * is_write is bit 1 (the W/R bit) of the page-fault error code,
 * used only when the trap number is 0xe (page fault), else 0.
 */
1283 int cpu_signal_handler(int host_signum
, void *pinfo
,
1286 siginfo_t
*info
= pinfo
;
1287 struct ucontext
*uc
= puc
;
1290 pc
= uc
->uc_mcontext
.gregs
[REG_RIP
];
1291 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1292 uc
->uc_mcontext
.gregs
[REG_TRAPNO
] == 0xe ?
1293 (uc
->uc_mcontext
.gregs
[REG_ERR
] >> 1) & 1 : 0,
1294 &uc
->uc_sigmask
, puc
);
1297 #elif defined(__powerpc__)
/*
 * PowerPC signal-context accessors.  Linux exposes the registers via
 * uc_mcontext.regs; Darwin via pointer uc_mcontext with ss/fs/es/vs
 * sub-structures.  (The XER/LR comments in the Apple section were
 * swapped relative to the Linux section above; corrected here.)
 */
1299 /***********************************************************************
1300 * signal context platform-specific definitions
1304 /* All Registers access - only for local access */
1305 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1306 /* Gpr Registers access */
1307 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1308 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1309 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1310 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1311 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1312 # define LR_sig(context) REG_sig(link, context) /* Link register */
1313 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1314 /* Float Registers access */
1315 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1316 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1317 /* Exception Registers access */
1318 # define DAR_sig(context) REG_sig(dar, context)
1319 # define DSISR_sig(context) REG_sig(dsisr, context)
1320 # define TRAP_sig(context) REG_sig(trap, context)
/* Darwin variant of the same accessors */
1324 # include <sys/ucontext.h>
1325 typedef struct ucontext SIGCONTEXT
;
1326 /* All Registers access - only for local access */
1327 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1328 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1329 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1330 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1331 /* Gpr Registers access */
1332 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1333 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1334 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1335 # define CTR_sig(context) REG_sig(ctr, context)
1336 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1337 # define LR_sig(context) REG_sig(lr, context) /* Link register */
1338 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1339 /* Float Registers access */
1340 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1341 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1342 /* Exception Registers access */
1343 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1344 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1345 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1346 #endif /* __APPLE__ */
/*
 * PowerPC host handler.  is_write is derived from DSISR: when the
 * fault is not an ISI (trap 0x400) and DSISR bit 0x02000000 is set,
 * the faulting access was presumably a store -- TODO confirm against
 * the PowerPC DSISR bit definitions.
 * NOTE(review): the lines computing pc (IAR_sig) and initializing
 * is_write appear to have been dropped by the extraction.
 */
1348 int cpu_signal_handler(int host_signum
, void *pinfo
,
1351 siginfo_t
*info
= pinfo
;
1352 struct ucontext
*uc
= puc
;
1360 if (DSISR_sig(uc
) & 0x00800000)
1363 if (TRAP_sig(uc
) != 0x400 && (DSISR_sig(uc
) & 0x02000000))
1366 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1367 is_write
, &uc
->uc_sigmask
, puc
);
1370 #elif defined(__alpha__)
/*
 * Alpha host handler.  The signal frame carries no write flag, so
 * the faulting instruction's opcode (top 6 bits) is decoded to tell
 * loads from stores.
 * NOTE(review): the switch cases (the store opcodes that would set
 * is_write) were dropped by the extraction.
 */
1372 int cpu_signal_handler(int host_signum
, void *pinfo
,
1375 siginfo_t
*info
= pinfo
;
1376 struct ucontext
*uc
= puc
;
1377 uint32_t *pc
= uc
->uc_mcontext
.sc_pc
;
1378 uint32_t insn
= *pc
;
1381 /* XXX: need kernel patch to get write flag faster */
1382 switch (insn
>> 26) {
1397 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1398 is_write
, &uc
->uc_sigmask
, puc
);
1400 #elif defined(__sparc__)
/*
 * Sparc host handler.  The register area and signal mask are located
 * at fixed offsets right after the siginfo_t (regs = info + 1,
 * sigmask = regs + 20) -- presumably the Linux/sparc signal frame
 * layout; verify against the kernel headers.  Faulting insns in
 * format 3 ((insn >> 30) == 3) are decoded via the op3 field
 * ((insn >> 19) & 0x3f) to detect stores.
 * NOTE(review): the pc extraction and the store-opcode switch cases
 * were dropped by the extraction.
 */
1402 int cpu_signal_handler(int host_signum
, void *pinfo
,
1405 siginfo_t
*info
= pinfo
;
1406 uint32_t *regs
= (uint32_t *)(info
+ 1);
1407 void *sigmask
= (regs
+ 20);
1412 /* XXX: is there a standard glibc define ? */
1414 /* XXX: need kernel patch to get write flag faster */
1416 insn
= *(uint32_t *)pc
;
1417 if ((insn
>> 30) == 3) {
1418 switch((insn
>> 19) & 0x3f) {
1430 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1431 is_write
, sigmask
, NULL
);
1434 #elif defined(__arm__)
/*
 * ARM host handler: the faulting pc comes from gregs[R15]; no cheap
 * way to get a write flag here (see XXX below).
 * NOTE(review): the "is_write," argument line of the call appears to
 * have been dropped by the extraction.
 */
1436 int cpu_signal_handler(int host_signum
, void *pinfo
,
1439 siginfo_t
*info
= pinfo
;
1440 struct ucontext
*uc
= puc
;
1444 pc
= uc
->uc_mcontext
.gregs
[R15
];
1445 /* XXX: compute is_write */
1447 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1449 &uc
->uc_sigmask
, puc
);
1452 #elif defined(__mc68000)
/*
 * m68k host handler: gregs[16] holds the faulting pc; write flag not
 * available (see XXX below).
 * NOTE(review): the "is_write," argument line of the call appears to
 * have been dropped by the extraction.
 */
1454 int cpu_signal_handler(int host_signum
, void *pinfo
,
1457 siginfo_t
*info
= pinfo
;
1458 struct ucontext
*uc
= puc
;
1462 pc
= uc
->uc_mcontext
.gregs
[16];
1463 /* XXX: compute is_write */
1465 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1467 &uc
->uc_sigmask
, puc
);
1470 #elif defined(__ia64)
1473 /* This ought to be in <bits/siginfo.h>... */
1474 # define __ISR_VALID 1
/*
 * IA-64 host handler: the faulting ip comes from sc_ip; for SEGV,
 * when the siginfo ISR field is valid (__ISR_VALID flag), its bit 33
 * (ISR.W) reports a write access.
 * NOTE(review): the other switch cases and the is_write
 * initialization were dropped by the extraction.
 */
1477 int cpu_signal_handler(int host_signum
, void *pinfo
, void *puc
)
1479 siginfo_t
*info
= pinfo
;
1480 struct ucontext
*uc
= puc
;
1484 ip
= uc
->uc_mcontext
.sc_ip
;
1485 switch (host_signum
) {
1491 if (info
->si_code
&& (info
->si_segvflags
& __ISR_VALID
))
1492 /* ISR.W (write-access) is bit 33: */
1493 is_write
= (info
->si_isr
>> 33) & 1;
1499 return handle_cpu_signal(ip
, (unsigned long)info
->si_addr
,
1501 &uc
->uc_sigmask
, puc
);
1504 #elif defined(__s390__)
/*
 * S/390 host handler: the faulting address is taken from the PSW
 * stored in the ucontext; write flag not available (see XXX below).
 */
1506 int cpu_signal_handler(int host_signum
, void *pinfo
,
1509 siginfo_t
*info
= pinfo
;
1510 struct ucontext
*uc
= puc
;
1514 pc
= uc
->uc_mcontext
.psw
.addr
;
1515 /* XXX: compute is_write */
1517 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1518 is_write
, &uc
->uc_sigmask
, puc
);
1521 #elif defined(__mips__)
/*
 * MIPS host handler: the faulting pc comes from uc_mcontext.pc;
 * write flag not available (see XXX below).
 */
1523 int cpu_signal_handler(int host_signum
, void *pinfo
,
1526 siginfo_t
*info
= pinfo
;
1527 struct ucontext
*uc
= puc
;
1528 greg_t pc
= uc
->uc_mcontext
.pc
;
1531 /* XXX: compute is_write */
1533 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1534 is_write
, &uc
->uc_sigmask
, puc
);
/* no supported host architecture matched above */
1539 #error host CPU specific signal handler needed
1543 #endif /* !defined(CONFIG_SOFTMMU) */