/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;

//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    longjmp(env->jmp_env, 1);
}
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
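/* TB lookup is two-level: tb_find_fast() probes the per-CPU virtual-pc
   jump cache, and on a miss tb_find_slow() searches (or fills) the hash
   table keyed on physical addresses, so that cached TBs survive changes
   of the virtual mapping. */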
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;
    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
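    /* walk the collision chain of this bucket: a TB matches only if its
       pc, cs_base, flags and both physical page addresses agree */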
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . MMU Boot . MMU enabled . MMU no-fault . Supervisor
    flags = (env->psref << 4) | (((env->mmuregs[0] & MMU_BM) >> 14) << 3)
        | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1)
        | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->sr & (SR_MD | SR_RB);
    cs_base = 0;         /* XXXXX */
    pc = env->pc;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    int saved_i7;
    target_ulong tmp_T0;
#endif
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
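            /* control reaches here both on normal entry and after a
               longjmp(): cpu_loop_exit() and the fault handlers below
               unwind straight out of generated code via env->jmp_env. */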
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_M68K)
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            T0 = 0; /* force lookup of first TB */
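            /* T0 doubles as the chaining cookie: the address of the
               previously executed TB with the jump-slot index in its low
               two bits (see the tb_add_jump() call below); zero means
               "do not chain". */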
            for(;;) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
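                /* interrupt_request is sampled once here; each pending
                   cause is then handled in priority order. Handlers may
                   set new request bits, which is why the EXITTB check
                   below rereads env->interrupt_request instead of this
                   snapshot. */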
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#endif
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_cpsr & CPSR_I)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                    }
#elif defined(TARGET_SH4)
                    /* XXXXX */
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
                tb = tb_find_fast();
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                T0 = tmp_T0;
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (T0 != 0 &&
#if USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                        && (tb->cflags & CF_CODE_COPY) ==
                        (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
                        ) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
                        /* propagates the FP use info */
                        ((TranslationBlock *)(T0 & ~3))->cflags |=
                            (tb->cflags & CF_FP_USED);
#endif
                        spin_unlock(&tb_lock);
                    }
                }
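                /* note: tb_add_jump() rewrites the previous TB's
                   direct-jump target in place, which is why the update is
                   done under tb_lock. */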
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
                if (!(tb->cflags & CF_CODE_COPY)) {
                    if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
                        save_native_fp_state(env);
                    }
                    gen_func();
                } else {
                    if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
                        restore_native_fp_state(env);
                    }
                    /* we work with native eflags */
                    CC_SRC = cc_table[CC_OP].compute_all();
                    CC_OP = CC_OP_EFLAGS;
                    asm(".globl exec_loop\n"
                        "    fs movl %11, %%eax\n"
                        "    andl $0x400, %%eax\n"
                        "    fs orl %8, %%eax\n"
                        "    fs movl %%esp, %12\n"
                        "    fs movl %0, %%eax\n"
                        "    fs movl %1, %%ecx\n"
                        "    fs movl %2, %%edx\n"
                        "    fs movl %3, %%ebx\n"
                        "    fs movl %4, %%esp\n"
                        "    fs movl %5, %%ebp\n"
                        "    fs movl %6, %%esi\n"
                        "    fs movl %7, %%edi\n"
                        "    fs movl %%esp, %4\n"
                        "    fs movl %12, %%esp\n"
                        "    fs movl %%eax, %0\n"
                        "    fs movl %%ecx, %1\n"
                        "    fs movl %%edx, %2\n"
                        "    fs movl %%ebx, %3\n"
                        "    fs movl %%ebp, %5\n"
                        "    fs movl %%esi, %6\n"
                        "    fs movl %%edi, %7\n"
                        "    movl %%eax, %%ecx\n"
                        "    andl $0x400, %%ecx\n"
                        "    andl $0x8d5, %%eax\n"
                        "    fs movl %%eax, %8\n"
                        "    subl %%ecx, %%eax\n"
                        "    fs movl %%eax, %11\n"
                        "    fs movl %9, %%ebx\n" /* get T0 value */
                        : /* no outputs */
                        : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
                          "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
                          "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
                          "m" (*(uint8_t *)offsetof(CPUState, df)),
                          "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
                        );
                }
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                gen_func();
#endif
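                /* ia64 calls indirect functions through a descriptor
                   (entry address plus gp); building one on the stack is
                   what lets us call into the code generation buffer. */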
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* inner for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
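            /* heuristic: once the guest has gone MIN_CYCLE_BEFORE_SWITCH
               without touching I/O, leave the emulation loop so that
               execution can be handed back to the kqemu accelerator. */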
#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    }
#endif
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host FPU exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
#endif
    /* restore global registers */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}
void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave((target_ulong)ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor((target_ulong)ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
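/* Each target's handle_cpu_signal() below has the same shape: first try
   page_unprotect(), which handles write faults on pages containing
   translated code; then ask the target MMU fault handler; only if that
   reports a real fault is the host signal turned into a guest exception. */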
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else

#error unsupported target CPU

#endif
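/* One cpu_signal_handler() per host architecture follows; each extracts
   the faulting pc (and, where the host context makes it cheap, a write
   flag) from the signal frame before calling handle_cpu_signal(). */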
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long *)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif
#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
{
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    }
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);
}
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (ERROR_sig(uc) >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num])
# define FPSCR_sig(context)             (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#else /* __APPLE__ */
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)         ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)    ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)    ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)      ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)          REG_sig(r##reg_num, context)
# define IAR_sig(context)                   REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                   REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                   REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                   REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                    REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                    REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)        FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                 ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                   EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)                 EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                  EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: /* stw */
    case 0x0e: /* stb */
    case 0x0f: /* stq_u */
    case 0x24: /* stf */
    case 0x25: /* stg */
    case 0x26: /* sts */
    case 0x27: /* stt */
    case 0x2c: /* stl */
    case 0x2d: /* stq */
    case 0x2e: /* stl_c */
    case 0x2f: /* stq_c */
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */