/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;
static unsigned long next_tb;
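
/* tb_invalidated_flag is set by tb_find_slow() whenever code generation
   flushed or recycled translation buffer space, i.e. a cached
   TranslationBlock pointer may have become stale.  next_tb carries the
   value returned by the last executed TB; it is cleared whenever control
   flow changes in a way that makes patching a direct jump from the
   previous block unsafe (interrupts, exceptions, MMU faults). */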
//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents
static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
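
/* cpu_loop_exit() is how helpers abort execution of the current TB: the
   longjmp lands back in the setjmp() at the top of cpu_exec(), which then
   delivers whatever exception is recorded in env->exception_index. */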
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
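
/* TB lookup is two-level: tb_find_fast() probes the per-CPU virtual-PC
   hash cache (env->tb_jmp_cache) and only falls back to the slow path
   above on a miss.  tb_find_slow() hashes the *physical* PC, so blocks
   survive virtual-mapping changes and are shared between mappings of the
   same physical code page. */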
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit 13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & U_FLAG;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_IA64)
    flags = 0;
    cs_base = 0;                    /* XXXXX */
    pc = 0;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    unsigned long (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
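    /* The i386 front end keeps the condition codes in a lazy form while
       translated code runs: CC_SRC/CC_OP describe the last flag-setting
       operation and DF caches the direction flag as +1/-1.  The full
       EFLAGS value is only rebuilt via cc_table[CC_OP].compute_all() when
       it is really needed (state dumps, kqemu entry, leaving cpu_exec). */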
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
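    /* exception_index convention: -1 means nothing pending, values below
       EXCP_INTERRUPT are target CPU exceptions that must be delivered to
       the guest, and EXCP_INTERRUPT and above are asynchronous requests
       to leave the execution loop and return to the caller. */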
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
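                /* next_tb was zeroed above: the first block executed after
                   (re)entering this loop is always looked up, never entered
                   through a patched direct jump from a previous TB. */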
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                        !(env->hflags & HF_NMI_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                        (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        next_tb = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
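                /* next_tb packs two things: the address of the previously
                   executed TranslationBlock in the upper bits and the index
                   of the jump slot that was taken (0 or 1) in the low two
                   bits, hence the (next_tb & ~3) / (next_tb & 3) split in
                   the tb_add_jump() call above. */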
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call      %0\n\t"
                                     "mov       %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__hppa__)
                asm volatile ("ble  0(%%sr4,%1)\n"
                              "copy %%r31,%%r18\n"
                              "copy %%r28,%0\n"
                              : "=r" (next_tb)
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r4", "r5", "r6", "r7",
                                "r8", "r9", "r10", "r11", "r12", "r13",
                                "r18", "r19", "r20", "r21", "r22", "r23",
                                "r24", "r25", "r26", "r27", "r28", "r29",
                                "r30", "r31");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#elif defined(__i386)
                asm volatile ("sub $12, %%esp\n\t"
                              "push %%ebp\n\t"
                              "call *%1\n\t"
                              "pop %%ebp\n\t"
                              "add $12, %%esp\n\t"
                              : "=a" (next_tb)
                              : "m" (gen_func)
                              : "ebx", "ecx", "edx", "esi", "edi", "cc",
                                "memory");
#elif defined(__x86_64__)
                asm volatile ("sub $8, %%rsp\n\t"
                              "push %%rbp\n\t"
                              "call *%1\n\t"
                              "pop %%rbp\n\t"
                              "add $8, %%rsp\n\t"
                              : "=a" (next_tb)
                              : "m" (gen_func)
                              : "rbx", "rcx", "rdx", "rsi", "rdi", "r8", "r9",
                                "r10", "r11", "r12", "r13", "r14", "r15", "cc",
                                "memory");
#else
                next_tb = gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    next_tb = 0;
                }
#endif
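                /* Switch-back heuristic for kqemu (below): hand execution
                   back to the in-kernel accelerator only once the guest has
                   gone MIN_CYCLE_BEFORE_SWITCH time units without an
                   emulated I/O access (tracked in env->last_io_time); the
                   assumption is that I/O-heavy code would immediately trap
                   back out of kqemu anyway. */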
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
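/* Return-value contract shared by all target variants of
   handle_cpu_signal(): 1 means the fault was fully handled here (the
   write-protected page was restored, a guest exception was raised, or
   execution was resumed), 0 means it was not a guest MMU fault and the
   caller must fall back to the host's default signal handling. */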
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif
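
/* The *_sig() accessors above hide the ucontext layout differences
   between Darwin, where uc_mcontext is a pointer to separate ss/es
   register structures, and Linux, where uc_mcontext embeds a gregs array
   indexed by REG_* constants; the handler below is written once in terms
   of these macros. */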
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)         ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)    ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)    ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)      ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)          REG_sig(r##reg_num, context)
# define IAR_sig(context)                   REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                   REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                   REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                   REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                    REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                    REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)        FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                 ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                   EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)                 EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                  EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.arm_pc;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */