/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif

#if !defined(CONFIG_SOFTMMU)
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
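
/* Note: cpu_loop_exit() above and cpu_resume_from_signal() below both
   unwind to the setjmp() point in cpu_exec() via longjmp().  Any guest
   state living in host registers must therefore be written back to the
   CPUState first (regs_to_env()), since longjmp() restores the host
   registers to their setjmp-time values. */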
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
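
/* Restoring the signal mask here matters: this function can be reached
   from inside a SIGSEGV handler, and longjmp() skips the normal
   sigreturn path that would unblock the signal.  Without the explicit
   sigprocmask() call the signal would stay blocked, and the next guest
   fault would kill the process instead of re-entering the handler. */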
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
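
/* The value returned by tcg_qemu_tb_exec() is overloaded: the upper bits
   are the address of the TranslationBlock that was executing when control
   came back, and the low two bits are either the index of the jump slot
   that was taken, or 2 when execution stopped before the block body ran
   (e.g. the instruction counter expired).  Callers therefore test
   (next_tb & 3) and mask with ~3 before using it as a pointer. */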
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
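
/* TB lookup is two-level: tb_jmp_cache is a small per-CPU table indexed
   by a hash of the virtual PC and is checked first (tb_find_fast below);
   tb_phys_hash is the authoritative table keyed by the physical address
   of the code, so the same translation can be reused across different
   virtual mappings.  The slow path above also re-validates cs_base,
   flags and, for blocks spanning a page boundary, the physical address
   of the second page. */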
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
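
/* If this EXCP_DEBUG stop was not caused by a watchpoint (watchpoint_hit
   is unset), any BP_WATCHPOINT_HIT flags left over from a previous stop
   are stale and are cleared here, so that a later single-step does not
   misreport an old watchpoint as the stop reason. */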
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
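
    /* On i386 the condition codes are kept in a lazy form: CC_SRC/CC_OP
       hold the operands of the last flag-setting operation and the real
       eflags bits are only materialized on demand by
       helper_cc_compute_all().  The direction flag gets the same
       treatment: eflags bit 10 (DF) is mapped to DF = +1 when clear and
       DF = -1 when set, i.e. directly to the string-instruction stride,
       so eflags.DF = 1 gives DF = 1 - 2*1 = -1. */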
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
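
            /* next_tb carries the chaining state between iterations of
               the inner loop: zero means "no previously executed TB wants
               to be patched", any other value names the TB and jump slot
               to link from once the next block has been found.  It is
               reset to zero whenever control flow changes in a way that
               translated jumps must not survive (interrupt delivery,
               debug exits, etc.). */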
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                   /* Don't use the cached interrupt_request value,
                      do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
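
                /* Direct block chaining: tb_add_jump() patches the jump
                   slot (next_tb & 3) of the previously executed TB
                   (next_tb & ~3) so that it branches straight to the new
                   block, bypassing this lookup loop on later runs.  It is
                   skipped when the new TB spans two pages, because the
                   second page could be remapped behind our back. */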
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
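
                /* In the icount refill above, the decrementer is only 16
                   bits wide, so at most 0xffff instructions can be
                   scheduled per refill; the remainder stays in
                   icount_extra.  E.g. with icount_extra == 70000 and
                   insns_left == 0, one refill moves 0xffff (65535)
                   instructions into icount_decr.u16.low and leaves
                   icount_extra == 4465 for the next round. */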
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
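
/* Return-value contract shared by the handle_cpu_signal() variants below:
   0 tells cpu_signal_handler() the fault was not generated by the guest
   MMU model (let the host act on it), 1 means it was handled and host
   execution can resume.  A guest-visible fault never returns normally at
   all: it re-enters the emulator via raise_exception_err(),
   cpu_loop_exit() or cpu_resume_from_signal(). */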
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
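
/* Trap number 0xe is the x86 page-fault exception; bit 1 of its error
   code is set when the faulting access was a write, which is exactly
   what (ERROR_sig(uc) >> 1) & 1 extracts for handle_cpu_signal(). */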
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)            ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)             REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                      REG_sig(nip, context) /* Program counter */
# define MSR_sig(context)                      REG_sig(msr, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                      REG_sig(ctr, context) /* Count register */
# define XER_sig(context)                      REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)                       REG_sig(link, context) /* Link register */
# define CR_sig(context)                       REG_sig(ccr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)           (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)                    (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                      REG_sig(dar, context)
# define DSISR_sig(context)                    REG_sig(dsisr, context)
# define TRAP_sig(context)                     REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)            ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)       ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)       ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)         ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)             REG_sig(r##reg_num, context)
# define IAR_sig(context)                      REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                      REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                      REG_sig(ctr, context)
# define XER_sig(context)                      REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)                       REG_sig(lr, context) /* Link register */
# define CR_sig(context)                       REG_sig(cr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)           FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                    ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                      EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)                    EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                     EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
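
/* insn >> 26 isolates the 6-bit major opcode of the faulting Alpha
   instruction; only store opcodes set is_write, since the Alpha signal
   context does not report the access type directly. */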
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
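
/* On SPARC, bits 31:30 of an instruction select its format; value 3 is
   the load/store format, and bits 24:19 (the op3 field) identify the
   particular memory operation, so the switch above matches the store
   variants to deduce is_write from the faulting instruction itself. */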
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */