/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
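
/* Unwinds to the setjmp() point in cpu_exec(); a pending guest
   exception, if any, is described by env->exception_index. */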
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
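
/* Slow-path lookup of a TB by physical address: a cached TB is only
   reused when pc, cs_base, flags and the physical page(s) it covers
   all match; a TB spanning two pages must match on both pages. */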
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
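
/* Fast-path lookup: a direct-mapped cache indexed by the virtual PC,
   falling back to tb_find_slow() on any mismatch. */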
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
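
/* When the pending EXCP_DEBUG was not raised by a watchpoint, clear any
   stale BP_WATCHPOINT_HIT flags before calling the registered handler. */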
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_MMIX)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
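
    /* Everything below runs under a setjmp() scope: helpers abort the
       current TB by calling cpu_loop_exit(), which longjmp()s back here
       with env->exception_index describing the pending exception. */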
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_MMIX)
                    do_interrupt(env); /* assumed: mirrors the other targets */
#endif
#endif
                }
                env->exception_index = -1;
            }
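
            /* If the kqemu accelerator can run the current context
               natively, hand execution to it; eflags is converted to the
               standard format before the call and back to the CPU
               temporary format afterwards. */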
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

#ifdef CONFIG_KVM
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
#endif

            next_tb = 0; /* force lookup of first TB */
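
            /* The two low bits of next_tb are a tag: they hold the index
               (0 or 1) of the jump slot through which the previous TB
               exited, used by tb_add_jump() below for block chaining, or
               the value 2 for an exit taken before the TB's first
               instruction. 0 means there is no previous TB. */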
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_MMIX)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        /* assumed: deliver like the other targets */
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }

#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MMIX)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif

                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
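                    /* tcg_qemu_tb_exec() runs the TB's generated host
                       code and returns the tagged address of the TB it
                       exited from; tag 2 means the icount decrementer
                       went negative mid-TB, handled below by refilling
                       the counter or finishing the remaining
                       instructions via cpu_exec_nocache(). */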
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_MMIX)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
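
/* The wrappers below temporarily install the given CPUX86State as the
   global 'env', call the target helper, then restore the previous env. */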
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
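
/* Return value: 1 if the fault was handled (the page was unprotected,
   or the guest MMU accepted the access); 0 if it was not an MMU fault
   and the caller should pass the signal on to the host. When a guest
   exception must be raised, this function does not return and instead
   longjmp()s back into the emulator. */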
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
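
/* Host-side handlers: each cpu_signal_handler() below extracts the
   faulting host PC, an is_write flag and the saved signal mask from the
   host-specific signal context, then defers to handle_cpu_signal(). */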
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */