2 * i386 emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
23 #if !defined(TARGET_IA64)
28 #if !defined(CONFIG_SOFTMMU)
40 #include <sys/ucontext.h>
46 #if defined(__sparc__) && !defined(HOST_SOLARIS)
47 // Work around ugly bugs in glibc that mangle global register contents
49 #define env cpu_single_env
/* Set when a TranslationBlock has been invalidated while code was being
   generated; cpu_exec() checks it to force a re-lookup and to avoid
   patching a direct jump into a stale TB. */
52 int tb_invalidated_flag
;
55 //#define DEBUG_SIGNAL
/* Abort execution of the current translated block and return to the
   setjmp() point established in cpu_exec().  Never returns.
   NOTE(review): relies on the file-scope `env` (aliased to
   cpu_single_env on some hosts, see the #define near the top). */
57 void cpu_loop_exit(void)
59 /* NOTE: the register at this point must be saved by hand because
60 longjmp restore them */
62 longjmp(env
->jmp_env
, 1);
65 /* exit the current TB from a signal handler. The host registers are
66 restored in a state compatible with the CPU emulator
68 void cpu_resume_from_signal(CPUState
*env1
, void *puc
/* host ucontext/sigcontext passed through from the signal handler;
   may be NULL when not called from a signal context — TODO confirm */
)
70 #if !defined(CONFIG_SOFTMMU)
72 struct ucontext
*uc
= puc
;
73 #elif defined(__OpenBSD__)
74 struct sigcontext
*uc
= puc
;
80 /* XXX: restore cpu registers saved in host registers */
82 #if !defined(CONFIG_SOFTMMU)
/* Because we longjmp() out of the handler instead of returning through
   sigreturn, the kernel never restores the signal mask — do it by hand. */
84 /* XXX: use siglongjmp ? */
86 sigprocmask(SIG_SETMASK
, &uc
->uc_sigmask
, NULL
);
87 #elif defined(__OpenBSD__)
88 sigprocmask(SIG_SETMASK
, &uc
->sc_mask
, NULL
);
/* Clear any pending exception and jump back into cpu_exec()'s setjmp
   point; never returns. */
92 env
->exception_index
= -1;
93 longjmp(env
->jmp_env
, 1);
96 /* Execute the code without caching the generated code. An interpreter
97 could be used if available. */
98 static void cpu_exec_nocache(int max_cycles
/* upper bound on guest instructions to execute; clamped below */
, TranslationBlock
*orig_tb
/* TB whose (pc, cs_base, flags) are retranslated as a one-shot TB */
)
100 unsigned long next_tb
;
101 TranslationBlock
*tb
;
103 /* Should never happen.
104 We only end up here when an existing TB is too long. */
105 if (max_cycles
> CF_COUNT_MASK
)
106 max_cycles
= CF_COUNT_MASK
;
/* Regenerate code for the same guest location; the one-shot TB is
   discarded after execution (see tb_phys_invalidate below). */
108 tb
= tb_gen_code(env
, orig_tb
->pc
, orig_tb
->cs_base
, orig_tb
->flags
,
110 env
->current_tb
= tb
;
111 /* execute the generated code */
112 next_tb
= tcg_qemu_tb_exec(tb
->tc_ptr
);
/* Low 2 bits of next_tb encode the exit reason; 2 == exited before
   any guest instruction ran, so the PC must be restored from the TB. */
114 if ((next_tb
& 3) == 2) {
115 /* Restore PC. This may happen if async event occurs before
116 the TB starts executing. */
117 cpu_pc_from_tb(env
, tb
);
119 tb_phys_invalidate(tb
, -1);
/* Slow-path TB lookup: search the physical-address hash table for a TB
   matching (pc, cs_base, flags); translate a new TB if none is found.
   The result is also installed in env->tb_jmp_cache so tb_find_fast()
   hits it next time. */
123 static TranslationBlock
*tb_find_slow(target_ulong pc
,
124 target_ulong cs_base
,
127 TranslationBlock
*tb
, **ptb1
;
129 target_ulong phys_pc
, phys_page1
, phys_page2
, virt_page2
;
131 tb_invalidated_flag
= 0;
133 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
135 /* find translated block using physical mappings */
136 phys_pc
= get_phys_addr_code(env
, pc
);
137 phys_page1
= phys_pc
& TARGET_PAGE_MASK
;
139 h
= tb_phys_hash_func(phys_pc
);
140 ptb1
= &tb_phys_hash
[h
];
/* Walk the hash chain; a candidate must match first page, cs_base and
   flags, and — when the TB spans two pages — the second page too. */
146 tb
->page_addr
[0] == phys_page1
&&
147 tb
->cs_base
== cs_base
&&
148 tb
->flags
== flags
) {
149 /* check next page if needed */
150 if (tb
->page_addr
[1] != -1) {
151 virt_page2
= (pc
& TARGET_PAGE_MASK
) +
153 phys_page2
= get_phys_addr_code(env
, virt_page2
);
154 if (tb
->page_addr
[1] == phys_page2
)
160 ptb1
= &tb
->phys_hash_next
;
163 /* if no translated code available, then translate it now */
164 tb
= tb_gen_code(env
, pc
, cs_base
, flags
, 0);
167 /* we add the TB in the virtual pc hash table */
168 env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)] = tb
;
/* Fast-path TB lookup: probe the per-CPU virtual-PC jump cache and
   fall back to tb_find_slow() on a miss or stale entry. */
172 static inline TranslationBlock
*tb_find_fast(void)
174 TranslationBlock
*tb
;
175 target_ulong cs_base
, pc
;
178 /* we record a subset of the CPU state. It will
179 always be the same before a given translated block
181 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &flags
);
182 tb
= env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)];
/* The cache is keyed on pc only, so cs_base and flags must be
   re-validated before the cached TB may be reused. */
183 if (unlikely(!tb
|| tb
->pc
!= pc
|| tb
->cs_base
!= cs_base
||
184 tb
->flags
!= flags
)) {
185 tb
= tb_find_slow(pc
, cs_base
, flags
);
/* Optional callback invoked by cpu_handle_debug_exception(). */
190 static CPUDebugExcpHandler
*debug_excp_handler
;
/* Install a new debug-exception handler and return the previous one
   (NULL if none was installed) so callers can chain or restore it. */
192 CPUDebugExcpHandler
*cpu_set_debug_excp_handler(CPUDebugExcpHandler
*handler
)
194 CPUDebugExcpHandler
*old_handler
= debug_excp_handler
;
196 debug_excp_handler
= handler
;
/* Called from cpu_exec() when EXCP_DEBUG is raised.  If no watchpoint
   actually fired (i.e. the stop came from single-stepping), clear any
   stale BP_WATCHPOINT_HIT flags, then notify the registered handler. */
200 static void cpu_handle_debug_exception(CPUState
*env
)
204 if (!env
->watchpoint_hit
)
205 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
)
206 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
208 if (debug_excp_handler
)
209 debug_excp_handler(env
);
212 /* main execution loop */
214 int cpu_exec(CPUState
*env1
)
216 #define DECLARE_HOST_REGS 1
217 #include "hostregs_helper.h"
218 int ret
, interrupt_request
;
219 TranslationBlock
*tb
;
221 unsigned long next_tb
;
223 if (cpu_halted(env1
) == EXCP_HALTED
)
226 cpu_single_env
= env1
;
228 /* first we save global registers */
229 #define SAVE_HOST_REGS 1
230 #include "hostregs_helper.h"
234 #if defined(TARGET_I386)
235 /* put eflags in CPU temporary format */
236 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
237 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
238 CC_OP
= CC_OP_EFLAGS
;
239 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
240 #elif defined(TARGET_SPARC)
241 #elif defined(TARGET_M68K)
242 env
->cc_op
= CC_OP_FLAGS
;
243 env
->cc_dest
= env
->sr
& 0xf;
244 env
->cc_x
= (env
->sr
>> 4) & 1;
245 #elif defined(TARGET_ALPHA)
246 #elif defined(TARGET_ARM)
247 #elif defined(TARGET_PPC)
248 #elif defined(TARGET_MIPS)
249 #elif defined(TARGET_SH4)
250 #elif defined(TARGET_CRIS)
251 #elif defined(TARGET_IA64)
254 #error unsupported target CPU
256 env
->exception_index
= -1;
258 /* prepare setjmp context for exception handling */
260 if (setjmp(env
->jmp_env
) == 0) {
261 #if defined(__sparc__) && !defined(HOST_SOLARIS)
263 env
= cpu_single_env
;
264 #define env cpu_single_env
266 env
->current_tb
= NULL
;
267 /* if an exception is pending, we execute it here */
268 if (env
->exception_index
>= 0) {
269 if (env
->exception_index
>= EXCP_INTERRUPT
) {
270 /* exit request from the cpu execution loop */
271 ret
= env
->exception_index
;
272 if (ret
== EXCP_DEBUG
)
273 cpu_handle_debug_exception(env
);
276 #if defined(CONFIG_USER_ONLY)
277 /* if user mode only, we simulate a fake exception
278 which will be handled outside the cpu execution
280 #if defined(TARGET_I386)
281 do_interrupt_user(env
->exception_index
,
282 env
->exception_is_int
,
284 env
->exception_next_eip
);
285 /* successfully delivered */
286 env
->old_exception
= -1;
288 ret
= env
->exception_index
;
291 #if defined(TARGET_I386)
292 /* simulate a real cpu exception. On i386, it can
293 trigger new exceptions, but we do not handle
294 double or triple faults yet. */
295 do_interrupt(env
->exception_index
,
296 env
->exception_is_int
,
298 env
->exception_next_eip
, 0);
299 /* successfully delivered */
300 env
->old_exception
= -1;
301 #elif defined(TARGET_PPC)
303 #elif defined(TARGET_MIPS)
305 #elif defined(TARGET_SPARC)
307 #elif defined(TARGET_ARM)
309 #elif defined(TARGET_SH4)
311 #elif defined(TARGET_ALPHA)
313 #elif defined(TARGET_CRIS)
315 #elif defined(TARGET_M68K)
317 #elif defined(TARGET_IA64)
322 env
->exception_index
= -1;
325 if (kqemu_is_ok(env
) && env
->interrupt_request
== 0 && env
->exit_request
== 0) {
327 env
->eflags
= env
->eflags
| helper_cc_compute_all(CC_OP
) | (DF
& DF_MASK
);
328 ret
= kqemu_cpu_exec(env
);
329 /* put eflags in CPU temporary format */
330 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
331 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
332 CC_OP
= CC_OP_EFLAGS
;
333 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
336 longjmp(env
->jmp_env
, 1);
337 } else if (ret
== 2) {
338 /* softmmu execution needed */
340 if (env
->interrupt_request
!= 0 || env
->exit_request
!= 0) {
341 /* hardware interrupt will be executed just after */
343 /* otherwise, we restart */
344 longjmp(env
->jmp_env
, 1);
352 longjmp(env
->jmp_env
, 1);
355 next_tb
= 0; /* force lookup of first TB */
357 interrupt_request
= env
->interrupt_request
;
358 if (unlikely(interrupt_request
)) {
359 if (unlikely(env
->singlestep_enabled
& SSTEP_NOIRQ
)) {
360 /* Mask out external interrupts for this step. */
361 interrupt_request
&= ~(CPU_INTERRUPT_HARD
|
366 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
367 env
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
368 env
->exception_index
= EXCP_DEBUG
;
371 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
372 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
373 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
374 env
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
376 env
->exception_index
= EXCP_HLT
;
380 #if defined(TARGET_I386)
381 if (env
->hflags2
& HF2_GIF_MASK
) {
382 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
383 !(env
->hflags
& HF_SMM_MASK
)) {
384 svm_check_intercept(SVM_EXIT_SMI
);
385 env
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
388 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
389 !(env
->hflags2
& HF2_NMI_MASK
)) {
390 env
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
391 env
->hflags2
|= HF2_NMI_MASK
;
392 do_interrupt(EXCP02_NMI
, 0, 0, 0, 1);
394 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
395 (((env
->hflags2
& HF2_VINTR_MASK
) &&
396 (env
->hflags2
& HF2_HIF_MASK
)) ||
397 (!(env
->hflags2
& HF2_VINTR_MASK
) &&
398 (env
->eflags
& IF_MASK
&&
399 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
))))) {
401 svm_check_intercept(SVM_EXIT_INTR
);
402 env
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VIRQ
);
403 intno
= cpu_get_pic_interrupt(env
);
404 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Servicing hardware INT=0x%02x\n", intno
);
405 #if defined(__sparc__) && !defined(HOST_SOLARIS)
407 env
= cpu_single_env
;
408 #define env cpu_single_env
410 do_interrupt(intno
, 0, 0, 0, 1);
411 /* ensure that no TB jump will be modified as
412 the program flow was changed */
414 #if !defined(CONFIG_USER_ONLY)
415 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
416 (env
->eflags
& IF_MASK
) &&
417 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
419 /* FIXME: this should respect TPR */
420 svm_check_intercept(SVM_EXIT_VINTR
);
421 intno
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_vector
));
422 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Servicing virtual hardware INT=0x%02x\n", intno
);
423 do_interrupt(intno
, 0, 0, 0, 1);
424 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
429 #elif defined(TARGET_PPC)
431 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
435 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
436 ppc_hw_interrupt(env
);
437 if (env
->pending_interrupts
== 0)
438 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
441 #elif defined(TARGET_MIPS)
442 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
443 (env
->CP0_Status
& env
->CP0_Cause
& CP0Ca_IP_mask
) &&
444 (env
->CP0_Status
& (1 << CP0St_IE
)) &&
445 !(env
->CP0_Status
& (1 << CP0St_EXL
)) &&
446 !(env
->CP0_Status
& (1 << CP0St_ERL
)) &&
447 !(env
->hflags
& MIPS_HFLAG_DM
)) {
449 env
->exception_index
= EXCP_EXT_INTERRUPT
;
454 #elif defined(TARGET_SPARC)
455 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
457 int pil
= env
->interrupt_index
& 15;
458 int type
= env
->interrupt_index
& 0xf0;
460 if (((type
== TT_EXTINT
) &&
461 (pil
== 15 || pil
> env
->psrpil
)) ||
463 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
464 env
->exception_index
= env
->interrupt_index
;
466 env
->interrupt_index
= 0;
467 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
472 } else if (interrupt_request
& CPU_INTERRUPT_TIMER
) {
473 //do_interrupt(0, 0, 0, 0, 0);
474 env
->interrupt_request
&= ~CPU_INTERRUPT_TIMER
;
476 #elif defined(TARGET_ARM)
477 if (interrupt_request
& CPU_INTERRUPT_FIQ
478 && !(env
->uncached_cpsr
& CPSR_F
)) {
479 env
->exception_index
= EXCP_FIQ
;
483 /* ARMv7-M interrupt return works by loading a magic value
484 into the PC. On real hardware the load causes the
485 return to occur. The qemu implementation performs the
486 jump normally, then does the exception return when the
487 CPU tries to execute code at the magic address.
488 This will cause the magic PC value to be pushed to
489 the stack if an interrupt occured at the wrong time.
490 We avoid this by disabling interrupts when
491 pc contains a magic address. */
492 if (interrupt_request
& CPU_INTERRUPT_HARD
493 && ((IS_M(env
) && env
->regs
[15] < 0xfffffff0)
494 || !(env
->uncached_cpsr
& CPSR_I
))) {
495 env
->exception_index
= EXCP_IRQ
;
499 #elif defined(TARGET_SH4)
500 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
504 #elif defined(TARGET_ALPHA)
505 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
509 #elif defined(TARGET_CRIS)
510 if (interrupt_request
& CPU_INTERRUPT_HARD
511 && (env
->pregs
[PR_CCS
] & I_FLAG
)) {
512 env
->exception_index
= EXCP_IRQ
;
516 if (interrupt_request
& CPU_INTERRUPT_NMI
517 && (env
->pregs
[PR_CCS
] & M_FLAG
)) {
518 env
->exception_index
= EXCP_NMI
;
522 #elif defined(TARGET_M68K)
523 if (interrupt_request
& CPU_INTERRUPT_HARD
524 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
525 < env
->pending_level
) {
526 /* Real hardware gets the interrupt vector via an
527 IACK cycle at this point. Current emulated
528 hardware doesn't rely on this, so we
529 provide/save the vector when the interrupt is
531 env
->exception_index
= env
->pending_vector
;
536 /* Don't use the cached interupt_request value,
537 do_interrupt may have updated the EXITTB flag. */
538 if (env
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
539 env
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
540 /* ensure that no TB jump will be modified as
541 the program flow was changed */
545 if (unlikely(env
->exit_request
)) {
546 env
->exit_request
= 0;
547 env
->exception_index
= EXCP_INTERRUPT
;
551 if (qemu_loglevel_mask(CPU_LOG_TB_CPU
)) {
552 /* restore flags in standard format */
554 #if defined(TARGET_I386)
555 env
->eflags
= env
->eflags
| helper_cc_compute_all(CC_OP
) | (DF
& DF_MASK
);
556 log_cpu_state(env
, X86_DUMP_CCOP
);
557 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
558 #elif defined(TARGET_ARM)
559 log_cpu_state(env
, 0);
560 #elif defined(TARGET_SPARC)
561 log_cpu_state(env
, 0);
562 #elif defined(TARGET_PPC)
563 log_cpu_state(env
, 0);
564 #elif defined(TARGET_M68K)
565 cpu_m68k_flush_flags(env
, env
->cc_op
);
566 env
->cc_op
= CC_OP_FLAGS
;
567 env
->sr
= (env
->sr
& 0xffe0)
568 | env
->cc_dest
| (env
->cc_x
<< 4);
569 log_cpu_state(env
, 0);
570 #elif defined(TARGET_MIPS)
571 log_cpu_state(env
, 0);
572 #elif defined(TARGET_SH4)
573 log_cpu_state(env
, 0);
574 #elif defined(TARGET_ALPHA)
575 log_cpu_state(env
, 0);
576 #elif defined(TARGET_CRIS)
577 log_cpu_state(env
, 0);
579 #error unsupported target CPU
585 /* Note: we do it here to avoid a gcc bug on Mac OS X when
586 doing it in tb_find_slow */
587 if (tb_invalidated_flag
) {
588 /* as some TB could have been invalidated because
589 of memory exceptions while generating the code, we
590 must recompute the hash index here */
592 tb_invalidated_flag
= 0;
595 qemu_log_mask(CPU_LOG_EXEC
, "Trace 0x%08lx [" TARGET_FMT_lx
"] %s\n",
596 (long)tb
->tc_ptr
, tb
->pc
,
597 lookup_symbol(tb
->pc
));
599 /* see if we can patch the calling TB. When the TB
600 spans two pages, we cannot safely do a direct
605 (env
->kqemu_enabled
!= 2) &&
607 tb
->page_addr
[1] == -1) {
608 tb_add_jump((TranslationBlock
*)(next_tb
& ~3), next_tb
& 3, tb
);
611 spin_unlock(&tb_lock
);
612 env
->current_tb
= tb
;
614 /* cpu_interrupt might be called while translating the
615 TB, but before it is linked into a potentially
616 infinite loop and becomes env->current_tb. Avoid
617 starting execution if there is a pending interrupt. */
618 if (unlikely (env
->exit_request
))
619 env
->current_tb
= NULL
;
621 while (env
->current_tb
) {
623 /* execute the generated code */
624 #if defined(__sparc__) && !defined(HOST_SOLARIS)
626 env
= cpu_single_env
;
627 #define env cpu_single_env
629 next_tb
= tcg_qemu_tb_exec(tc_ptr
);
630 env
->current_tb
= NULL
;
631 if ((next_tb
& 3) == 2) {
632 /* Instruction counter expired. */
634 tb
= (TranslationBlock
*)(long)(next_tb
& ~3);
636 cpu_pc_from_tb(env
, tb
);
637 insns_left
= env
->icount_decr
.u32
;
638 if (env
->icount_extra
&& insns_left
>= 0) {
639 /* Refill decrementer and continue execution. */
640 env
->icount_extra
+= insns_left
;
641 if (env
->icount_extra
> 0xffff) {
644 insns_left
= env
->icount_extra
;
646 env
->icount_extra
-= insns_left
;
647 env
->icount_decr
.u16
.low
= insns_left
;
649 if (insns_left
> 0) {
650 /* Execute remaining instructions. */
651 cpu_exec_nocache(insns_left
, tb
);
653 env
->exception_index
= EXCP_INTERRUPT
;
659 /* reset soft MMU for next block (it can currently
660 only be set by a memory fault) */
661 #if defined(CONFIG_KQEMU)
662 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
663 if (kqemu_is_ok(env
) &&
664 (cpu_get_time_fast() - env
->last_io_time
) >= MIN_CYCLE_BEFORE_SWITCH
) {
675 #if defined(TARGET_I386)
676 /* restore flags in standard format */
677 env
->eflags
= env
->eflags
| helper_cc_compute_all(CC_OP
) | (DF
& DF_MASK
);
678 #elif defined(TARGET_ARM)
679 /* XXX: Save/restore host fpu exception state?. */
680 #elif defined(TARGET_SPARC)
681 #elif defined(TARGET_PPC)
682 #elif defined(TARGET_M68K)
683 cpu_m68k_flush_flags(env
, env
->cc_op
);
684 env
->cc_op
= CC_OP_FLAGS
;
685 env
->sr
= (env
->sr
& 0xffe0)
686 | env
->cc_dest
| (env
->cc_x
<< 4);
687 #elif defined(TARGET_MIPS)
688 #elif defined(TARGET_SH4)
689 #elif defined(TARGET_IA64)
690 #elif defined(TARGET_ALPHA)
691 #elif defined(TARGET_CRIS)
694 #error unsupported target CPU
697 /* restore global registers */
698 #include "hostregs_helper.h"
700 /* fail safe : never use cpu_single_env outside cpu_exec() */
701 cpu_single_env
= NULL
;
705 /* must only be called from the generated code as an exception can be
/* Invalidate all TBs overlapping the guest virtual range [start, end)
   by translating `start` to a physical address and invalidating the
   corresponding physical range.
   NOTE(review): assumes the whole range maps contiguously from the
   physical address of `start` — TODO confirm. */
707 void tb_invalidate_page_range(target_ulong start
, target_ulong end
)
709 /* XXX: cannot enable it yet because it yields to MMU exception
710 where NIP != read address on PowerPC */
712 target_ulong phys_addr
;
713 phys_addr
= get_phys_addr_code(env
, start
);
714 tb_invalidate_phys_page_range(phys_addr
, phys_addr
+ end
- start
, 0);
718 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
/* User-mode helper: load segment register `seg_reg` with `selector`.
   In real/VM86 mode (PE clear or VM set) the segment cache is filled
   directly with base = selector << 4; in protected mode the full
   descriptor-load helper is used. */
720 void cpu_x86_load_seg(CPUX86State
*s
, int seg_reg
, int selector
)
722 CPUX86State
*saved_env
;
726 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
728 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
729 (selector
<< 4), 0xffff, 0);
731 helper_load_seg(seg_reg
, selector
);
/* User-mode helper: save the x87 FPU state to guest address `ptr`
   (32- or 16-bit layout per `data32`) via helper_fsave, temporarily
   switching the global `env` to `s`. */
736 void cpu_x86_fsave(CPUX86State
*s
, target_ulong ptr
, int data32
)
738 CPUX86State
*saved_env
;
743 helper_fsave(ptr
, data32
);
/* User-mode helper: restore the x87 FPU state from guest address `ptr`
   (32- or 16-bit layout per `data32`) via helper_frstor, temporarily
   switching the global `env` to `s`. */
748 void cpu_x86_frstor(CPUX86State
*s
, target_ulong ptr
, int data32
)
750 CPUX86State
*saved_env
;
755 helper_frstor(ptr
, data32
);
760 #endif /* TARGET_I386 */
762 #if !defined(CONFIG_SOFTMMU)
764 #if defined(TARGET_I386)
766 /* 'pc' is the host PC at which the exception was raised. 'address' is
767 the effective address of the memory exception. 'is_write' is 1 if a
768 write caused the exception and otherwise 0'. 'old_set' is the
769 signal set which should be restored */
/* Returns 1 when the fault was fully handled here, 0 when it was not an
   MMU fault (the caller should treat it as a real host crash); may not
   return at all when a guest exception is raised or execution resumes
   via cpu_resume_from_signal(). */
770 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
771 int is_write
, sigset_t
*old_set
,
774 TranslationBlock
*tb
;
778 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
779 #if defined(DEBUG_SIGNAL)
780 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
781 pc
, address
, is_write
, *(unsigned long *)old_set
);
/* A write fault may just be QEMU's own page protection used to detect
   self-modifying code; page_unprotect() handles that case. */
783 /* XXX: locking issue */
784 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
788 /* see if it is an MMU fault */
789 ret
= cpu_x86_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
791 return 0; /* not an MMU fault */
793 return 1; /* the MMU fault was handled without causing real CPU fault */
794 /* now we have a real cpu fault */
797 /* the PC is inside the translated code. It means that we have
798 a virtual CPU fault */
799 cpu_restore_state(tb
, env
, pc
, puc
);
803 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
804 env
->eip
, env
->cr
[2], env
->error_code
);
806 /* we restore the process signal mask as the sigreturn should
807 do it (XXX: use sigsetjmp) */
808 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
/* Deliver the page fault to the guest; does not return. */
809 raise_exception_err(env
->exception_index
, env
->error_code
);
811 /* activate soft MMU for this block */
812 env
->hflags
|= HF_SOFTMMU_MASK
;
813 cpu_resume_from_signal(env
, puc
);
815 /* never comes here */
819 #elif defined(TARGET_ARM)
820 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
821 int is_write
, sigset_t
*old_set
,
824 TranslationBlock
*tb
;
828 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
829 #if defined(DEBUG_SIGNAL)
830 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
831 pc
, address
, is_write
, *(unsigned long *)old_set
);
833 /* XXX: locking issue */
834 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
837 /* see if it is an MMU fault */
838 ret
= cpu_arm_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
840 return 0; /* not an MMU fault */
842 return 1; /* the MMU fault was handled without causing real CPU fault */
843 /* now we have a real cpu fault */
846 /* the PC is inside the translated code. It means that we have
847 a virtual CPU fault */
848 cpu_restore_state(tb
, env
, pc
, puc
);
850 /* we restore the process signal mask as the sigreturn should
851 do it (XXX: use sigsetjmp) */
852 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
854 /* never comes here */
857 #elif defined(TARGET_SPARC)
858 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
859 int is_write
, sigset_t
*old_set
,
862 TranslationBlock
*tb
;
866 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
867 #if defined(DEBUG_SIGNAL)
868 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
869 pc
, address
, is_write
, *(unsigned long *)old_set
);
871 /* XXX: locking issue */
872 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
875 /* see if it is an MMU fault */
876 ret
= cpu_sparc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
878 return 0; /* not an MMU fault */
880 return 1; /* the MMU fault was handled without causing real CPU fault */
881 /* now we have a real cpu fault */
884 /* the PC is inside the translated code. It means that we have
885 a virtual CPU fault */
886 cpu_restore_state(tb
, env
, pc
, puc
);
888 /* we restore the process signal mask as the sigreturn should
889 do it (XXX: use sigsetjmp) */
890 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
892 /* never comes here */
895 #elif defined (TARGET_PPC)
896 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
897 int is_write
, sigset_t
*old_set
,
900 TranslationBlock
*tb
;
904 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
905 #if defined(DEBUG_SIGNAL)
906 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
907 pc
, address
, is_write
, *(unsigned long *)old_set
);
909 /* XXX: locking issue */
910 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
914 /* see if it is an MMU fault */
915 ret
= cpu_ppc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
917 return 0; /* not an MMU fault */
919 return 1; /* the MMU fault was handled without causing real CPU fault */
921 /* now we have a real cpu fault */
924 /* the PC is inside the translated code. It means that we have
925 a virtual CPU fault */
926 cpu_restore_state(tb
, env
, pc
, puc
);
930 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
931 env
->nip
, env
->error_code
, tb
);
933 /* we restore the process signal mask as the sigreturn should
934 do it (XXX: use sigsetjmp) */
935 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
938 /* activate soft MMU for this block */
939 cpu_resume_from_signal(env
, puc
);
941 /* never comes here */
945 #elif defined(TARGET_M68K)
946 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
947 int is_write
, sigset_t
*old_set
,
950 TranslationBlock
*tb
;
954 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
955 #if defined(DEBUG_SIGNAL)
956 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
957 pc
, address
, is_write
, *(unsigned long *)old_set
);
959 /* XXX: locking issue */
960 if (is_write
&& page_unprotect(address
, pc
, puc
)) {
963 /* see if it is an MMU fault */
964 ret
= cpu_m68k_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
966 return 0; /* not an MMU fault */
968 return 1; /* the MMU fault was handled without causing real CPU fault */
969 /* now we have a real cpu fault */
972 /* the PC is inside the translated code. It means that we have
973 a virtual CPU fault */
974 cpu_restore_state(tb
, env
, pc
, puc
);
976 /* we restore the process signal mask as the sigreturn should
977 do it (XXX: use sigsetjmp) */
978 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
980 /* never comes here */
984 #elif defined (TARGET_MIPS)
985 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
986 int is_write
, sigset_t
*old_set
,
989 TranslationBlock
*tb
;
993 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
994 #if defined(DEBUG_SIGNAL)
995 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
996 pc
, address
, is_write
, *(unsigned long *)old_set
);
998 /* XXX: locking issue */
999 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1003 /* see if it is an MMU fault */
1004 ret
= cpu_mips_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1006 return 0; /* not an MMU fault */
1008 return 1; /* the MMU fault was handled without causing real CPU fault */
1010 /* now we have a real cpu fault */
1011 tb
= tb_find_pc(pc
);
1013 /* the PC is inside the translated code. It means that we have
1014 a virtual CPU fault */
1015 cpu_restore_state(tb
, env
, pc
, puc
);
1019 printf("PF exception: PC=0x" TARGET_FMT_lx
" error=0x%x %p\n",
1020 env
->PC
, env
->error_code
, tb
);
1022 /* we restore the process signal mask as the sigreturn should
1023 do it (XXX: use sigsetjmp) */
1024 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1027 /* activate soft MMU for this block */
1028 cpu_resume_from_signal(env
, puc
);
1030 /* never comes here */
1034 #elif defined (TARGET_SH4)
1035 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1036 int is_write
, sigset_t
*old_set
,
1039 TranslationBlock
*tb
;
1043 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1044 #if defined(DEBUG_SIGNAL)
1045 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1046 pc
, address
, is_write
, *(unsigned long *)old_set
);
1048 /* XXX: locking issue */
1049 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1053 /* see if it is an MMU fault */
1054 ret
= cpu_sh4_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1056 return 0; /* not an MMU fault */
1058 return 1; /* the MMU fault was handled without causing real CPU fault */
1060 /* now we have a real cpu fault */
1061 tb
= tb_find_pc(pc
);
1063 /* the PC is inside the translated code. It means that we have
1064 a virtual CPU fault */
1065 cpu_restore_state(tb
, env
, pc
, puc
);
1068 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1069 env
->nip
, env
->error_code
, tb
);
1071 /* we restore the process signal mask as the sigreturn should
1072 do it (XXX: use sigsetjmp) */
1073 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1075 /* never comes here */
1079 #elif defined (TARGET_ALPHA)
1080 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1081 int is_write
, sigset_t
*old_set
,
1084 TranslationBlock
*tb
;
1088 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1089 #if defined(DEBUG_SIGNAL)
1090 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1091 pc
, address
, is_write
, *(unsigned long *)old_set
);
1093 /* XXX: locking issue */
1094 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1098 /* see if it is an MMU fault */
1099 ret
= cpu_alpha_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1101 return 0; /* not an MMU fault */
1103 return 1; /* the MMU fault was handled without causing real CPU fault */
1105 /* now we have a real cpu fault */
1106 tb
= tb_find_pc(pc
);
1108 /* the PC is inside the translated code. It means that we have
1109 a virtual CPU fault */
1110 cpu_restore_state(tb
, env
, pc
, puc
);
1113 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1114 env
->nip
, env
->error_code
, tb
);
1116 /* we restore the process signal mask as the sigreturn should
1117 do it (XXX: use sigsetjmp) */
1118 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1120 /* never comes here */
1123 #elif defined (TARGET_CRIS)
1124 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1125 int is_write
, sigset_t
*old_set
,
1128 TranslationBlock
*tb
;
1132 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1133 #if defined(DEBUG_SIGNAL)
1134 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1135 pc
, address
, is_write
, *(unsigned long *)old_set
);
1137 /* XXX: locking issue */
1138 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1142 /* see if it is an MMU fault */
1143 ret
= cpu_cris_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1145 return 0; /* not an MMU fault */
1147 return 1; /* the MMU fault was handled without causing real CPU fault */
1149 /* now we have a real cpu fault */
1150 tb
= tb_find_pc(pc
);
1152 /* the PC is inside the translated code. It means that we have
1153 a virtual CPU fault */
1154 cpu_restore_state(tb
, env
, pc
, puc
);
1156 /* we restore the process signal mask as the sigreturn should
1157 do it (XXX: use sigsetjmp) */
1158 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1160 /* never comes here */
1165 #error unsupported target CPU
1168 #if defined(__i386__)
1170 #if defined(__APPLE__)
1171 # include <sys/ucontext.h>
1173 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1174 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1175 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1176 # define MASK_sig(context) ((context)->uc_sigmask)
1177 #elif defined(__OpenBSD__)
1178 # define EIP_sig(context) ((context)->sc_eip)
1179 # define TRAP_sig(context) ((context)->sc_trapno)
1180 # define ERROR_sig(context) ((context)->sc_err)
1181 # define MASK_sig(context) ((context)->sc_mask)
1183 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1184 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1185 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1186 # define MASK_sig(context) ((context)->uc_sigmask)
1189 int cpu_signal_handler(int host_signum
, void *pinfo
,
1192 siginfo_t
*info
= pinfo
;
1193 #if defined(__OpenBSD__)
1194 struct sigcontext
*uc
= puc
;
1196 struct ucontext
*uc
= puc
;
1205 #define REG_TRAPNO TRAPNO
1208 trapno
= TRAP_sig(uc
);
1209 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1211 (ERROR_sig(uc
) >> 1) & 1 : 0,
1212 &MASK_sig(uc
), puc
);
1215 #elif defined(__x86_64__)
1218 #define PC_sig(context) _UC_MACHINE_PC(context)
1219 #define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
1220 #define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
1221 #define MASK_sig(context) ((context)->uc_sigmask)
1222 #elif defined(__OpenBSD__)
1223 #define PC_sig(context) ((context)->sc_rip)
1224 #define TRAP_sig(context) ((context)->sc_trapno)
1225 #define ERROR_sig(context) ((context)->sc_err)
1226 #define MASK_sig(context) ((context)->sc_mask)
1228 #define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP])
1229 #define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1230 #define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1231 #define MASK_sig(context) ((context)->uc_sigmask)
1234 int cpu_signal_handler(int host_signum
, void *pinfo
,
1237 siginfo_t
*info
= pinfo
;
1240 ucontext_t
*uc
= puc
;
1241 #elif defined(__OpenBSD__)
1242 struct sigcontext
*uc
= puc
;
1244 struct ucontext
*uc
= puc
;
1248 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1249 TRAP_sig(uc
) == 0xe ?
1250 (ERROR_sig(uc
) >> 1) & 1 : 0,
1251 &MASK_sig(uc
), puc
);
1254 #elif defined(_ARCH_PPC)
1256 /***********************************************************************
1257 * signal context platform-specific definitions
1261 /* All Registers access - only for local access */
1262 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1263 /* Gpr Registers access */
1264 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1265 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1266 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1267 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1268 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1269 # define LR_sig(context) REG_sig(link, context) /* Link register */
1270 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1271 /* Float Registers access */
1272 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1273 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1274 /* Exception Registers access */
1275 # define DAR_sig(context) REG_sig(dar, context)
1276 # define DSISR_sig(context) REG_sig(dsisr, context)
1277 # define TRAP_sig(context) REG_sig(trap, context)
1281 # include <sys/ucontext.h>
1282 typedef struct ucontext SIGCONTEXT
;
1283 /* All Registers access - only for local access */
1284 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1285 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1286 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1287 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1288 /* Gpr Registers access */
1289 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1290 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1291 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1292 # define CTR_sig(context) REG_sig(ctr, context)
1293 # define XER_sig(context) REG_sig(xer, context) /* Link register */
1294 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1295 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1296 /* Float Registers access */
1297 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1298 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1299 /* Exception Registers access */
1300 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1301 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1302 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1303 #endif /* __APPLE__ */
1305 int cpu_signal_handler(int host_signum
, void *pinfo
,
1308 siginfo_t
*info
= pinfo
;
1309 struct ucontext
*uc
= puc
;
1317 if (DSISR_sig(uc
) & 0x00800000)
1320 if (TRAP_sig(uc
) != 0x400 && (DSISR_sig(uc
) & 0x02000000))
1323 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1324 is_write
, &uc
->uc_sigmask
, puc
);
1327 #elif defined(__alpha__)
1329 int cpu_signal_handler(int host_signum
, void *pinfo
,
1332 siginfo_t
*info
= pinfo
;
1333 struct ucontext
*uc
= puc
;
1334 uint32_t *pc
= uc
->uc_mcontext
.sc_pc
;
1335 uint32_t insn
= *pc
;
1338 /* XXX: need kernel patch to get write flag faster */
1339 switch (insn
>> 26) {
1354 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1355 is_write
, &uc
->uc_sigmask
, puc
);
1357 #elif defined(__sparc__)
1359 int cpu_signal_handler(int host_signum
, void *pinfo
,
1362 siginfo_t
*info
= pinfo
;
1365 #if !defined(__arch64__) || defined(HOST_SOLARIS)
1366 uint32_t *regs
= (uint32_t *)(info
+ 1);
1367 void *sigmask
= (regs
+ 20);
1368 /* XXX: is there a standard glibc define ? */
1369 unsigned long pc
= regs
[1];
1372 struct sigcontext
*sc
= puc
;
1373 unsigned long pc
= sc
->sigc_regs
.tpc
;
1374 void *sigmask
= (void *)sc
->sigc_mask
;
1375 #elif defined(__OpenBSD__)
1376 struct sigcontext
*uc
= puc
;
1377 unsigned long pc
= uc
->sc_pc
;
1378 void *sigmask
= (void *)(long)uc
->sc_mask
;
1382 /* XXX: need kernel patch to get write flag faster */
1384 insn
= *(uint32_t *)pc
;
1385 if ((insn
>> 30) == 3) {
1386 switch((insn
>> 19) & 0x3f) {
1398 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1399 is_write
, sigmask
, NULL
);
1402 #elif defined(__arm__)
1404 int cpu_signal_handler(int host_signum
, void *pinfo
,
1407 siginfo_t
*info
= pinfo
;
1408 struct ucontext
*uc
= puc
;
1412 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1413 pc
= uc
->uc_mcontext
.gregs
[R15
];
1415 pc
= uc
->uc_mcontext
.arm_pc
;
1417 /* XXX: compute is_write */
1419 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1421 &uc
->uc_sigmask
, puc
);
1424 #elif defined(__mc68000)
1426 int cpu_signal_handler(int host_signum
, void *pinfo
,
1429 siginfo_t
*info
= pinfo
;
1430 struct ucontext
*uc
= puc
;
1434 pc
= uc
->uc_mcontext
.gregs
[16];
1435 /* XXX: compute is_write */
1437 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1439 &uc
->uc_sigmask
, puc
);
1442 #elif defined(__ia64)
1445 /* This ought to be in <bits/siginfo.h>... */
1446 # define __ISR_VALID 1
1449 int cpu_signal_handler(int host_signum
, void *pinfo
, void *puc
)
1451 siginfo_t
*info
= pinfo
;
1452 struct ucontext
*uc
= puc
;
1456 ip
= uc
->uc_mcontext
.sc_ip
;
1457 switch (host_signum
) {
1463 if (info
->si_code
&& (info
->si_segvflags
& __ISR_VALID
))
1464 /* ISR.W (write-access) is bit 33: */
1465 is_write
= (info
->si_isr
>> 33) & 1;
1471 return handle_cpu_signal(ip
, (unsigned long)info
->si_addr
,
1473 &uc
->uc_sigmask
, puc
);
1476 #elif defined(__s390__)
1478 int cpu_signal_handler(int host_signum
, void *pinfo
,
1481 siginfo_t
*info
= pinfo
;
1482 struct ucontext
*uc
= puc
;
1486 pc
= uc
->uc_mcontext
.psw
.addr
;
1487 /* XXX: compute is_write */
1489 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1490 is_write
, &uc
->uc_sigmask
, puc
);
1493 #elif defined(__mips__)
1495 int cpu_signal_handler(int host_signum
, void *pinfo
,
1498 siginfo_t
*info
= pinfo
;
1499 struct ucontext
*uc
= puc
;
1500 greg_t pc
= uc
->uc_mcontext
.pc
;
1503 /* XXX: compute is_write */
1505 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1506 is_write
, &uc
->uc_sigmask
, puc
);
1509 #elif defined(__hppa__)
1511 int cpu_signal_handler(int host_signum
, void *pinfo
,
1514 struct siginfo
*info
= pinfo
;
1515 struct ucontext
*uc
= puc
;
1519 pc
= uc
->uc_mcontext
.sc_iaoq
[0];
1520 /* FIXME: compute is_write */
1522 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1524 &uc
->uc_sigmask
, puc
);
1529 #error host CPU specific signal handler needed
1533 #endif /* !defined(CONFIG_SOFTMMU) */