/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#define CPU_NO_GLOBAL_REGS

#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
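
/* Note: cpu_resume_from_signal() leaves the signal handler via
   longjmp() rather than sigreturn(), so the kernel never gets a
   chance to restore the blocked-signal mask saved in the
   ucontext/sigcontext; it has to be reset by hand above, otherwise
   the fault signal would stay blocked after execution resumes. */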
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
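
/* Note on the next_tb encoding used above and in cpu_exec(): the value
   returned by tcg_qemu_tb_exec() is the address of the last executed
   TB with a tag in its two low bits.  Tags 0 and 1 name the jump slot
   through which the TB was left (later handed to tb_add_jump() for
   block chaining); tag 2 marks an instruction-count exit taken before
   the TB body ran, which is why (next_tb & 3) == 2 triggers a PC
   restore from the TB. */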
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
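
/* Note: tb_find_fast() is a one-entry-per-bucket virtual-PC cache
   (tb_jmp_cache) in front of the physically indexed tb_phys_hash
   walked by tb_find_slow(); the fast path costs one load plus a few
   field compares, and the slow path is only taken on a cache miss or
   when cs_base/flags no longer match the cached TB. */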
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
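            /* Note: everything below runs under this setjmp context:
               cpu_loop_exit() and the fault paths longjmp back to the
               setjmp above with value 1, so the block is skipped and
               the outer loop re-enters, handling any pending
               exception_index at the top of the next pass. */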
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
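
            /* Note (inferred from the checks above, not from separate
               kqemu documentation): kqemu_cpu_exec() returning 1 means
               a guest exception is pending and is delivered through
               the longjmp path, 2 means the block needs the soft-MMU
               (TCG) path below, and anything else restarts the loop
               unless a hardware interrupt must be serviced first. */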
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
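                    /* Common pattern in the per-target branches above:
                       acknowledge the source in interrupt_request,
                       queue the exception via do_interrupt(), and
                       clear next_tb so the next TB is looked up fresh
                       instead of being chained from the old one. */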
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;
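
                /* Note: tb_add_jump() above implements block chaining:
                   it patches the given jump slot of the previous TB to
                   branch natively into this one, so on later passes the
                   pair runs back-to-back without returning to this
                   loop.  A TB that spans two pages is never made the
                   target of a direct jump because its second page can
                   be invalidated independently. */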
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        int insns_left;
                        /* Instruction counter expired.  */
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
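
                /* Note on the icount fields used above:
                   icount_decr.u16.low is the 16-bit budget decremented
                   by the generated code, and icount_extra holds the
                   part of the budget that does not fit in 16 bits, so
                   the decrementer is refilled in slices of at most
                   0xffff instructions. */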
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* inner for(;;) */
        } /* if (setjmp(...) == 0) */
    } /* outer for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
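
/* Return value convention shared by every handle_cpu_signal() variant
   below: 0 means the fault did not come from the emulated CPU and the
   caller should let the host deal with it; 1 means it was handled here
   (the page was unprotected, or a guest exception was raised) and
   execution can resume. */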
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
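
/* Note: on x86 hosts, trap number 0xe is the page-fault vector and
   bit 1 of the page-fault error code is set for write accesses, which
   is how is_write is derived in the handler above. */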
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define REG_ERR _REG_ERR
#define REG_TRAPNO _REG_TRAPNO

#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          _UC_MACHINE_PC(uc)
#else
#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = QEMU_UC_MACHINE_PC(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
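
/* Note: in the SPARC decode above, instructions with op (bits 31:30)
   equal to 3 are the load/store group and the op3 field (bits 24:19)
   selects the operation, so matching op3 against the store opcodes
   recovers is_write without kernel assistance. */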
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */