/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#define CPU_NO_GLOBAL_REGS

#if !defined(TARGET_IA64)
#include "tcg.h"
#endif

#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
int tb_invalidated_flag;

//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
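/* A sketch of the control flow around jmp_env (illustration only, based on
   what this file already does): cpu_exec() arms env->jmp_env with setjmp(),
   and every path that raises a guest exception funnels into cpu_loop_exit():

       if (setjmp(env->jmp_env) == 0) {
           // normal path: find and run translated blocks
       } else {
           // longjmp target: a pending exception or exit request
       }

   This is plain C setjmp/longjmp unwinding; no host stack frames between
   the two points are cleaned up, which is why the registers must be synced
   back by hand first. */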
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
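/* Usage sketch for the hook above (hypothetical client code, e.g. a
   debugger front end; my_debug_hook is not a real QEMU symbol):

       static void my_debug_hook(CPUState *env)
       {
           // inspect env->watchpoint_hit, report to the debugger
       }

       CPUDebugExcpHandler *prev = cpu_set_debug_excp_handler(my_debug_hook);

   The old handler is returned so callers can chain to it or restore it. */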
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
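    /* Background on the "CPU temporary format" used for TARGET_I386 above:
       condition codes are evaluated lazily.  CC_SRC holds the data of the
       last flag-setting operation, CC_OP says which operation it was, and
       helper_cc_compute_all(CC_OP) materializes the architectural eflags
       bits on demand.  CC_OP_EFLAGS means "CC_SRC already contains the flag
       bits verbatim".  DF is kept as +1 or -1 so string instructions can
       advance ESI/EDI by adding DF directly; e.g. a guest eflags with
       bit 10 (DF) set becomes DF == -1 here. */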
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
#endif /* CONFIG_USER_ONLY */
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            /* kvm vcpu threads */
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;
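                /* Chaining note: when the previous TB ended in a direct
                   jump and this TB fits on one page, tb_add_jump() above
                   patches jump slot (next_tb & 3) of the TB recovered from
                   (next_tb & ~3) to branch straight to tb->tc_ptr,
                   bypassing this lookup on later executions.  Setting
                   next_tb = 0, as the interrupt code does, prevents such
                   patching whenever program flow was changed. */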
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
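                /* icount arithmetic sketch (values illustrative): the
                   16-bit decrementer icount_decr.u16.low counts down per
                   guest instruction and icount_extra banks the remainder.
                   With icount_extra == 0x12345 and insns_left == 0 at
                   expiry, the refill path above sets u16.low = 0xffff and
                   leaves icount_extra == 0x12345 - 0xffff == 0x2346 for
                   the next round. */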
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* inner for(;;) */
        } else {
            env_to_regs();
        }
    } /* outer for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
#endif
    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
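/* Caller's view of cpu_exec(), a simplified sketch of the surrounding main
   loop (the real ones live in vl.c and the *-user main loops):

       for (;;) {
           int ret = cpu_exec(env);
           if (ret == EXCP_DEBUG) {
               // stopped on a breakpoint or watchpoint
           } else if (ret == EXCP_HALTED) {
               // CPU is halted, wait for an interrupt
           }
           // EXCP_INTERRUPT: service timers and I/O, then loop
       }

   Ordinary guest exceptions never reach the caller; they are delivered
   inside cpu_exec() and execution continues. */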
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
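/* Real-mode refresher for the branch above: with CR0.PE clear (or in VM86
   mode) a segment's base is just selector << 4 with a 64 KiB limit, so
   e.g. selector 0x1234 yields base 0x12340 and no descriptor tables are
   consulted; protected-mode loads take the helper_load_seg() path, which
   performs the full GDT/LDT checks. */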
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
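/* How a handler of this shape is typically installed (generic POSIX
   sketch; the actual registration is done elsewhere in QEMU, and
   host_segv_handler here is a hypothetical wrapper around
   cpu_signal_handler):

       struct sigaction act;
       memset(&act, 0, sizeof(act));
       act.sa_sigaction = host_segv_handler;
       act.sa_flags = SA_SIGINFO;   // deliver siginfo_t and ucontext
       sigaction(SIGSEGV, &act, NULL);

   SA_SIGINFO is what provides the pinfo/puc arguments seen above. */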
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define REG_ERR _REG_ERR
#define REG_TRAPNO _REG_TRAPNO

#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          _UC_MACHINE_PC(uc)
#else
#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = QEMU_UC_MACHINE_PC(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)         REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                  REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                  REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                  REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                   REG_sig(link, context) /* Link register */
# define CR_sig(context)                   REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)       (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)                (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                  REG_sig(dar, context)
# define DSISR_sig(context)                REG_sig(dsisr, context)
# define TRAP_sig(context)                 REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)   ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)   ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)     ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)         REG_sig(r##reg_num, context)
# define IAR_sig(context)                  REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                  REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                  REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                   REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                   REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)       FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                  EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)                EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                 EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
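/* Decoding rationale for the switch above: Alpha major opcodes occupy
   bits 31..26, so insn >> 26 isolates them, and every opcode listed is a
   store variant; a fault raised by a store must be a write access.  E.g.
   a SIGSEGV whose faulting instruction decodes to stl (0x2c) sets
   is_write = 1. */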
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */