/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand at this point because
       longjmp() restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
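
/* A note on the mechanism, inferred from this function and from
   cpu_exec() below: cpu_loop_exit() unwinds straight back to the
   setjmp(env->jmp_env) point in cpu_exec(), so any guest state still
   living in host registers would be lost, which is presumably why
   regs_to_env() is called first. */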
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
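
/* Both exits funnel through env->jmp_env: cpu_loop_exit() for exits
   requested from generated code, and cpu_resume_from_signal() for exits
   from a host signal handler, which additionally has to undo the signal
   mask the kernel installed for the handler (the sigprocmask calls
   above). */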
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
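
/* A note on the tag bits, as suggested by the uses above and below: the
   value returned by tcg_qemu_tb_exec() carries the address of the TB
   that exited in its upper bits, with the low two bits used as a tag.
   Values 0/1 appear to name the direct-jump slot used for chaining (see
   tb_add_jump() in cpu_exec()), while 2 marks an icount-driven exit
   taken before the block body ran, which is why only that case needs
   the PC restored from the TB. */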
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;
    regs_to_env(); /* XXX: do it just before cpu_gen_code() */
    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
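
/* Lookup strategy, for orientation: tb_find_fast() below first probes
   the per-CPU virtual-PC hash (tb_jmp_cache); only on a miss does it
   fall back to this slower, physically indexed hash walk, which also
   re-checks cs_base/flags and, for TBs spanning a page boundary, the
   second physical page. */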
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
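
/* The jmp_cache consulted above is only a hint: entries are indexed by
   virtual PC alone, so the pc/cs_base/flags comparison is what actually
   validates a hit before the TB is reused. */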
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
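
/* A debugger front end (the gdbstub, for instance) can install its own
   handler through cpu_set_debug_excp_handler(); since the previous
   handler is returned, callers could in principle chain handlers. */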
/* main execution loop */
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
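                    /* An assumption worth recording: the 0xfffffff0
                       bound used above corresponds to the ARMv7-M
                       magic exception-return addresses at the very
                       top of the address space; any PC below it is
                       treated as ordinary code. */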
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;
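                /* tb_add_jump() above patches the direct-jump slot
                   (next_tb & 3) of the previously executed TB so that
                   it branches straight into tb's generated code,
                   bypassing this lookup on later iterations;
                   next_tb & ~3 recovers the caller TB pointer from the
                   tagged value. */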
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
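                /* icount bookkeeping, as reconstructed from the fields
                   used above: generated code decrements
                   icount_decr.u16.low and exits with tag 2 when it hits
                   zero; icount_extra holds the instructions that did
                   not fit in that 16-bit budget, so we either refill
                   the decrementer from it or finish the remainder with
                   cpu_exec_nocache() before raising EXCP_INTERRUPT. */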
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) forever */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
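
/* In real mode or vm86 mode the segment base is simply selector << 4
   with a 64 KiB limit, which is what the fast path above loads into
   the segment cache; protected mode goes through the full descriptor
   load in helper_load_seg(). */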
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}
*s
, target_ulong ptr
, int data32
)
738 CPUX86State
*saved_env
;
743 helper_frstor(ptr
, data32
);
748 #endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
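
/* Return convention assumed by this handler and the per-target copies
   below: cpu_*_handle_mmu_fault() < 0 means the access was not an MMU
   fault at all, 0 means the fault was transparently serviced, and > 0
   means a guest fault that must now be delivered to the virtual CPU. */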
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define REG_ERR _REG_ERR
#define REG_TRAPNO _REG_TRAPNO

#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          _UC_MACHINE_PC(uc)
#else
#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = QEMU_UC_MACHINE_PC(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)             ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)        ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)        ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)          ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)              REG_sig(r##reg_num, context)
# define IAR_sig(context)                       REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                       REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                       REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                       REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                        REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                        REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)            FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                     ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                       EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)                     EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                      EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: /* stw */
    case 0x0e: /* stb */
    case 0x0f: /* stq_u */
    case 0x24: /* stf */
    case 0x25: /* stg */
    case 0x26: /* sts */
    case 0x27: /* stt */
    case 0x2c: /* stl */
    case 0x2d: /* stq */
    case 0x2e: /* stl_c */
    case 0x2f: /* stq_c */
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x0e: // stx
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
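
/* Like the Alpha case above, this store-opcode switch is a stand-in for
   a faster kernel-provided write flag; only the major opcode bits are
   inspected, so anything unrecognized conservatively yields
   is_write = 0. */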
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */