/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
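/* cpu_resume_from_signal() is the signal-handler counterpart of
   cpu_loop_exit(): because the longjmp above skips the normal sigreturn
   path out of the handler, the signal mask that sigreturn would have
   restored must first be reset by hand. */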
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        CPU_PC_FROM_TB(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
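/* Encoding of next_tb, here and in cpu_exec() below: tcg_qemu_tb_exec()
   returns the address of the last executed TranslationBlock with status
   bits in the two low bits; (next_tb & 3) == 2 means the instruction
   counter expired before the block ran, so the PC must be restored from
   the TB itself. */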
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
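/* tb_find_slow() is the second-level lookup: TBs are hashed by guest
   physical PC so the cache survives guest page-table changes.
   tb_find_fast() below consults the per-CPU virtual-PC table
   env->tb_jmp_cache first and only falls back here on a miss. */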
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
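/* The (pc, cs_base, flags) triple computed above must capture every
   piece of CPU state that influenced code generation; a TB is only
   reused when all three match, otherwise stale translated code could
   run under an incompatible CPU mode. */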
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
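    /* hostregs_helper.h is included twice with different macros set:
       DECLARE_HOST_REGS above declares shadow variables for the host
       registers that TCG reserves for globals, and SAVE_HOST_REGS fills
       them; a third include near the end of cpu_exec() restores them. */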
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
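            /* Any longjmp(env->jmp_env, 1) - from cpu_loop_exit(), the
               host signal handlers or kqemu - lands here with setjmp()
               returning nonzero; the else branch below then reloads the
               registers and the outer for(;;) re-arms the jump buffer. */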
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
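                /* next_tb == 0 means "do not chain": whenever the
                   program flow may have changed (interrupt delivered,
                   TB invalidated, exception), patching the previously
                   executed TB to jump here directly would be unsafe. */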
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
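                    /* The HF2_* tests above encode the SVM (AMD hardware
                       virtualization) rules: GIF gates every interrupt
                       source, HF2_VINTR/HF2_HIF select between virtual
                       and physical interrupt masking while a guest runs,
                       and each delivery point first offers an exit to the
                       intercept handler via svm_check_intercept(). */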
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
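                /* tb_add_jump() patches jump slot (next_tb & 3) of the
                   previously executed TB so that it branches directly to
                   tb->tc_ptr, bypassing this lookup on later executions;
                   as noted above, a TB spanning two pages is never
                   chained to. */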
                env->current_tb = tb;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
& 3) == 2) {
623 /* Instruction counter expired. */
625 tb
= (TranslationBlock
*)(long)(next_tb
& ~3);
627 CPU_PC_FROM_TB(env
, tb
);
628 insns_left
= env
->icount_decr
.u32
;
629 if (env
->icount_extra
&& insns_left
>= 0) {
630 /* Refill decrementer and continue execution. */
631 env
->icount_extra
+= insns_left
;
632 if (env
->icount_extra
> 0xffff) {
635 insns_left
= env
->icount_extra
;
637 env
->icount_extra
-= insns_left
;
638 env
->icount_decr
.u16
.low
= insns_left
;
640 if (insns_left
> 0) {
641 /* Execute remaining instructions. */
642 cpu_exec_nocache(insns_left
, tb
);
644 env
->exception_index
= EXCP_INTERRUPT
;
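                /* The instruction counter lives in two parts: generated
                   code decrements the 16-bit icount_decr.u16.low
                   directly, while icount_extra holds the remainder;
                   refilling in chunks of at most 0xffff keeps the in-TB
                   counter within 16 bits. */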
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* inner for(;;) */
        } else {
            env_to_regs();
        }
    } /* outer for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
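/* The wrappers above follow the saved_env pattern used throughout the
   user-mode helpers: the TCG helpers operate on the global env (which
   may live in a fixed host register), so the caller's CPUX86State is
   swapped in for the duration of the call and restored afterwards. */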
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
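/* Convention shared by every handle_cpu_signal() variant below: return
   1 if the fault was fully handled (write to a page holding translated
   code, or an MMU fault serviced by the target handler), return 0 if it
   was not an MMU fault so the caller falls back to the default signal
   behaviour; a fault that must become a guest exception never returns,
   it longjmps back into cpu_exec(). */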
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
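/* On x86 hosts, trap number 0xe is the page fault; bit 1 of the error
   code distinguishes writes from reads, which is exactly the is_write
   information handle_cpu_signal() needs for page_unprotect(). */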
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
1210 #elif defined(__powerpc__)
1212 /***********************************************************************
1213 * signal context platform-specific definitions
1217 /* All Registers access - only for local access */
1218 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1219 /* Gpr Registers access */
1220 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1221 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1222 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1223 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1224 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1225 # define LR_sig(context) REG_sig(link, context) /* Link register */
1226 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1227 /* Float Registers access */
1228 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1229 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1230 /* Exception Registers access */
1231 # define DAR_sig(context) REG_sig(dar, context)
1232 # define DSISR_sig(context) REG_sig(dsisr, context)
1233 # define TRAP_sig(context) REG_sig(trap, context)
1237 # include <sys/ucontext.h>
1238 typedef struct ucontext SIGCONTEXT
;
1239 /* All Registers access - only for local access */
1240 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1241 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1242 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1243 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1244 /* Gpr Registers access */
1245 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1246 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1247 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1248 # define CTR_sig(context) REG_sig(ctr, context)
1249 # define XER_sig(context) REG_sig(xer, context) /* Link register */
1250 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1251 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1252 /* Float Registers access */
1253 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1254 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1255 /* Exception Registers access */
1256 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1257 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1258 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1259 #endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
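/* Hosts whose signal context does not carry the access type (alpha and
   sparc above) recover is_write by decoding the faulting instruction
   and matching store opcodes; the remaining hosts below simply pass
   is_write = 0 and lose the page_unprotect() fast path. */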
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID	1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */