/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
24 #if !defined(CONFIG_SOFTMMU)
35 #include <sys/ucontext.h>
/* Set by tb_find_slow() when TBs were invalidated while generating code;
   tb_find_fast() checks it to recompute the jump-cache hash entry. */
40 int tb_invalidated_flag
;
43 //#define DEBUG_SIGNAL
/* Default no-ops; redefined below for sparc hosts with old glibc
   versions that mangle global register contents (env, %i7). */
45 #define SAVE_GLOBALS()
46 #define RESTORE_GLOBALS()
48 #if defined(__sparc__) && !defined(HOST_SOLARIS)
50 #if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
51 ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
52 // Work around ugly bugs in glibc that mangle global register contents
/* Spill slots for globals normally held in host registers: env, %i7,
   and (presumably) T0 — saved/restored by the SAVE_GLOBALS /
   RESTORE_GLOBALS asm sequences below to work around the glibc bug. */
54 static volatile void *saved_env
;
55 static volatile unsigned long saved_t0
, saved_i7
;
57 #define SAVE_GLOBALS() do { \
60 asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7)); \
63 #undef RESTORE_GLOBALS
64 #define RESTORE_GLOBALS() do { \
65 env = (void *)saved_env; \
67 asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7)); \
70 static int sparc_setjmp(jmp_buf buf
)
80 #define setjmp(jmp_buf) sparc_setjmp(jmp_buf)
82 static void sparc_longjmp(jmp_buf buf
, int val
)
87 #define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
91 void cpu_loop_exit(void)
93 /* NOTE: the register at this point must be saved by hand because
94 longjmp restore them */
96 longjmp(env
->jmp_env
, 1);
99 #if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
103 /* exit the current TB from a signal handler. The host registers are
104 restored in a state compatible with the CPU emulator
106 void cpu_resume_from_signal(CPUState
*env1
, void *puc
)
108 #if !defined(CONFIG_SOFTMMU)
109 struct ucontext
*uc
= puc
;
114 /* XXX: restore cpu registers saved in host registers */
116 #if !defined(CONFIG_SOFTMMU)
118 /* XXX: use siglongjmp ? */
119 sigprocmask(SIG_SETMASK
, &uc
->uc_sigmask
, NULL
);
122 longjmp(env
->jmp_env
, 1);
125 static TranslationBlock
*tb_find_slow(target_ulong pc
,
126 target_ulong cs_base
,
129 TranslationBlock
*tb
, **ptb1
;
132 target_ulong phys_pc
, phys_page1
, phys_page2
, virt_page2
;
137 tb_invalidated_flag
= 0;
139 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
141 /* find translated block using physical mappings */
142 phys_pc
= get_phys_addr_code(env
, pc
);
143 phys_page1
= phys_pc
& TARGET_PAGE_MASK
;
145 h
= tb_phys_hash_func(phys_pc
);
146 ptb1
= &tb_phys_hash
[h
];
152 tb
->page_addr
[0] == phys_page1
&&
153 tb
->cs_base
== cs_base
&&
154 tb
->flags
== flags
) {
155 /* check next page if needed */
156 if (tb
->page_addr
[1] != -1) {
157 virt_page2
= (pc
& TARGET_PAGE_MASK
) +
159 phys_page2
= get_phys_addr_code(env
, virt_page2
);
160 if (tb
->page_addr
[1] == phys_page2
)
166 ptb1
= &tb
->phys_hash_next
;
169 /* if no translated code available, then translate it now */
172 /* flush must be done */
174 /* cannot fail at this point */
176 /* don't forget to invalidate previous TB info */
177 tb_invalidated_flag
= 1;
179 tc_ptr
= code_gen_ptr
;
181 tb
->cs_base
= cs_base
;
184 cpu_gen_code(env
, tb
, &code_gen_size
);
186 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
188 /* check next page if needed */
189 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
191 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
192 phys_page2
= get_phys_addr_code(env
, virt_page2
);
194 tb_link_phys(tb
, phys_pc
, phys_page2
);
197 /* we add the TB in the virtual pc hash table */
198 env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)] = tb
;
199 spin_unlock(&tb_lock
);
203 static inline TranslationBlock
*tb_find_fast(void)
205 TranslationBlock
*tb
;
206 target_ulong cs_base
, pc
;
209 /* we record a subset of the CPU state. It will
210 always be the same before a given translated block
212 #if defined(TARGET_I386)
214 flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
215 flags
|= env
->intercept
;
216 cs_base
= env
->segs
[R_CS
].base
;
217 pc
= cs_base
+ env
->eip
;
218 #elif defined(TARGET_ARM)
219 flags
= env
->thumb
| (env
->vfp
.vec_len
<< 1)
220 | (env
->vfp
.vec_stride
<< 4);
221 if ((env
->uncached_cpsr
& CPSR_M
) != ARM_CPU_MODE_USR
)
223 if (env
->vfp
.xregs
[ARM_VFP_FPEXC
] & (1 << 30))
225 flags
|= (env
->condexec_bits
<< 8);
228 #elif defined(TARGET_SPARC)
229 #ifdef TARGET_SPARC64
230 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
231 flags
= (((env
->pstate
& PS_PEF
) >> 1) | ((env
->fprs
& FPRS_FEF
) << 2))
232 | (env
->pstate
& PS_PRIV
) | ((env
->lsu
& (DMMU_E
| IMMU_E
)) >> 2);
234 // FPU enable . Supervisor
235 flags
= (env
->psref
<< 4) | env
->psrs
;
239 #elif defined(TARGET_PPC)
243 #elif defined(TARGET_MIPS)
244 flags
= env
->hflags
& (MIPS_HFLAG_TMASK
| MIPS_HFLAG_BMASK
);
246 pc
= env
->PC
[env
->current_tc
];
247 #elif defined(TARGET_M68K)
248 flags
= (env
->fpcr
& M68K_FPCR_PREC
) /* Bit 6 */
249 | (env
->sr
& SR_S
) /* Bit 13 */
250 | ((env
->macsr
>> 4) & 0xf); /* Bits 0-3 */
253 #elif defined(TARGET_SH4)
257 #elif defined(TARGET_ALPHA)
261 #elif defined(TARGET_CRIS)
265 #elif defined(TARGET_IA64)
267 cs_base
= 0; /* XXXXX */
270 #error unsupported CPU
272 tb
= env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)];
273 if (__builtin_expect(!tb
|| tb
->pc
!= pc
|| tb
->cs_base
!= cs_base
||
274 tb
->flags
!= flags
, 0)) {
275 tb
= tb_find_slow(pc
, cs_base
, flags
);
276 /* Note: we do it here to avoid a gcc bug on Mac OS X when
277 doing it in tb_find_slow */
278 if (tb_invalidated_flag
) {
279 /* as some TB could have been invalidated because
280 of memory exceptions while generating the code, we
281 must recompute the hash index here */
/* Clearing T0 prevents patching a direct jump from the previous TB,
   forcing a fresh TB lookup in the main loop (used after interrupts
   change the program flow). */
288 #define BREAK_CHAIN T0 = 0
290 /* main execution loop */
292 int cpu_exec(CPUState
*env1
)
294 #define DECLARE_HOST_REGS 1
295 #include "hostregs_helper.h"
296 #if defined(TARGET_SPARC)
297 #if defined(reg_REGWPTR)
298 uint32_t *saved_regwptr
;
301 int ret
, interrupt_request
;
302 long (*gen_func
)(void);
303 TranslationBlock
*tb
;
306 if (cpu_halted(env1
) == EXCP_HALTED
)
309 cpu_single_env
= env1
;
311 /* first we save global registers */
312 #define SAVE_HOST_REGS 1
313 #include "hostregs_helper.h"
318 #if defined(TARGET_I386)
319 /* put eflags in CPU temporary format */
320 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
321 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
322 CC_OP
= CC_OP_EFLAGS
;
323 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
324 #elif defined(TARGET_SPARC)
325 #if defined(reg_REGWPTR)
326 saved_regwptr
= REGWPTR
;
328 #elif defined(TARGET_M68K)
329 env
->cc_op
= CC_OP_FLAGS
;
330 env
->cc_dest
= env
->sr
& 0xf;
331 env
->cc_x
= (env
->sr
>> 4) & 1;
332 #elif defined(TARGET_ALPHA)
333 #elif defined(TARGET_ARM)
334 #elif defined(TARGET_PPC)
335 #elif defined(TARGET_MIPS)
336 #elif defined(TARGET_SH4)
337 #elif defined(TARGET_CRIS)
338 #elif defined(TARGET_IA64)
341 #error unsupported target CPU
343 env
->exception_index
= -1;
345 /* prepare setjmp context for exception handling */
347 if (setjmp(env
->jmp_env
) == 0) {
348 env
->current_tb
= NULL
;
349 /* if an exception is pending, we execute it here */
350 if (env
->exception_index
>= 0) {
351 if (env
->exception_index
>= EXCP_INTERRUPT
) {
352 /* exit request from the cpu execution loop */
353 ret
= env
->exception_index
;
355 } else if (env
->user_mode_only
) {
356 /* if user mode only, we simulate a fake exception
357 which will be handled outside the cpu execution
359 #if defined(TARGET_I386)
360 do_interrupt_user(env
->exception_index
,
361 env
->exception_is_int
,
363 env
->exception_next_eip
);
365 ret
= env
->exception_index
;
368 #if defined(TARGET_I386)
369 /* simulate a real cpu exception. On i386, it can
370 trigger new exceptions, but we do not handle
371 double or triple faults yet. */
372 do_interrupt(env
->exception_index
,
373 env
->exception_is_int
,
375 env
->exception_next_eip
, 0);
376 /* successfully delivered */
377 env
->old_exception
= -1;
378 #elif defined(TARGET_PPC)
380 #elif defined(TARGET_MIPS)
382 #elif defined(TARGET_SPARC)
383 do_interrupt(env
->exception_index
);
384 #elif defined(TARGET_ARM)
386 #elif defined(TARGET_SH4)
388 #elif defined(TARGET_ALPHA)
390 #elif defined(TARGET_CRIS)
392 #elif defined(TARGET_M68K)
394 #elif defined(TARGET_IA64)
398 env
->exception_index
= -1;
401 if (kqemu_is_ok(env
) && env
->interrupt_request
== 0) {
403 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
404 ret
= kqemu_cpu_exec(env
);
405 /* put eflags in CPU temporary format */
406 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
407 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
408 CC_OP
= CC_OP_EFLAGS
;
409 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
412 longjmp(env
->jmp_env
, 1);
413 } else if (ret
== 2) {
414 /* softmmu execution needed */
416 if (env
->interrupt_request
!= 0) {
417 /* hardware interrupt will be executed just after */
419 /* otherwise, we restart */
420 longjmp(env
->jmp_env
, 1);
428 longjmp(env
->jmp_env
, 1);
430 T0
= 0; /* force lookup of first TB */
433 interrupt_request
= env
->interrupt_request
;
434 if (__builtin_expect(interrupt_request
, 0)
435 #if defined(TARGET_I386)
436 && env
->hflags
& HF_GIF_MASK
439 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
440 env
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
441 env
->exception_index
= EXCP_DEBUG
;
444 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
445 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
446 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
447 env
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
449 env
->exception_index
= EXCP_HLT
;
453 #if defined(TARGET_I386)
454 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
455 !(env
->hflags
& HF_SMM_MASK
)) {
456 svm_check_intercept(SVM_EXIT_SMI
);
457 env
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
460 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
461 (env
->eflags
& IF_MASK
|| env
->hflags
& HF_HIF_MASK
) &&
462 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
464 svm_check_intercept(SVM_EXIT_INTR
);
465 env
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VIRQ
);
466 intno
= cpu_get_pic_interrupt(env
);
467 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
468 fprintf(logfile
, "Servicing hardware INT=0x%02x\n", intno
);
470 do_interrupt(intno
, 0, 0, 0, 1);
471 /* ensure that no TB jump will be modified as
472 the program flow was changed */
474 #if !defined(CONFIG_USER_ONLY)
475 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
476 (env
->eflags
& IF_MASK
) && !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
478 /* FIXME: this should respect TPR */
479 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
480 svm_check_intercept(SVM_EXIT_VINTR
);
481 intno
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_vector
));
482 if (loglevel
& CPU_LOG_TB_IN_ASM
)
483 fprintf(logfile
, "Servicing virtual hardware INT=0x%02x\n", intno
);
484 do_interrupt(intno
, 0, 0, -1, 1);
485 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
),
486 ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
)) & ~V_IRQ_MASK
);
490 #elif defined(TARGET_PPC)
492 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
496 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
497 ppc_hw_interrupt(env
);
498 if (env
->pending_interrupts
== 0)
499 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
502 #elif defined(TARGET_MIPS)
503 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
504 (env
->CP0_Status
& env
->CP0_Cause
& CP0Ca_IP_mask
) &&
505 (env
->CP0_Status
& (1 << CP0St_IE
)) &&
506 !(env
->CP0_Status
& (1 << CP0St_EXL
)) &&
507 !(env
->CP0_Status
& (1 << CP0St_ERL
)) &&
508 !(env
->hflags
& MIPS_HFLAG_DM
)) {
510 env
->exception_index
= EXCP_EXT_INTERRUPT
;
515 #elif defined(TARGET_SPARC)
516 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
518 int pil
= env
->interrupt_index
& 15;
519 int type
= env
->interrupt_index
& 0xf0;
521 if (((type
== TT_EXTINT
) &&
522 (pil
== 15 || pil
> env
->psrpil
)) ||
524 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
525 do_interrupt(env
->interrupt_index
);
526 env
->interrupt_index
= 0;
527 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
532 } else if (interrupt_request
& CPU_INTERRUPT_TIMER
) {
533 //do_interrupt(0, 0, 0, 0, 0);
534 env
->interrupt_request
&= ~CPU_INTERRUPT_TIMER
;
536 #elif defined(TARGET_ARM)
537 if (interrupt_request
& CPU_INTERRUPT_FIQ
538 && !(env
->uncached_cpsr
& CPSR_F
)) {
539 env
->exception_index
= EXCP_FIQ
;
543 /* ARMv7-M interrupt return works by loading a magic value
544 into the PC. On real hardware the load causes the
545 return to occur. The qemu implementation performs the
546 jump normally, then does the exception return when the
547 CPU tries to execute code at the magic address.
548 This will cause the magic PC value to be pushed to
549 the stack if an interrupt occured at the wrong time.
550 We avoid this by disabling interrupts when
551 pc contains a magic address. */
552 if (interrupt_request
& CPU_INTERRUPT_HARD
553 && ((IS_M(env
) && env
->regs
[15] < 0xfffffff0)
554 || !(env
->uncached_cpsr
& CPSR_I
))) {
555 env
->exception_index
= EXCP_IRQ
;
559 #elif defined(TARGET_SH4)
560 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
564 #elif defined(TARGET_ALPHA)
565 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
569 #elif defined(TARGET_CRIS)
570 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
574 #elif defined(TARGET_M68K)
575 if (interrupt_request
& CPU_INTERRUPT_HARD
576 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
577 < env
->pending_level
) {
578 /* Real hardware gets the interrupt vector via an
579 IACK cycle at this point. Current emulated
580 hardware doesn't rely on this, so we
581 provide/save the vector when the interrupt is
583 env
->exception_index
= env
->pending_vector
;
588 /* Don't use the cached interupt_request value,
589 do_interrupt may have updated the EXITTB flag. */
590 if (env
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
591 env
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
592 /* ensure that no TB jump will be modified as
593 the program flow was changed */
596 if (interrupt_request
& CPU_INTERRUPT_EXIT
) {
597 env
->interrupt_request
&= ~CPU_INTERRUPT_EXIT
;
598 env
->exception_index
= EXCP_INTERRUPT
;
603 if ((loglevel
& CPU_LOG_TB_CPU
)) {
604 /* restore flags in standard format */
606 #if defined(TARGET_I386)
607 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
608 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
609 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
610 #elif defined(TARGET_ARM)
611 cpu_dump_state(env
, logfile
, fprintf
, 0);
612 #elif defined(TARGET_SPARC)
613 REGWPTR
= env
->regbase
+ (env
->cwp
* 16);
614 env
->regwptr
= REGWPTR
;
615 cpu_dump_state(env
, logfile
, fprintf
, 0);
616 #elif defined(TARGET_PPC)
617 cpu_dump_state(env
, logfile
, fprintf
, 0);
618 #elif defined(TARGET_M68K)
619 cpu_m68k_flush_flags(env
, env
->cc_op
);
620 env
->cc_op
= CC_OP_FLAGS
;
621 env
->sr
= (env
->sr
& 0xffe0)
622 | env
->cc_dest
| (env
->cc_x
<< 4);
623 cpu_dump_state(env
, logfile
, fprintf
, 0);
624 #elif defined(TARGET_MIPS)
625 cpu_dump_state(env
, logfile
, fprintf
, 0);
626 #elif defined(TARGET_SH4)
627 cpu_dump_state(env
, logfile
, fprintf
, 0);
628 #elif defined(TARGET_ALPHA)
629 cpu_dump_state(env
, logfile
, fprintf
, 0);
630 #elif defined(TARGET_CRIS)
631 cpu_dump_state(env
, logfile
, fprintf
, 0);
633 #error unsupported target CPU
639 if ((loglevel
& CPU_LOG_EXEC
)) {
640 fprintf(logfile
, "Trace 0x%08lx [" TARGET_FMT_lx
"] %s\n",
641 (long)tb
->tc_ptr
, tb
->pc
,
642 lookup_symbol(tb
->pc
));
646 /* see if we can patch the calling TB. When the TB
647 spans two pages, we cannot safely do a direct
652 (env
->kqemu_enabled
!= 2) &&
654 tb
->page_addr
[1] == -1) {
656 tb_add_jump((TranslationBlock
*)(long)(T0
& ~3), T0
& 3, tb
);
657 spin_unlock(&tb_lock
);
661 env
->current_tb
= tb
;
662 /* execute the generated code */
663 gen_func
= (void *)tc_ptr
;
664 #if defined(__sparc__)
665 __asm__
__volatile__("call %0\n\t"
669 : "i0", "i1", "i2", "i3", "i4", "i5",
670 "o0", "o1", "o2", "o3", "o4", "o5",
671 "l0", "l1", "l2", "l3", "l4", "l5",
673 #elif defined(__arm__)
674 asm volatile ("mov pc, %0\n\t"
675 ".global exec_loop\n\t"
679 : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
680 #elif defined(__ia64)
687 fp
.gp
= code_gen_buffer
+ 2 * (1 << 20);
688 (*(void (*)(void)) &fp
)();
692 env
->current_tb
= NULL
;
693 /* reset soft MMU for next block (it can currently
694 only be set by a memory fault) */
695 #if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
696 if (env
->hflags
& HF_SOFTMMU_MASK
) {
697 env
->hflags
&= ~HF_SOFTMMU_MASK
;
698 /* do not allow linking to another block */
702 #if defined(USE_KQEMU)
703 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
704 if (kqemu_is_ok(env
) &&
705 (cpu_get_time_fast() - env
->last_io_time
) >= MIN_CYCLE_BEFORE_SWITCH
) {
716 #if defined(TARGET_I386)
717 /* restore flags in standard format */
718 env
->eflags
= env
->eflags
| cc_table
[CC_OP
].compute_all() | (DF
& DF_MASK
);
719 #elif defined(TARGET_ARM)
720 /* XXX: Save/restore host fpu exception state?. */
721 #elif defined(TARGET_SPARC)
722 #if defined(reg_REGWPTR)
723 REGWPTR
= saved_regwptr
;
725 #elif defined(TARGET_PPC)
726 #elif defined(TARGET_M68K)
727 cpu_m68k_flush_flags(env
, env
->cc_op
);
728 env
->cc_op
= CC_OP_FLAGS
;
729 env
->sr
= (env
->sr
& 0xffe0)
730 | env
->cc_dest
| (env
->cc_x
<< 4);
731 #elif defined(TARGET_MIPS)
732 #elif defined(TARGET_SH4)
733 #elif defined(TARGET_IA64)
734 #elif defined(TARGET_ALPHA)
735 #elif defined(TARGET_CRIS)
738 #error unsupported target CPU
741 /* restore global registers */
743 #include "hostregs_helper.h"
745 /* fail safe : never use cpu_single_env outside cpu_exec() */
746 cpu_single_env
= NULL
;
750 /* must only be called from the generated code as an exception can be
752 void tb_invalidate_page_range(target_ulong start
, target_ulong end
)
754 /* XXX: cannot enable it yet because it yields to MMU exception
755 where NIP != read address on PowerPC */
757 target_ulong phys_addr
;
758 phys_addr
= get_phys_addr_code(env
, start
);
759 tb_invalidate_phys_page_range(phys_addr
, phys_addr
+ end
- start
, 0);
763 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
765 void cpu_x86_load_seg(CPUX86State
*s
, int seg_reg
, int selector
)
767 CPUX86State
*saved_env
;
771 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
773 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
774 (selector
<< 4), 0xffff, 0);
776 load_seg(seg_reg
, selector
);
781 void cpu_x86_fsave(CPUX86State
*s
, target_ulong ptr
, int data32
)
783 CPUX86State
*saved_env
;
788 helper_fsave(ptr
, data32
);
793 void cpu_x86_frstor(CPUX86State
*s
, target_ulong ptr
, int data32
)
795 CPUX86State
*saved_env
;
800 helper_frstor(ptr
, data32
);
805 #endif /* TARGET_I386 */
807 #if !defined(CONFIG_SOFTMMU)
809 #if defined(TARGET_I386)
811 /* 'pc' is the host PC at which the exception was raised. 'address' is
812 the effective address of the memory exception. 'is_write' is 1 if a
813 write caused the exception and otherwise 0'. 'old_set' is the
814 signal set which should be restored */
815 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
816 int is_write
, sigset_t
*old_set
,
819 TranslationBlock
*tb
;
823 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
824 #if defined(DEBUG_SIGNAL)
825 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
826 pc
, address
, is_write
, *(unsigned long *)old_set
);
828 /* XXX: locking issue */
829 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
833 /* see if it is an MMU fault */
834 ret
= cpu_x86_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
836 return 0; /* not an MMU fault */
838 return 1; /* the MMU fault was handled without causing real CPU fault */
839 /* now we have a real cpu fault */
842 /* the PC is inside the translated code. It means that we have
843 a virtual CPU fault */
844 cpu_restore_state(tb
, env
, pc
, puc
);
848 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
849 env
->eip
, env
->cr
[2], env
->error_code
);
851 /* we restore the process signal mask as the sigreturn should
852 do it (XXX: use sigsetjmp) */
853 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
854 raise_exception_err(env
->exception_index
, env
->error_code
);
856 /* activate soft MMU for this block */
857 env
->hflags
|= HF_SOFTMMU_MASK
;
858 cpu_resume_from_signal(env
, puc
);
860 /* never comes here */
864 #elif defined(TARGET_ARM)
865 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
866 int is_write
, sigset_t
*old_set
,
869 TranslationBlock
*tb
;
873 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
874 #if defined(DEBUG_SIGNAL)
875 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
876 pc
, address
, is_write
, *(unsigned long *)old_set
);
878 /* XXX: locking issue */
879 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
882 /* see if it is an MMU fault */
883 ret
= cpu_arm_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
885 return 0; /* not an MMU fault */
887 return 1; /* the MMU fault was handled without causing real CPU fault */
888 /* now we have a real cpu fault */
891 /* the PC is inside the translated code. It means that we have
892 a virtual CPU fault */
893 cpu_restore_state(tb
, env
, pc
, puc
);
895 /* we restore the process signal mask as the sigreturn should
896 do it (XXX: use sigsetjmp) */
897 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
900 #elif defined(TARGET_SPARC)
901 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
902 int is_write
, sigset_t
*old_set
,
905 TranslationBlock
*tb
;
909 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
910 #if defined(DEBUG_SIGNAL)
911 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
912 pc
, address
, is_write
, *(unsigned long *)old_set
);
914 /* XXX: locking issue */
915 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
918 /* see if it is an MMU fault */
919 ret
= cpu_sparc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
921 return 0; /* not an MMU fault */
923 return 1; /* the MMU fault was handled without causing real CPU fault */
924 /* now we have a real cpu fault */
927 /* the PC is inside the translated code. It means that we have
928 a virtual CPU fault */
929 cpu_restore_state(tb
, env
, pc
, puc
);
931 /* we restore the process signal mask as the sigreturn should
932 do it (XXX: use sigsetjmp) */
933 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
936 #elif defined (TARGET_PPC)
937 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
938 int is_write
, sigset_t
*old_set
,
941 TranslationBlock
*tb
;
945 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
946 #if defined(DEBUG_SIGNAL)
947 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
948 pc
, address
, is_write
, *(unsigned long *)old_set
);
950 /* XXX: locking issue */
951 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
955 /* see if it is an MMU fault */
956 ret
= cpu_ppc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
958 return 0; /* not an MMU fault */
960 return 1; /* the MMU fault was handled without causing real CPU fault */
962 /* now we have a real cpu fault */
965 /* the PC is inside the translated code. It means that we have
966 a virtual CPU fault */
967 cpu_restore_state(tb
, env
, pc
, puc
);
971 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
972 env
->nip
, env
->error_code
, tb
);
974 /* we restore the process signal mask as the sigreturn should
975 do it (XXX: use sigsetjmp) */
976 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
977 do_raise_exception_err(env
->exception_index
, env
->error_code
);
979 /* activate soft MMU for this block */
980 cpu_resume_from_signal(env
, puc
);
982 /* never comes here */
986 #elif defined(TARGET_M68K)
987 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
988 int is_write
, sigset_t
*old_set
,
991 TranslationBlock
*tb
;
995 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
996 #if defined(DEBUG_SIGNAL)
997 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
998 pc
, address
, is_write
, *(unsigned long *)old_set
);
1000 /* XXX: locking issue */
1001 if (is_write
&& page_unprotect(address
, pc
, puc
)) {
1004 /* see if it is an MMU fault */
1005 ret
= cpu_m68k_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1007 return 0; /* not an MMU fault */
1009 return 1; /* the MMU fault was handled without causing real CPU fault */
1010 /* now we have a real cpu fault */
1011 tb
= tb_find_pc(pc
);
1013 /* the PC is inside the translated code. It means that we have
1014 a virtual CPU fault */
1015 cpu_restore_state(tb
, env
, pc
, puc
);
1017 /* we restore the process signal mask as the sigreturn should
1018 do it (XXX: use sigsetjmp) */
1019 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1021 /* never comes here */
1025 #elif defined (TARGET_MIPS)
1026 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1027 int is_write
, sigset_t
*old_set
,
1030 TranslationBlock
*tb
;
1034 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1035 #if defined(DEBUG_SIGNAL)
1036 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1037 pc
, address
, is_write
, *(unsigned long *)old_set
);
1039 /* XXX: locking issue */
1040 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1044 /* see if it is an MMU fault */
1045 ret
= cpu_mips_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1047 return 0; /* not an MMU fault */
1049 return 1; /* the MMU fault was handled without causing real CPU fault */
1051 /* now we have a real cpu fault */
1052 tb
= tb_find_pc(pc
);
1054 /* the PC is inside the translated code. It means that we have
1055 a virtual CPU fault */
1056 cpu_restore_state(tb
, env
, pc
, puc
);
1060 printf("PF exception: PC=0x" TARGET_FMT_lx
" error=0x%x %p\n",
1061 env
->PC
, env
->error_code
, tb
);
1063 /* we restore the process signal mask as the sigreturn should
1064 do it (XXX: use sigsetjmp) */
1065 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1066 do_raise_exception_err(env
->exception_index
, env
->error_code
);
1068 /* activate soft MMU for this block */
1069 cpu_resume_from_signal(env
, puc
);
1071 /* never comes here */
1075 #elif defined (TARGET_SH4)
1076 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1077 int is_write
, sigset_t
*old_set
,
1080 TranslationBlock
*tb
;
1084 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1085 #if defined(DEBUG_SIGNAL)
1086 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1087 pc
, address
, is_write
, *(unsigned long *)old_set
);
1089 /* XXX: locking issue */
1090 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1094 /* see if it is an MMU fault */
1095 ret
= cpu_sh4_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1097 return 0; /* not an MMU fault */
1099 return 1; /* the MMU fault was handled without causing real CPU fault */
1101 /* now we have a real cpu fault */
1102 tb
= tb_find_pc(pc
);
1104 /* the PC is inside the translated code. It means that we have
1105 a virtual CPU fault */
1106 cpu_restore_state(tb
, env
, pc
, puc
);
1109 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1110 env
->nip
, env
->error_code
, tb
);
1112 /* we restore the process signal mask as the sigreturn should
1113 do it (XXX: use sigsetjmp) */
1114 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1116 /* never comes here */
1120 #elif defined (TARGET_ALPHA)
1121 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1122 int is_write
, sigset_t
*old_set
,
1125 TranslationBlock
*tb
;
1129 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1130 #if defined(DEBUG_SIGNAL)
1131 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1132 pc
, address
, is_write
, *(unsigned long *)old_set
);
1134 /* XXX: locking issue */
1135 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1139 /* see if it is an MMU fault */
1140 ret
= cpu_alpha_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1142 return 0; /* not an MMU fault */
1144 return 1; /* the MMU fault was handled without causing real CPU fault */
1146 /* now we have a real cpu fault */
1147 tb
= tb_find_pc(pc
);
1149 /* the PC is inside the translated code. It means that we have
1150 a virtual CPU fault */
1151 cpu_restore_state(tb
, env
, pc
, puc
);
1154 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1155 env
->nip
, env
->error_code
, tb
);
1157 /* we restore the process signal mask as the sigreturn should
1158 do it (XXX: use sigsetjmp) */
1159 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1161 /* never comes here */
1164 #elif defined (TARGET_CRIS)
1165 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1166 int is_write
, sigset_t
*old_set
,
1169 TranslationBlock
*tb
;
1173 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1174 #if defined(DEBUG_SIGNAL)
1175 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1176 pc
, address
, is_write
, *(unsigned long *)old_set
);
1178 /* XXX: locking issue */
1179 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1183 /* see if it is an MMU fault */
1184 ret
= cpu_cris_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1186 return 0; /* not an MMU fault */
1188 return 1; /* the MMU fault was handled without causing real CPU fault */
1190 /* now we have a real cpu fault */
1191 tb
= tb_find_pc(pc
);
1193 /* the PC is inside the translated code. It means that we have
1194 a virtual CPU fault */
1195 cpu_restore_state(tb
, env
, pc
, puc
);
1197 /* we restore the process signal mask as the sigreturn should
1198 do it (XXX: use sigsetjmp) */
1199 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1201 /* never comes here */
1206 #error unsupported target CPU
1209 #if defined(__i386__)
1211 #if defined(__APPLE__)
1212 # include <sys/ucontext.h>
1214 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1215 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1216 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1218 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1219 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1220 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1223 int cpu_signal_handler(int host_signum
, void *pinfo
,
1226 siginfo_t
*info
= pinfo
;
1227 struct ucontext
*uc
= puc
;
1235 #define REG_TRAPNO TRAPNO
1238 trapno
= TRAP_sig(uc
);
1239 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1241 (ERROR_sig(uc
) >> 1) & 1 : 0,
1242 &uc
->uc_sigmask
, puc
);
1245 #elif defined(__x86_64__)
1247 int cpu_signal_handler(int host_signum
, void *pinfo
,
1250 siginfo_t
*info
= pinfo
;
1251 struct ucontext
*uc
= puc
;
1254 pc
= uc
->uc_mcontext
.gregs
[REG_RIP
];
1255 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1256 uc
->uc_mcontext
.gregs
[REG_TRAPNO
] == 0xe ?
1257 (uc
->uc_mcontext
.gregs
[REG_ERR
] >> 1) & 1 : 0,
1258 &uc
->uc_sigmask
, puc
);
1261 #elif defined(__powerpc__)
1263 /***********************************************************************
1264 * signal context platform-specific definitions
1268 /* All Registers access - only for local access */
1269 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1270 /* Gpr Registers access */
1271 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1272 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1273 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1274 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1275 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1276 # define LR_sig(context) REG_sig(link, context) /* Link register */
1277 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1278 /* Float Registers access */
1279 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1280 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1281 /* Exception Registers access */
1282 # define DAR_sig(context) REG_sig(dar, context)
1283 # define DSISR_sig(context) REG_sig(dsisr, context)
1284 # define TRAP_sig(context) REG_sig(trap, context)
1288 # include <sys/ucontext.h>
1289 typedef struct ucontext SIGCONTEXT
;
1290 /* All Registers access - only for local access */
1291 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1292 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1293 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1294 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1295 /* Gpr Registers access */
1296 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1297 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1298 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1299 # define CTR_sig(context) REG_sig(ctr, context)
1300 # define XER_sig(context) REG_sig(xer, context) /* Link register */
1301 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1302 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1303 /* Float Registers access */
1304 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1305 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1306 /* Exception Registers access */
1307 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1308 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1309 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1310 #endif /* __APPLE__ */
1312 int cpu_signal_handler(int host_signum
, void *pinfo
,
1315 siginfo_t
*info
= pinfo
;
1316 struct ucontext
*uc
= puc
;
1324 if (DSISR_sig(uc
) & 0x00800000)
1327 if (TRAP_sig(uc
) != 0x400 && (DSISR_sig(uc
) & 0x02000000))
1330 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1331 is_write
, &uc
->uc_sigmask
, puc
);
1334 #elif defined(__alpha__)
1336 int cpu_signal_handler(int host_signum
, void *pinfo
,
1339 siginfo_t
*info
= pinfo
;
1340 struct ucontext
*uc
= puc
;
1341 uint32_t *pc
= uc
->uc_mcontext
.sc_pc
;
1342 uint32_t insn
= *pc
;
1345 /* XXX: need kernel patch to get write flag faster */
1346 switch (insn
>> 26) {
1361 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1362 is_write
, &uc
->uc_sigmask
, puc
);
1364 #elif defined(__sparc__)
1366 int cpu_signal_handler(int host_signum
, void *pinfo
,
1369 siginfo_t
*info
= pinfo
;
1370 uint32_t *regs
= (uint32_t *)(info
+ 1);
1371 void *sigmask
= (regs
+ 20);
1376 /* XXX: is there a standard glibc define ? */
1378 /* XXX: need kernel patch to get write flag faster */
1380 insn
= *(uint32_t *)pc
;
1381 if ((insn
>> 30) == 3) {
1382 switch((insn
>> 19) & 0x3f) {
1394 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1395 is_write
, sigmask
, NULL
);
1398 #elif defined(__arm__)
1400 int cpu_signal_handler(int host_signum
, void *pinfo
,
1403 siginfo_t
*info
= pinfo
;
1404 struct ucontext
*uc
= puc
;
1408 pc
= uc
->uc_mcontext
.gregs
[R15
];
1409 /* XXX: compute is_write */
1411 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1413 &uc
->uc_sigmask
, puc
);
1416 #elif defined(__mc68000)
1418 int cpu_signal_handler(int host_signum
, void *pinfo
,
1421 siginfo_t
*info
= pinfo
;
1422 struct ucontext
*uc
= puc
;
1426 pc
= uc
->uc_mcontext
.gregs
[16];
1427 /* XXX: compute is_write */
1429 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1431 &uc
->uc_sigmask
, puc
);
1434 #elif defined(__ia64)
1437 /* This ought to be in <bits/siginfo.h>... */
1438 # define __ISR_VALID 1
1441 int cpu_signal_handler(int host_signum
, void *pinfo
, void *puc
)
1443 siginfo_t
*info
= pinfo
;
1444 struct ucontext
*uc
= puc
;
1448 ip
= uc
->uc_mcontext
.sc_ip
;
1449 switch (host_signum
) {
1455 if (info
->si_code
&& (info
->si_segvflags
& __ISR_VALID
))
1456 /* ISR.W (write-access) is bit 33: */
1457 is_write
= (info
->si_isr
>> 33) & 1;
1463 return handle_cpu_signal(ip
, (unsigned long)info
->si_addr
,
1465 &uc
->uc_sigmask
, puc
);
1468 #elif defined(__s390__)
1470 int cpu_signal_handler(int host_signum
, void *pinfo
,
1473 siginfo_t
*info
= pinfo
;
1474 struct ucontext
*uc
= puc
;
1478 pc
= uc
->uc_mcontext
.psw
.addr
;
1479 /* XXX: compute is_write */
1481 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1482 is_write
, &uc
->uc_sigmask
, puc
);
1485 #elif defined(__mips__)
1487 int cpu_signal_handler(int host_signum
, void *pinfo
,
1490 siginfo_t
*info
= pinfo
;
1491 struct ucontext
*uc
= puc
;
1492 greg_t pc
= uc
->uc_mcontext
.pc
;
1495 /* XXX: compute is_write */
1497 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1498 is_write
, &uc
->uc_sigmask
, puc
);
1503 #error host CPU specific signal handler needed
1507 #endif /* !defined(CONFIG_SOFTMMU) */