/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
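
/* Editor's sketch (hypothetical names, not part of this file's API):
   cpu_loop_exit() and cpu_exec() cooperate through the classic
   setjmp()/longjmp() non-local exit pattern, in isolation: */
#if 0
#include <setjmp.h>

static jmp_buf exec_env;

static void guest_fault(void)
{
    longjmp(exec_env, 1);           /* unwind back to the dispatch loop */
}

static int run(void)
{
    if (setjmp(exec_env) == 0) {
        guest_fault();              /* "execute"; may longjmp out */
    }
    return 0;                       /* control resumes here afterwards */
}
#endif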
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
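
/* Editor's sketch (hypothetical helper names): tcg_qemu_tb_exec() returns
   the address of the last executed TB with status packed into the two low
   bits, which is why callers mask with ~3 and test (next_tb & 3): */
#if 0
static inline TranslationBlock *next_tb_ptr(unsigned long next_tb)
{
    return (TranslationBlock *)(next_tb & ~3UL);  /* pointer part */
}

static inline int next_tb_status(unsigned long next_tb)
{
    return next_tb & 3;  /* 2 signals that the instruction counter expired */
}
#endif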
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
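
/* Editor's sketch (hypothetical types and sizes): tb_find_slow() above and
   tb_find_fast() below form a two-level lookup -- a direct-mapped cache
   indexed by virtual PC in front of a chained hash table keyed by physical
   PC.  The shape of the pattern in isolation: */
#if 0
#define TB_CACHE_SIZE 4096

struct block { unsigned long pc; struct block *hash_next; };

static struct block *jmp_cache[TB_CACHE_SIZE];   /* level 1: virtual PC  */
static struct block *phys_hash[TB_CACHE_SIZE];   /* level 2: hash chains */

static struct block *find_block(unsigned long pc)
{
    unsigned int idx = (pc >> 2) & (TB_CACHE_SIZE - 1);
    struct block *b = jmp_cache[idx];

    if (b && b->pc == pc)
        return b;                                /* fast hit */
    for (b = phys_hash[idx]; b; b = b->hash_next)
        if (b->pc == pc)
            break;                               /* slow path */
    jmp_cache[idx] = b;                          /* refill level 1 */
    return b;
}
#endif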
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
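
/* Editor's usage sketch (hypothetical names): a debugger front end would
   register its hook with cpu_set_debug_excp_handler() and chain to the
   handler it displaced: */
#if 0
static CPUDebugExcpHandler *prev_debug_handler;

static void my_debug_handler(CPUState *env)
{
    /* ... inspect breakpoint/watchpoint state here ... */
    if (prev_debug_handler)
        prev_debug_handler(env);    /* preserve the previous behaviour */
}

static void install_debug_handler(void)
{
    prev_debug_handler = cpu_set_debug_excp_handler(my_debug_handler);
}
#endif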
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
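
    /* Editor's note on the x86 flags setup above:
       DF = 1 - (2 * ((eflags >> 10) & 1)) maps bit 10 of EFLAGS (the
       direction flag) onto the stride used by the string instructions.
       Worked out: bit clear gives 1 - 2*0 = +1 (addresses increment);
       bit set gives 1 - 2*1 = -1 (addresses decrement). */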
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb.  Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely(env->exit_request))
                    env->current_tb = NULL;
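
                /* Editor's sketch: the two low bits of next_tb name which
                   jump slot of the previously executed TB we left through;
                   tb_add_jump() patches that slot's host jump to land
                   directly in this TB's code.  Hypothetical shape:

                       if (from->jmp_next[slot] == NULL) {
                           from->jmp_next[slot] = to;  // kept so the link
                                                       // can be undone
                           patch_host_jump(from, slot, to);
                       }

                   so later executions bypass tb_find_fast() entirely. */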
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
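
                /* Editor's note on the refill above: the icount decrementer
                   is only 16 bits wide, so budgets above 0xffff are parked
                   in icount_extra and fed to icount_decr.u16.low one slice
                   at a time.  E.g. with icount_extra == 0x12345 the refill
                   hands the decrementer 0xffff and keeps 0x2346 parked for
                   the next round. */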
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* inner for(;;) */
        } else {
            env_to_regs();
        }
    } /* outer for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
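
/* Editor's note with a worked example: in real mode and vm86 mode (the
   first branch of cpu_x86_load_seg() above) the segment base is just
   selector * 16, which is what the (selector << 4) argument encodes.
   Hypothetical helper for illustration: */
#if 0
static unsigned int real_mode_linear(unsigned short sel, unsigned short off)
{
    /* e.g. 0x1234:0x0010 -> (0x1234 << 4) + 0x10 = 0x12350 */
    return ((unsigned int)sel << 4) + off;
}
#endif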
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
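
/* Editor's sketch (hypothetical handler name): on the host side, a
   SIGSEGV/SIGBUS handler installed with sigaction() extracts the faulting
   host PC and fault details from the signal context and forwards them to
   handle_cpu_signal() via the cpu_signal_handler() definitions further
   down in this file: */
#if 0
#include <signal.h>
#include <stdlib.h>

static void host_segv_handler(int sig, siginfo_t *info, void *puc)
{
    if (!cpu_signal_handler(sig, info, puc))
        abort();    /* not a guest fault: a genuine emulator bug */
}

static void install_fault_handler(void)
{
    struct sigaction act;

    sigemptyset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_segv_handler;
    sigaction(SIGSEGV, &act, NULL);
}
#endif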
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MICROBLAZE)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long *)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)   ((context)->sc_eip)
# define TRAP_sig(context)  ((context)->sc_trapno)
# define ERROR_sig(context) ((context)->sc_err)
# define MASK_sig(context)  ((context)->sc_mask)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
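
/* Editor's note: for trap 0xe (the x86 page fault) bit 1 of the pushed
   error code distinguishes writes from reads, which is what
   (ERROR_sig(uc) >> 1) & 1 extracts above.  The relevant error-code bits:
       bit 0: 0 = non-present page, 1 = protection violation
       bit 1: 0 = read access,      1 = write access
       bit 2: 0 = supervisor mode,  1 = user mode */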
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num])
# define FPSCR_sig(context)             (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */