2 * i386 emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
26 #if !defined(CONFIG_SOFTMMU)
38 #include <sys/ucontext.h>
42 #if defined(__sparc__) && !defined(HOST_SOLARIS)
43 // Work around ugly bugs in glibc that mangle global register contents
45 #define env cpu_single_env
/* Set when a TranslationBlock may have been invalidated during code
   generation; checked/cleared by the execution loop and tb_find_slow. */
48 int tb_invalidated_flag
;
51 //#define DEBUG_SIGNAL
/* Return non-zero if the given CPU has pending work (delegates to
   cpu_has_work).  NOTE(review): this chunk is truncated -- the function's
   brace lines are not visible here. */
53 int qemu_cpu_has_work(CPUState
*env
)
55 return cpu_has_work(env
);
/* Abort execution of the current translated block and return to the
   setjmp point in cpu_exec() via longjmp on env->jmp_env. */
58 void cpu_loop_exit(void)
60 /* NOTE: the register at this point must be saved by hand because
61 longjmp restore them */
63 longjmp(env
->jmp_env
, 1);
66 /* exit the current TB from a signal handler. The host registers are
67 restored in a state compatible with the CPU emulator
69 void cpu_resume_from_signal(CPUState
*env1
, void *puc
)
/* NOTE(review): 'puc' is the opaque signal ucontext; its concrete type
   depends on the host OS, selected by the #if branches below. */
71 #if !defined(CONFIG_SOFTMMU)
73 struct ucontext
*uc
= puc
;
74 #elif defined(__OpenBSD__)
75 struct sigcontext
*uc
= puc
;
81 /* XXX: restore cpu registers saved in host registers */
/* Restore the signal mask that was in effect when the signal was
   delivered, since we longjmp out instead of returning via sigreturn. */
83 #if !defined(CONFIG_SOFTMMU)
85 /* XXX: use siglongjmp ? */
87 sigprocmask(SIG_SETMASK
, &uc
->uc_sigmask
, NULL
);
88 #elif defined(__OpenBSD__)
89 sigprocmask(SIG_SETMASK
, &uc
->sc_mask
, NULL
);
/* Clear any pending exception and re-enter the main execution loop. */
93 env
->exception_index
= -1;
94 longjmp(env
->jmp_env
, 1);
97 /* Execute the code without caching the generated code. An interpreter
98 could be used if available. */
99 static void cpu_exec_nocache(int max_cycles
, TranslationBlock
*orig_tb
)
101 unsigned long next_tb
;
102 TranslationBlock
*tb
;
104 /* Should never happen.
105 We only end up here when an existing TB is too long. */
/* Clamp the cycle budget to the maximum encodable in cflags. */
106 if (max_cycles
> CF_COUNT_MASK
)
107 max_cycles
= CF_COUNT_MASK
;
/* Generate a one-shot TB at the same pc/cs_base/flags as the original.
   NOTE(review): the cflags argument line (original line 110) is missing
   from this chunk. */
109 tb
= tb_gen_code(env
, orig_tb
->pc
, orig_tb
->cs_base
, orig_tb
->flags
,
111 env
->current_tb
= tb
;
112 /* execute the generated code */
113 next_tb
= tcg_qemu_tb_exec(tb
->tc_ptr
);
115 if ((next_tb
& 3) == 2) {
116 /* Restore PC. This may happen if async event occurs before
117 the TB starts executing. */
118 cpu_pc_from_tb(env
, tb
);
/* Throw away the temporary TB -- it must not be reused. */
120 tb_phys_invalidate(tb
, -1);
/* Slow-path TB lookup: walk the physical-address hash chain for a TB
   matching (pc, cs_base, flags); generate a new TB if none is found,
   then seed the per-CPU virtual-pc jump cache with the result.
   NOTE(review): this chunk is truncated -- the hash-chain for(;;) loop
   header (original line 146) and several list-walk lines are missing. */
124 static TranslationBlock
*tb_find_slow(target_ulong pc
,
125 target_ulong cs_base
,
128 TranslationBlock
*tb
, **ptb1
;
130 target_ulong phys_pc
, phys_page1
, phys_page2
, virt_page2
;
132 tb_invalidated_flag
= 0;
134 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
136 /* find translated block using physical mappings */
137 phys_pc
= get_phys_addr_code(env
, pc
);
138 phys_page1
= phys_pc
& TARGET_PAGE_MASK
;
140 h
= tb_phys_hash_func(phys_pc
);
141 ptb1
= &tb_phys_hash
[h
];
/* Candidate matches when its first physical page, cs_base and flags
   all agree with the lookup key. */
147 tb
->page_addr
[0] == phys_page1
&&
148 tb
->cs_base
== cs_base
&&
149 tb
->flags
== flags
) {
150 /* check next page if needed */
151 if (tb
->page_addr
[1] != -1) {
152 virt_page2
= (pc
& TARGET_PAGE_MASK
) +
154 phys_page2
= get_phys_addr_code(env
, virt_page2
);
155 if (tb
->page_addr
[1] == phys_page2
)
/* Advance to the next TB in this hash bucket's chain. */
161 ptb1
= &tb
->phys_hash_next
;
164 /* if no translated code available, then translate it now */
165 tb
= tb_gen_code(env
, pc
, cs_base
, flags
, 0);
168 /* we add the TB in the virtual pc hash table */
169 env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)] = tb
;
/* Fast-path TB lookup: probe the per-CPU virtual-pc jump cache and fall
   back to tb_find_slow() on a miss or on any (pc, cs_base, flags)
   mismatch. */
173 static inline TranslationBlock
*tb_find_fast(void)
175 TranslationBlock
*tb
;
176 target_ulong cs_base
, pc
;
179 /* we record a subset of the CPU state. It will
180 always be the same before a given translated block
182 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &flags
);
183 tb
= env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)];
/* A cached TB is only valid if every component of the key matches. */
184 if (unlikely(!tb
|| tb
->pc
!= pc
|| tb
->cs_base
!= cs_base
||
185 tb
->flags
!= flags
)) {
186 tb
= tb_find_slow(pc
, cs_base
, flags
);
/* Optional callback invoked when a debug exception is handled. */
191 static CPUDebugExcpHandler
*debug_excp_handler
;
/* Install a new debug-exception handler and return the previous one so
   the caller can chain or restore it. */
193 CPUDebugExcpHandler
*cpu_set_debug_excp_handler(CPUDebugExcpHandler
*handler
)
195 CPUDebugExcpHandler
*old_handler
= debug_excp_handler
;
197 debug_excp_handler
= handler
;
/* Handle EXCP_DEBUG: if no specific watchpoint fired, clear the HIT flag
   on all watchpoints, then invoke the registered debug handler (if any). */
201 static void cpu_handle_debug_exception(CPUState
*env
)
205 if (!env
->watchpoint_hit
)
206 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
)
207 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
209 if (debug_excp_handler
)
210 debug_excp_handler(env
);
213 /* main execution loop */
215 int cpu_exec(CPUState
*env1
)
217 #define DECLARE_HOST_REGS 1
218 #include "hostregs_helper.h"
219 int ret
, interrupt_request
;
220 TranslationBlock
*tb
;
222 unsigned long next_tb
;
224 if (cpu_halted(env1
) == EXCP_HALTED
)
227 cpu_single_env
= env1
;
229 /* first we save global registers */
230 #define SAVE_HOST_REGS 1
231 #include "hostregs_helper.h"
235 #if defined(TARGET_I386)
236 /* put eflags in CPU temporary format */
237 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
238 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
239 CC_OP
= CC_OP_EFLAGS
;
240 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
241 #elif defined(TARGET_SPARC)
242 #elif defined(TARGET_M68K)
243 env
->cc_op
= CC_OP_FLAGS
;
244 env
->cc_dest
= env
->sr
& 0xf;
245 env
->cc_x
= (env
->sr
>> 4) & 1;
246 #elif defined(TARGET_ALPHA)
247 #elif defined(TARGET_ARM)
248 #elif defined(TARGET_PPC)
249 #elif defined(TARGET_MICROBLAZE)
250 #elif defined(TARGET_MIPS)
251 #elif defined(TARGET_SH4)
252 #elif defined(TARGET_CRIS)
255 #error unsupported target CPU
257 env
->exception_index
= -1;
259 /* prepare setjmp context for exception handling */
261 if (setjmp(env
->jmp_env
) == 0) {
262 #if defined(__sparc__) && !defined(HOST_SOLARIS)
264 env
= cpu_single_env
;
265 #define env cpu_single_env
267 env
->current_tb
= NULL
;
268 /* if an exception is pending, we execute it here */
269 if (env
->exception_index
>= 0) {
270 if (env
->exception_index
>= EXCP_INTERRUPT
) {
271 /* exit request from the cpu execution loop */
272 ret
= env
->exception_index
;
273 if (ret
== EXCP_DEBUG
)
274 cpu_handle_debug_exception(env
);
277 #if defined(CONFIG_USER_ONLY)
278 /* if user mode only, we simulate a fake exception
279 which will be handled outside the cpu execution
281 #if defined(TARGET_I386)
282 do_interrupt_user(env
->exception_index
,
283 env
->exception_is_int
,
285 env
->exception_next_eip
);
286 /* successfully delivered */
287 env
->old_exception
= -1;
289 ret
= env
->exception_index
;
292 #if defined(TARGET_I386)
293 /* simulate a real cpu exception. On i386, it can
294 trigger new exceptions, but we do not handle
295 double or triple faults yet. */
296 do_interrupt(env
->exception_index
,
297 env
->exception_is_int
,
299 env
->exception_next_eip
, 0);
300 /* successfully delivered */
301 env
->old_exception
= -1;
302 #elif defined(TARGET_PPC)
304 #elif defined(TARGET_MICROBLAZE)
306 #elif defined(TARGET_MIPS)
308 #elif defined(TARGET_SPARC)
310 #elif defined(TARGET_ARM)
312 #elif defined(TARGET_SH4)
314 #elif defined(TARGET_ALPHA)
316 #elif defined(TARGET_CRIS)
318 #elif defined(TARGET_M68K)
323 env
->exception_index
= -1;
326 if (kqemu_is_ok(env
) && env
->interrupt_request
== 0 && env
->exit_request
== 0) {
328 env
->eflags
= env
->eflags
| helper_cc_compute_all(CC_OP
) | (DF
& DF_MASK
);
329 ret
= kqemu_cpu_exec(env
);
330 /* put eflags in CPU temporary format */
331 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
332 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
333 CC_OP
= CC_OP_EFLAGS
;
334 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
337 longjmp(env
->jmp_env
, 1);
338 } else if (ret
== 2) {
339 /* softmmu execution needed */
341 if (env
->interrupt_request
!= 0 || env
->exit_request
!= 0) {
342 /* hardware interrupt will be executed just after */
344 /* otherwise, we restart */
345 longjmp(env
->jmp_env
, 1);
353 longjmp(env
->jmp_env
, 1);
356 next_tb
= 0; /* force lookup of first TB */
358 interrupt_request
= env
->interrupt_request
;
359 if (unlikely(interrupt_request
)) {
360 if (unlikely(env
->singlestep_enabled
& SSTEP_NOIRQ
)) {
361 /* Mask out external interrupts for this step. */
362 interrupt_request
&= ~(CPU_INTERRUPT_HARD
|
367 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
368 env
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
369 env
->exception_index
= EXCP_DEBUG
;
372 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
373 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
374 defined(TARGET_MICROBLAZE)
375 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
376 env
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
378 env
->exception_index
= EXCP_HLT
;
382 #if defined(TARGET_I386)
383 if (interrupt_request
& CPU_INTERRUPT_INIT
) {
384 svm_check_intercept(SVM_EXIT_INIT
);
386 env
->exception_index
= EXCP_HALTED
;
388 } else if (interrupt_request
& CPU_INTERRUPT_SIPI
) {
390 } else if (env
->hflags2
& HF2_GIF_MASK
) {
391 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
392 !(env
->hflags
& HF_SMM_MASK
)) {
393 svm_check_intercept(SVM_EXIT_SMI
);
394 env
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
397 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
398 !(env
->hflags2
& HF2_NMI_MASK
)) {
399 env
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
400 env
->hflags2
|= HF2_NMI_MASK
;
401 do_interrupt(EXCP02_NMI
, 0, 0, 0, 1);
403 } else if (interrupt_request
& CPU_INTERRUPT_MCE
) {
404 env
->interrupt_request
&= ~CPU_INTERRUPT_MCE
;
405 do_interrupt(EXCP12_MCHK
, 0, 0, 0, 0);
407 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
408 (((env
->hflags2
& HF2_VINTR_MASK
) &&
409 (env
->hflags2
& HF2_HIF_MASK
)) ||
410 (!(env
->hflags2
& HF2_VINTR_MASK
) &&
411 (env
->eflags
& IF_MASK
&&
412 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
))))) {
414 svm_check_intercept(SVM_EXIT_INTR
);
415 env
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VIRQ
);
416 intno
= cpu_get_pic_interrupt(env
);
417 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Servicing hardware INT=0x%02x\n", intno
);
418 #if defined(__sparc__) && !defined(HOST_SOLARIS)
420 env
= cpu_single_env
;
421 #define env cpu_single_env
423 do_interrupt(intno
, 0, 0, 0, 1);
424 /* ensure that no TB jump will be modified as
425 the program flow was changed */
427 #if !defined(CONFIG_USER_ONLY)
428 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
429 (env
->eflags
& IF_MASK
) &&
430 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
432 /* FIXME: this should respect TPR */
433 svm_check_intercept(SVM_EXIT_VINTR
);
434 intno
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_vector
));
435 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Servicing virtual hardware INT=0x%02x\n", intno
);
436 do_interrupt(intno
, 0, 0, 0, 1);
437 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
442 #elif defined(TARGET_PPC)
444 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
448 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
449 ppc_hw_interrupt(env
);
450 if (env
->pending_interrupts
== 0)
451 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
454 #elif defined(TARGET_MICROBLAZE)
455 if ((interrupt_request
& CPU_INTERRUPT_HARD
)
456 && (env
->sregs
[SR_MSR
] & MSR_IE
)
457 && !(env
->sregs
[SR_MSR
] & (MSR_EIP
| MSR_BIP
))
458 && !(env
->iflags
& (D_FLAG
| IMM_FLAG
))) {
459 env
->exception_index
= EXCP_IRQ
;
463 #elif defined(TARGET_MIPS)
464 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
465 (env
->CP0_Status
& env
->CP0_Cause
& CP0Ca_IP_mask
) &&
466 (env
->CP0_Status
& (1 << CP0St_IE
)) &&
467 !(env
->CP0_Status
& (1 << CP0St_EXL
)) &&
468 !(env
->CP0_Status
& (1 << CP0St_ERL
)) &&
469 !(env
->hflags
& MIPS_HFLAG_DM
)) {
471 env
->exception_index
= EXCP_EXT_INTERRUPT
;
476 #elif defined(TARGET_SPARC)
477 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
478 cpu_interrupts_enabled(env
)) {
479 int pil
= env
->interrupt_index
& 15;
480 int type
= env
->interrupt_index
& 0xf0;
482 if (((type
== TT_EXTINT
) &&
483 (pil
== 15 || pil
> env
->psrpil
)) ||
485 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
486 env
->exception_index
= env
->interrupt_index
;
488 env
->interrupt_index
= 0;
489 #if !defined(CONFIG_USER_ONLY)
494 } else if (interrupt_request
& CPU_INTERRUPT_TIMER
) {
495 //do_interrupt(0, 0, 0, 0, 0);
496 env
->interrupt_request
&= ~CPU_INTERRUPT_TIMER
;
498 #elif defined(TARGET_ARM)
499 if (interrupt_request
& CPU_INTERRUPT_FIQ
500 && !(env
->uncached_cpsr
& CPSR_F
)) {
501 env
->exception_index
= EXCP_FIQ
;
505 /* ARMv7-M interrupt return works by loading a magic value
506 into the PC. On real hardware the load causes the
507 return to occur. The qemu implementation performs the
508 jump normally, then does the exception return when the
509 CPU tries to execute code at the magic address.
510 This will cause the magic PC value to be pushed to
511 the stack if an interrupt occured at the wrong time.
512 We avoid this by disabling interrupts when
513 pc contains a magic address. */
514 if (interrupt_request
& CPU_INTERRUPT_HARD
515 && ((IS_M(env
) && env
->regs
[15] < 0xfffffff0)
516 || !(env
->uncached_cpsr
& CPSR_I
))) {
517 env
->exception_index
= EXCP_IRQ
;
521 #elif defined(TARGET_SH4)
522 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
526 #elif defined(TARGET_ALPHA)
527 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
531 #elif defined(TARGET_CRIS)
532 if (interrupt_request
& CPU_INTERRUPT_HARD
533 && (env
->pregs
[PR_CCS
] & I_FLAG
)) {
534 env
->exception_index
= EXCP_IRQ
;
538 if (interrupt_request
& CPU_INTERRUPT_NMI
539 && (env
->pregs
[PR_CCS
] & M_FLAG
)) {
540 env
->exception_index
= EXCP_NMI
;
544 #elif defined(TARGET_M68K)
545 if (interrupt_request
& CPU_INTERRUPT_HARD
546 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
547 < env
->pending_level
) {
548 /* Real hardware gets the interrupt vector via an
549 IACK cycle at this point. Current emulated
550 hardware doesn't rely on this, so we
551 provide/save the vector when the interrupt is
553 env
->exception_index
= env
->pending_vector
;
558 /* Don't use the cached interupt_request value,
559 do_interrupt may have updated the EXITTB flag. */
560 if (env
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
561 env
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
562 /* ensure that no TB jump will be modified as
563 the program flow was changed */
567 if (unlikely(env
->exit_request
)) {
568 env
->exit_request
= 0;
569 env
->exception_index
= EXCP_INTERRUPT
;
573 if (qemu_loglevel_mask(CPU_LOG_TB_CPU
)) {
574 /* restore flags in standard format */
576 #if defined(TARGET_I386)
577 env
->eflags
= env
->eflags
| helper_cc_compute_all(CC_OP
) | (DF
& DF_MASK
);
578 log_cpu_state(env
, X86_DUMP_CCOP
);
579 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
580 #elif defined(TARGET_ARM)
581 log_cpu_state(env
, 0);
582 #elif defined(TARGET_SPARC)
583 log_cpu_state(env
, 0);
584 #elif defined(TARGET_PPC)
585 log_cpu_state(env
, 0);
586 #elif defined(TARGET_M68K)
587 cpu_m68k_flush_flags(env
, env
->cc_op
);
588 env
->cc_op
= CC_OP_FLAGS
;
589 env
->sr
= (env
->sr
& 0xffe0)
590 | env
->cc_dest
| (env
->cc_x
<< 4);
591 log_cpu_state(env
, 0);
592 #elif defined(TARGET_MICROBLAZE)
593 log_cpu_state(env
, 0);
594 #elif defined(TARGET_MIPS)
595 log_cpu_state(env
, 0);
596 #elif defined(TARGET_SH4)
597 log_cpu_state(env
, 0);
598 #elif defined(TARGET_ALPHA)
599 log_cpu_state(env
, 0);
600 #elif defined(TARGET_CRIS)
601 log_cpu_state(env
, 0);
603 #error unsupported target CPU
609 /* Note: we do it here to avoid a gcc bug on Mac OS X when
610 doing it in tb_find_slow */
611 if (tb_invalidated_flag
) {
612 /* as some TB could have been invalidated because
613 of memory exceptions while generating the code, we
614 must recompute the hash index here */
616 tb_invalidated_flag
= 0;
619 qemu_log_mask(CPU_LOG_EXEC
, "Trace 0x%08lx [" TARGET_FMT_lx
"] %s\n",
620 (long)tb
->tc_ptr
, tb
->pc
,
621 lookup_symbol(tb
->pc
));
623 /* see if we can patch the calling TB. When the TB
624 spans two pages, we cannot safely do a direct
629 (env
->kqemu_enabled
!= 2) &&
631 tb
->page_addr
[1] == -1) {
632 tb_add_jump((TranslationBlock
*)(next_tb
& ~3), next_tb
& 3, tb
);
635 spin_unlock(&tb_lock
);
636 env
->current_tb
= tb
;
638 /* cpu_interrupt might be called while translating the
639 TB, but before it is linked into a potentially
640 infinite loop and becomes env->current_tb. Avoid
641 starting execution if there is a pending interrupt. */
642 if (unlikely (env
->exit_request
))
643 env
->current_tb
= NULL
;
645 while (env
->current_tb
) {
647 /* execute the generated code */
648 #if defined(__sparc__) && !defined(HOST_SOLARIS)
650 env
= cpu_single_env
;
651 #define env cpu_single_env
653 next_tb
= tcg_qemu_tb_exec(tc_ptr
);
654 env
->current_tb
= NULL
;
655 if ((next_tb
& 3) == 2) {
656 /* Instruction counter expired. */
658 tb
= (TranslationBlock
*)(long)(next_tb
& ~3);
660 cpu_pc_from_tb(env
, tb
);
661 insns_left
= env
->icount_decr
.u32
;
662 if (env
->icount_extra
&& insns_left
>= 0) {
663 /* Refill decrementer and continue execution. */
664 env
->icount_extra
+= insns_left
;
665 if (env
->icount_extra
> 0xffff) {
668 insns_left
= env
->icount_extra
;
670 env
->icount_extra
-= insns_left
;
671 env
->icount_decr
.u16
.low
= insns_left
;
673 if (insns_left
> 0) {
674 /* Execute remaining instructions. */
675 cpu_exec_nocache(insns_left
, tb
);
677 env
->exception_index
= EXCP_INTERRUPT
;
683 /* reset soft MMU for next block (it can currently
684 only be set by a memory fault) */
685 #if defined(CONFIG_KQEMU)
686 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
687 if (kqemu_is_ok(env
) &&
688 (cpu_get_time_fast() - env
->last_io_time
) >= MIN_CYCLE_BEFORE_SWITCH
) {
699 #if defined(TARGET_I386)
700 /* restore flags in standard format */
701 env
->eflags
= env
->eflags
| helper_cc_compute_all(CC_OP
) | (DF
& DF_MASK
);
702 #elif defined(TARGET_ARM)
703 /* XXX: Save/restore host fpu exception state?. */
704 #elif defined(TARGET_SPARC)
705 #elif defined(TARGET_PPC)
706 #elif defined(TARGET_M68K)
707 cpu_m68k_flush_flags(env
, env
->cc_op
);
708 env
->cc_op
= CC_OP_FLAGS
;
709 env
->sr
= (env
->sr
& 0xffe0)
710 | env
->cc_dest
| (env
->cc_x
<< 4);
711 #elif defined(TARGET_MICROBLAZE)
712 #elif defined(TARGET_MIPS)
713 #elif defined(TARGET_SH4)
714 #elif defined(TARGET_ALPHA)
715 #elif defined(TARGET_CRIS)
718 #error unsupported target CPU
721 /* restore global registers */
722 #include "hostregs_helper.h"
724 /* fail safe : never use cpu_single_env outside cpu_exec() */
725 cpu_single_env
= NULL
;
729 /* must only be called from the generated code as an exception can be
/* Invalidate all TBs overlapping [start, end) by translating the start
   address to a physical code address and invalidating that physical
   range.  NOTE(review): per the comment below, the body appears to be
   conditionally disabled in the original -- the guarding #if lines are
   not visible in this chunk. */
731 void tb_invalidate_page_range(target_ulong start
, target_ulong end
)
733 /* XXX: cannot enable it yet because it yields to MMU exception
734 where NIP != read address on PowerPC */
736 target_ulong phys_addr
;
737 phys_addr
= get_phys_addr_code(env
, start
);
738 tb_invalidate_phys_page_range(phys_addr
, phys_addr
+ end
- start
, 0);
742 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
/* Load a segment register for user-mode emulation.  In real mode or
   vm86 mode the base is simply selector << 4; in protected mode the
   descriptor is loaded via helper_load_seg().  NOTE(review): the
   saved_env save/restore lines are missing from this chunk. */
744 void cpu_x86_load_seg(CPUX86State
*s
, int seg_reg
, int selector
)
746 CPUX86State
*saved_env
;
750 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
752 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
753 (selector
<< 4), 0xffff, 0);
755 helper_load_seg(seg_reg
, selector
);
/* User-mode wrapper for the x86 FSAVE instruction: store the FPU state
   to guest address 'ptr' ('data32' selects 32- vs 16-bit layout).
   NOTE(review): the saved_env save/restore lines are missing from this
   chunk. */
760 void cpu_x86_fsave(CPUX86State
*s
, target_ulong ptr
, int data32
)
762 CPUX86State
*saved_env
;
767 helper_fsave(ptr
, data32
);
/* User-mode wrapper for the x86 FRSTOR instruction: reload the FPU state
   from guest address 'ptr' ('data32' selects 32- vs 16-bit layout).
   NOTE(review): the saved_env save/restore lines are missing from this
   chunk. */
772 void cpu_x86_frstor(CPUX86State
*s
, target_ulong ptr
, int data32
)
774 CPUX86State
*saved_env
;
779 helper_frstor(ptr
, data32
);
784 #endif /* TARGET_I386 */
786 #if !defined(CONFIG_SOFTMMU)
788 #if defined(TARGET_I386)
790 /* 'pc' is the host PC at which the exception was raised. 'address' is
791 the effective address of the memory exception. 'is_write' is 1 if a
792 write caused the exception and otherwise 0'. 'old_set' is the
793 signal set which should be restored */
/* i386 guest variant.  Returns 0 if the fault was not an MMU fault,
   1 if it was handled without raising a guest fault; otherwise delivers
   the guest exception and never returns here. */
794 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
795 int is_write
, sigset_t
*old_set
,
798 TranslationBlock
*tb
;
802 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
803 #if defined(DEBUG_SIGNAL)
804 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
805 pc
, address
, is_write
, *(unsigned long *)old_set
);
807 /* XXX: locking issue */
/* A write fault on a page we write-protected for TB tracking is fixed
   up by page_unprotect() and is not a guest fault. */
808 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
812 /* see if it is an MMU fault */
813 ret
= cpu_x86_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
815 return 0; /* not an MMU fault */
817 return 1; /* the MMU fault was handled without causing real CPU fault */
818 /* now we have a real cpu fault */
821 /* the PC is inside the translated code. It means that we have
822 a virtual CPU fault */
823 cpu_restore_state(tb
, env
, pc
, puc
);
827 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
828 env
->eip
, env
->cr
[2], env
->error_code
);
830 /* we restore the process signal mask as the sigreturn should
831 do it (XXX: use sigsetjmp) */
832 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
/* Raise the guest exception inside the generated code path. */
833 raise_exception_err(env
->exception_index
, env
->error_code
);
835 /* activate soft MMU for this block */
836 env
->hflags
|= HF_SOFTMMU_MASK
;
837 cpu_resume_from_signal(env
, puc
);
839 /* never comes here */
843 #elif defined(TARGET_ARM)
844 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
845 int is_write
, sigset_t
*old_set
,
848 TranslationBlock
*tb
;
852 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
853 #if defined(DEBUG_SIGNAL)
854 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
855 pc
, address
, is_write
, *(unsigned long *)old_set
);
857 /* XXX: locking issue */
858 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
861 /* see if it is an MMU fault */
862 ret
= cpu_arm_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
864 return 0; /* not an MMU fault */
866 return 1; /* the MMU fault was handled without causing real CPU fault */
867 /* now we have a real cpu fault */
870 /* the PC is inside the translated code. It means that we have
871 a virtual CPU fault */
872 cpu_restore_state(tb
, env
, pc
, puc
);
874 /* we restore the process signal mask as the sigreturn should
875 do it (XXX: use sigsetjmp) */
876 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
878 /* never comes here */
881 #elif defined(TARGET_SPARC)
882 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
883 int is_write
, sigset_t
*old_set
,
886 TranslationBlock
*tb
;
890 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
891 #if defined(DEBUG_SIGNAL)
892 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
893 pc
, address
, is_write
, *(unsigned long *)old_set
);
895 /* XXX: locking issue */
896 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
899 /* see if it is an MMU fault */
900 ret
= cpu_sparc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
902 return 0; /* not an MMU fault */
904 return 1; /* the MMU fault was handled without causing real CPU fault */
905 /* now we have a real cpu fault */
908 /* the PC is inside the translated code. It means that we have
909 a virtual CPU fault */
910 cpu_restore_state(tb
, env
, pc
, puc
);
912 /* we restore the process signal mask as the sigreturn should
913 do it (XXX: use sigsetjmp) */
914 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
916 /* never comes here */
919 #elif defined (TARGET_PPC)
920 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
921 int is_write
, sigset_t
*old_set
,
924 TranslationBlock
*tb
;
928 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
929 #if defined(DEBUG_SIGNAL)
930 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
931 pc
, address
, is_write
, *(unsigned long *)old_set
);
933 /* XXX: locking issue */
934 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
938 /* see if it is an MMU fault */
939 ret
= cpu_ppc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
941 return 0; /* not an MMU fault */
943 return 1; /* the MMU fault was handled without causing real CPU fault */
945 /* now we have a real cpu fault */
948 /* the PC is inside the translated code. It means that we have
949 a virtual CPU fault */
950 cpu_restore_state(tb
, env
, pc
, puc
);
954 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
955 env
->nip
, env
->error_code
, tb
);
957 /* we restore the process signal mask as the sigreturn should
958 do it (XXX: use sigsetjmp) */
959 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
962 /* activate soft MMU for this block */
963 cpu_resume_from_signal(env
, puc
);
965 /* never comes here */
969 #elif defined(TARGET_M68K)
970 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
971 int is_write
, sigset_t
*old_set
,
974 TranslationBlock
*tb
;
978 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
979 #if defined(DEBUG_SIGNAL)
980 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
981 pc
, address
, is_write
, *(unsigned long *)old_set
);
983 /* XXX: locking issue */
984 if (is_write
&& page_unprotect(address
, pc
, puc
)) {
987 /* see if it is an MMU fault */
988 ret
= cpu_m68k_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
990 return 0; /* not an MMU fault */
992 return 1; /* the MMU fault was handled without causing real CPU fault */
993 /* now we have a real cpu fault */
996 /* the PC is inside the translated code. It means that we have
997 a virtual CPU fault */
998 cpu_restore_state(tb
, env
, pc
, puc
);
1000 /* we restore the process signal mask as the sigreturn should
1001 do it (XXX: use sigsetjmp) */
1002 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1004 /* never comes here */
1008 #elif defined (TARGET_MIPS)
1009 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1010 int is_write
, sigset_t
*old_set
,
1013 TranslationBlock
*tb
;
1017 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1018 #if defined(DEBUG_SIGNAL)
1019 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1020 pc
, address
, is_write
, *(unsigned long *)old_set
);
1022 /* XXX: locking issue */
1023 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1027 /* see if it is an MMU fault */
1028 ret
= cpu_mips_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1030 return 0; /* not an MMU fault */
1032 return 1; /* the MMU fault was handled without causing real CPU fault */
1034 /* now we have a real cpu fault */
1035 tb
= tb_find_pc(pc
);
1037 /* the PC is inside the translated code. It means that we have
1038 a virtual CPU fault */
1039 cpu_restore_state(tb
, env
, pc
, puc
);
1043 printf("PF exception: PC=0x" TARGET_FMT_lx
" error=0x%x %p\n",
1044 env
->PC
, env
->error_code
, tb
);
1046 /* we restore the process signal mask as the sigreturn should
1047 do it (XXX: use sigsetjmp) */
1048 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1051 /* activate soft MMU for this block */
1052 cpu_resume_from_signal(env
, puc
);
1054 /* never comes here */
1058 #elif defined (TARGET_MICROBLAZE)
1059 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1060 int is_write
, sigset_t
*old_set
,
1063 TranslationBlock
*tb
;
1067 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1068 #if defined(DEBUG_SIGNAL)
1069 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1070 pc
, address
, is_write
, *(unsigned long *)old_set
);
1072 /* XXX: locking issue */
1073 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1077 /* see if it is an MMU fault */
1078 ret
= cpu_mb_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1080 return 0; /* not an MMU fault */
1082 return 1; /* the MMU fault was handled without causing real CPU fault */
1084 /* now we have a real cpu fault */
1085 tb
= tb_find_pc(pc
);
1087 /* the PC is inside the translated code. It means that we have
1088 a virtual CPU fault */
1089 cpu_restore_state(tb
, env
, pc
, puc
);
1093 printf("PF exception: PC=0x" TARGET_FMT_lx
" error=0x%x %p\n",
1094 env
->PC
, env
->error_code
, tb
);
1096 /* we restore the process signal mask as the sigreturn should
1097 do it (XXX: use sigsetjmp) */
1098 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1101 /* activate soft MMU for this block */
1102 cpu_resume_from_signal(env
, puc
);
1104 /* never comes here */
1108 #elif defined (TARGET_SH4)
1109 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1110 int is_write
, sigset_t
*old_set
,
1113 TranslationBlock
*tb
;
1117 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1118 #if defined(DEBUG_SIGNAL)
1119 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1120 pc
, address
, is_write
, *(unsigned long *)old_set
);
1122 /* XXX: locking issue */
1123 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1127 /* see if it is an MMU fault */
1128 ret
= cpu_sh4_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1130 return 0; /* not an MMU fault */
1132 return 1; /* the MMU fault was handled without causing real CPU fault */
1134 /* now we have a real cpu fault */
1135 tb
= tb_find_pc(pc
);
1137 /* the PC is inside the translated code. It means that we have
1138 a virtual CPU fault */
1139 cpu_restore_state(tb
, env
, pc
, puc
);
1142 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1143 env
->nip
, env
->error_code
, tb
);
1145 /* we restore the process signal mask as the sigreturn should
1146 do it (XXX: use sigsetjmp) */
1147 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1149 /* never comes here */
1153 #elif defined (TARGET_ALPHA)
1154 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1155 int is_write
, sigset_t
*old_set
,
1158 TranslationBlock
*tb
;
1162 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1163 #if defined(DEBUG_SIGNAL)
1164 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1165 pc
, address
, is_write
, *(unsigned long *)old_set
);
1167 /* XXX: locking issue */
1168 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1172 /* see if it is an MMU fault */
1173 ret
= cpu_alpha_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1175 return 0; /* not an MMU fault */
1177 return 1; /* the MMU fault was handled without causing real CPU fault */
1179 /* now we have a real cpu fault */
1180 tb
= tb_find_pc(pc
);
1182 /* the PC is inside the translated code. It means that we have
1183 a virtual CPU fault */
1184 cpu_restore_state(tb
, env
, pc
, puc
);
1187 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1188 env
->nip
, env
->error_code
, tb
);
1190 /* we restore the process signal mask as the sigreturn should
1191 do it (XXX: use sigsetjmp) */
1192 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1194 /* never comes here */
1197 #elif defined (TARGET_CRIS)
/*
 * NOTE(review): extraction-garbled region (see numbering gaps); interior
 * lines are missing. Verify against upstream QEMU cpu-exec.c.
 *
 * CRIS-target SIGSEGV handler fragment -- same structure as the other target
 * variants in this dispatch: page_unprotect() for write faults, then
 * cpu_cris_handle_mmu_fault(), then TB lookup + cpu_restore_state() for a
 * real CPU fault, finally restore the signal mask.
 */
1198 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1199 int is_write
, sigset_t
*old_set
,
1202 TranslationBlock
*tb
;
1206 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1207 #if defined(DEBUG_SIGNAL)
1208 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1209 pc
, address
, is_write
, *(unsigned long *)old_set
);
1211 /* XXX: locking issue */
/* write fault on a page holding translated code: unprotect and retry */
1212 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1216 /* see if it is an MMU fault */
1217 ret
= cpu_cris_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1219 return 0; /* not an MMU fault */
1221 return 1; /* the MMU fault was handled without causing real CPU fault */
1223 /* now we have a real cpu fault */
1224 tb
= tb_find_pc(pc
);
1226 /* the PC is inside the translated code. It means that we have
1227 a virtual CPU fault */
1228 cpu_restore_state(tb
, env
, pc
, puc
);
1230 /* we restore the process signal mask as the sigreturn should
1231 do it (XXX: use sigsetjmp) */
1232 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1234 /* never comes here */
/* fallback branch of the target-CPU dispatch chain */
1239 #error unsupported target CPU
1242 #if defined(__i386__)
/*
 * NOTE(review): extraction-garbled region; interior lines missing (function
 * body opener, pc/trapno declarations, part of the TRAP_sig ternary).
 * Verify against upstream QEMU cpu-exec.c.
 *
 * i386-host signal-context accessors: EIP/trap-number/error-code/sigmask are
 * pulled from the platform-specific signal context (Darwin mcontext,
 * OpenBSD sigcontext, or Linux gregs).
 */
1244 #if defined(__APPLE__)
1245 # include <sys/ucontext.h>
1247 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1248 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1249 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1250 # define MASK_sig(context) ((context)->uc_sigmask)
1251 #elif defined(__OpenBSD__)
1252 # define EIP_sig(context) ((context)->sc_eip)
1253 # define TRAP_sig(context) ((context)->sc_trapno)
1254 # define ERROR_sig(context) ((context)->sc_err)
1255 # define MASK_sig(context) ((context)->sc_mask)
/* NOTE(review): the "#else" for the Linux branch appears to be missing here */
1257 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1258 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1259 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1260 # define MASK_sig(context) ((context)->uc_sigmask)
/*
 * i386-host SIGSEGV handler: extracts fault PC and trap info from the signal
 * context and forwards to handle_cpu_signal(). The visible
 * "(ERROR_sig >> 1) & 1" extracts the page-fault W/R bit of the x86 error
 * code; the leading "TRAP_sig(uc) == 0xe ?" part of that ternary is missing.
 */
1263 int cpu_signal_handler(int host_signum
, void *pinfo
,
1266 siginfo_t
*info
= pinfo
;
1267 #if defined(__OpenBSD__)
1268 struct sigcontext
*uc
= puc
;
1270 struct ucontext
*uc
= puc
;
1279 #define REG_TRAPNO TRAPNO
1282 trapno
= TRAP_sig(uc
);
1283 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1285 (ERROR_sig(uc
) >> 1) & 1 : 0,
1286 &MASK_sig(uc
), puc
);
1289 #elif defined(__x86_64__)
/*
 * NOTE(review): extraction-garbled region; interior lines missing (NetBSD
 * "#if defined(__NetBSD__)"-style opener for the _UC_MACHINE_PC branch is
 * presumably among them -- confirm). Verify against upstream QEMU cpu-exec.c.
 *
 * x86_64-host signal-context accessors: PC/trap-number/error-code/sigmask
 * per platform (the _UC_MACHINE_PC branch, OpenBSD sigcontext, Linux gregs).
 */
1292 #define PC_sig(context) _UC_MACHINE_PC(context)
1293 #define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
1294 #define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
1295 #define MASK_sig(context) ((context)->uc_sigmask)
1296 #elif defined(__OpenBSD__)
1297 #define PC_sig(context) ((context)->sc_rip)
1298 #define TRAP_sig(context) ((context)->sc_trapno)
1299 #define ERROR_sig(context) ((context)->sc_err)
1300 #define MASK_sig(context) ((context)->sc_mask)
/* NOTE(review): the "#else" for the Linux branch appears to be missing here */
1302 #define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP])
1303 #define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1304 #define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1305 #define MASK_sig(context) ((context)->uc_sigmask)
/*
 * x86_64-host SIGSEGV handler: trap 0xe is a page fault; bit 1 of the error
 * code distinguishes write (1) from read (0) access. Forwards to
 * handle_cpu_signal() with the saved signal mask.
 */
1308 int cpu_signal_handler(int host_signum
, void *pinfo
,
1311 siginfo_t
*info
= pinfo
;
1314 ucontext_t
*uc
= puc
;
1315 #elif defined(__OpenBSD__)
1316 struct sigcontext
*uc
= puc
;
1318 struct ucontext
*uc
= puc
;
1322 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1323 TRAP_sig(uc
) == 0xe ?
1324 (ERROR_sig(uc
) >> 1) & 1 : 0,
1325 &MASK_sig(uc
), puc
);
1328 #elif defined(_ARCH_PPC)
/*
 * NOTE(review): extraction-garbled region; interior lines missing (the
 * "#ifdef linux" / "#elif defined(__APPLE__)" openers for the two macro sets
 * are presumably among them -- confirm). Verify against upstream QEMU
 * cpu-exec.c.
 */
1330 /***********************************************************************
1331 * signal context platform-specific definitions
1335 /* All Registers access - only for local access */
1336 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1337 /* Gpr Registers access */
1338 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1339 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1340 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1341 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1342 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1343 # define LR_sig(context) REG_sig(link, context) /* Link register */
1344 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1345 /* Float Registers access */
1346 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1347 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1348 /* Exception Registers access */
1349 # define DAR_sig(context) REG_sig(dar, context)
1350 # define DSISR_sig(context) REG_sig(dsisr, context)
1351 # define TRAP_sig(context) REG_sig(trap, context)
/* Darwin (Mach) variant: registers live in uc_mcontext->ss/fs/es/vs */
1355 # include <sys/ucontext.h>
1356 typedef struct ucontext SIGCONTEXT
;
1357 /* All Registers access - only for local access */
1358 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1359 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1360 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1361 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1362 /* Gpr Registers access */
1363 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1364 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1365 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1366 # define CTR_sig(context) REG_sig(ctr, context)
1367 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1368 # define LR_sig(context) REG_sig(lr, context) /* Link register */
1369 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1370 /* Float Registers access */
1371 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1372 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1373 /* Exception Registers access */
1374 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1375 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1376 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1377 #endif /* __APPLE__ */
/*
 * PPC-host SIGSEGV handler: derives is_write from DSISR bits
 * (0x00800000 / 0x02000000 store-fault indications) unless the trap is
 * 0x400 (ISI, instruction fetch); forwards to handle_cpu_signal().
 */
1379 int cpu_signal_handler(int host_signum
, void *pinfo
,
1382 siginfo_t
*info
= pinfo
;
1383 struct ucontext
*uc
= puc
;
1391 if (DSISR_sig(uc
) & 0x00800000)
1394 if (TRAP_sig(uc
) != 0x400 && (DSISR_sig(uc
) & 0x02000000))
1397 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1398 is_write
, &uc
->uc_sigmask
, puc
);
1401 #elif defined(__alpha__)
/*
 * NOTE(review): extraction-garbled region; the switch cases decoding the
 * faulting instruction (lines 1414-1427 of the original) are missing.
 * Verify against upstream QEMU cpu-exec.c.
 *
 * Alpha-host SIGSEGV handler: reads the faulting instruction word at the
 * saved PC and decodes its primary opcode (insn >> 26) to determine whether
 * the access was a write, then forwards to handle_cpu_signal().
 */
1403 int cpu_signal_handler(int host_signum
, void *pinfo
,
1406 siginfo_t
*info
= pinfo
;
1407 struct ucontext
*uc
= puc
;
1408 uint32_t *pc
= uc
->uc_mcontext
.sc_pc
;
1409 uint32_t insn
= *pc
;
1412 /* XXX: need kernel patch to get write flag faster */
1413 switch (insn
>> 26) {
1428 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1429 is_write
, &uc
->uc_sigmask
, puc
);
1431 #elif defined(__sparc__)
/*
 * NOTE(review): extraction-garbled region; the op3 switch cases decoding
 * SPARC store instructions (lines 1461-1483 of the original) are missing.
 * Verify against upstream QEMU cpu-exec.c.
 *
 * SPARC-host SIGSEGV handler. PC and sigmask are recovered three different
 * ways: 32-bit/Solaris lays them out after the siginfo_t; 64-bit Linux uses
 * sigcontext.sigc_regs.tpc; OpenBSD uses sc_pc/sc_mask. is_write is decoded
 * from the faulting instruction (format-3 memory ops, op3 field).
 */
1433 int cpu_signal_handler(int host_signum
, void *pinfo
,
1436 siginfo_t
*info
= pinfo
;
1439 #if !defined(__arch64__) || defined(HOST_SOLARIS)
1440 uint32_t *regs
= (uint32_t *)(info
+ 1);
1441 void *sigmask
= (regs
+ 20);
1442 /* XXX: is there a standard glibc define ? */
1443 unsigned long pc
= regs
[1];
1446 struct sigcontext
*sc
= puc
;
1447 unsigned long pc
= sc
->sigc_regs
.tpc
;
1448 void *sigmask
= (void *)sc
->sigc_mask
;
1449 #elif defined(__OpenBSD__)
1450 struct sigcontext
*uc
= puc
;
1451 unsigned long pc
= uc
->sc_pc
;
1452 void *sigmask
= (void *)(long)uc
->sc_mask
;
1456 /* XXX: need kernel patch to get write flag faster */
1458 insn
= *(uint32_t *)pc
;
/* format-3 instruction (top two bits == 3) => load/store; check op3 field */
1459 if ((insn
>> 30) == 3) {
1460 switch((insn
>> 19) & 0x3f) {
1484 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1485 is_write
, sigmask
, NULL
);
1488 #elif defined(__arm__)
/*
 * NOTE(review): extraction-garbled region; interior lines missing (pc
 * declaration, #else, is_write argument of the final call). Verify against
 * upstream QEMU cpu-exec.c.
 *
 * ARM-host SIGSEGV handler: the saved PC field moved between glibc versions
 * (gregs[R15] before glibc 2.4, arm_pc after). is_write is never computed
 * here (see XXX), so it presumably defaults to 0 in a missing line.
 */
1490 int cpu_signal_handler(int host_signum
, void *pinfo
,
1493 siginfo_t
*info
= pinfo
;
1494 struct ucontext
*uc
= puc
;
1498 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1499 pc
= uc
->uc_mcontext
.gregs
[R15
];
1501 pc
= uc
->uc_mcontext
.arm_pc
;
1503 /* XXX: compute is_write */
1505 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1507 &uc
->uc_sigmask
, puc
);
1510 #elif defined(__mc68000)
/*
 * NOTE(review): extraction-garbled region; interior lines missing (pc and
 * is_write declarations). Verify against upstream QEMU cpu-exec.c.
 *
 * m68k-host SIGSEGV handler: PC is taken from gregs[16] of the ucontext
 * (gregs index 16 holds the program counter on this platform); is_write is
 * not computed (see XXX).
 */
1512 int cpu_signal_handler(int host_signum
, void *pinfo
,
1515 siginfo_t
*info
= pinfo
;
1516 struct ucontext
*uc
= puc
;
1520 pc
= uc
->uc_mcontext
.gregs
[16];
1521 /* XXX: compute is_write */
1523 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1525 &uc
->uc_sigmask
, puc
);
1528 #elif defined(__ia64)
/*
 * NOTE(review): extraction-garbled region; interior lines missing (the
 * "#ifndef __ISR_VALID" guard, ip/is_write declarations, the switch cases
 * for SIGILL/SIGFPE/SIGSEGV/SIGBUS, the default branch). Verify against
 * upstream QEMU cpu-exec.c.
 *
 * IA-64-host handler: for SIGSEGV with a valid ISR word in siginfo, bit 33
 * of si_isr (ISR.W) indicates a write access. Forwards the interrupted IP
 * and fault address to handle_cpu_signal().
 */
1531 /* This ought to be in <bits/siginfo.h>... */
1532 # define __ISR_VALID 1
1535 int cpu_signal_handler(int host_signum
, void *pinfo
, void *puc
)
1537 siginfo_t
*info
= pinfo
;
1538 struct ucontext
*uc
= puc
;
1542 ip
= uc
->uc_mcontext
.sc_ip
;
1543 switch (host_signum
) {
1549 if (info
->si_code
&& (info
->si_segvflags
& __ISR_VALID
))
1550 /* ISR.W (write-access) is bit 33: */
1551 is_write
= (info
->si_isr
>> 33) & 1;
1557 return handle_cpu_signal(ip
, (unsigned long)info
->si_addr
,
1559 &uc
->uc_sigmask
, puc
);
1562 #elif defined(__s390__)
/*
 * NOTE(review): extraction-garbled region; interior lines missing (pc and
 * is_write declarations). Verify against upstream QEMU cpu-exec.c.
 *
 * s390-host SIGSEGV handler: PC comes from the PSW address in the ucontext;
 * is_write is not computed (see XXX).
 */
1564 int cpu_signal_handler(int host_signum
, void *pinfo
,
1567 siginfo_t
*info
= pinfo
;
1568 struct ucontext
*uc
= puc
;
1572 pc
= uc
->uc_mcontext
.psw
.addr
;
1573 /* XXX: compute is_write */
1575 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1576 is_write
, &uc
->uc_sigmask
, puc
);
1579 #elif defined(__mips__)
/*
 * NOTE(review): extraction-garbled region; interior lines missing (is_write
 * declaration). Verify against upstream QEMU cpu-exec.c.
 *
 * MIPS-host SIGSEGV handler: PC is the ucontext's mcontext.pc (greg_t);
 * is_write is not computed (see XXX).
 */
1581 int cpu_signal_handler(int host_signum
, void *pinfo
,
1584 siginfo_t
*info
= pinfo
;
1585 struct ucontext
*uc
= puc
;
1586 greg_t pc
= uc
->uc_mcontext
.pc
;
1589 /* XXX: compute is_write */
1591 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1592 is_write
, &uc
->uc_sigmask
, puc
);
1595 #elif defined(__hppa__)
/*
 * NOTE(review): extraction-garbled region; interior lines missing (pc
 * declaration, is_write argument). Verify against upstream QEMU cpu-exec.c.
 *
 * HPPA-host SIGSEGV handler: PC is the front element of the instruction
 * address offset queue (sc_iaoq[0]); is_write is not computed (see FIXME).
 */
1597 int cpu_signal_handler(int host_signum
, void *pinfo
,
1600 struct siginfo
*info
= pinfo
;
1601 struct ucontext
*uc
= puc
;
1605 pc
= uc
->uc_mcontext
.sc_iaoq
[0];
1606 /* FIXME: compute is_write */
1608 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1610 &uc
->uc_sigmask
, puc
);
/* fallback branch of the host-CPU dispatch chain */
1615 #error host CPU specific signal handler needed
1619 #endif /* !defined(CONFIG_SOFTMMU) */