2 * i386 emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
26 #if !defined(CONFIG_SOFTMMU)
38 #include <sys/ucontext.h>
42 #if defined(__sparc__) && !defined(HOST_SOLARIS)
43 // Work around ugly bugs in glibc that mangle global register contents
45 #define env cpu_single_env
/* Set by tb_gen_code() when TBs may have been invalidated while
   translating; cpu_exec() checks it to force a fresh TB lookup. */
int tb_invalidated_flag;
51 //#define DEBUG_SIGNAL
53 int qemu_cpu_has_work(CPUState
*env
)
55 return cpu_has_work(env
);
58 void cpu_loop_exit(void)
60 /* NOTE: the register at this point must be saved by hand because
61 longjmp restore them */
63 longjmp(env
->jmp_env
, 1);
66 /* exit the current TB from a signal handler. The host registers are
67 restored in a state compatible with the CPU emulator
69 void cpu_resume_from_signal(CPUState
*env1
, void *puc
)
71 #if !defined(CONFIG_SOFTMMU)
73 struct ucontext
*uc
= puc
;
74 #elif defined(__OpenBSD__)
75 struct sigcontext
*uc
= puc
;
81 /* XXX: restore cpu registers saved in host registers */
83 #if !defined(CONFIG_SOFTMMU)
85 /* XXX: use siglongjmp ? */
87 sigprocmask(SIG_SETMASK
, &uc
->uc_sigmask
, NULL
);
88 #elif defined(__OpenBSD__)
89 sigprocmask(SIG_SETMASK
, &uc
->sc_mask
, NULL
);
93 env
->exception_index
= -1;
94 longjmp(env
->jmp_env
, 1);
97 /* Execute the code without caching the generated code. An interpreter
98 could be used if available. */
99 static void cpu_exec_nocache(int max_cycles
, TranslationBlock
*orig_tb
)
101 unsigned long next_tb
;
102 TranslationBlock
*tb
;
104 /* Should never happen.
105 We only end up here when an existing TB is too long. */
106 if (max_cycles
> CF_COUNT_MASK
)
107 max_cycles
= CF_COUNT_MASK
;
109 tb
= tb_gen_code(env
, orig_tb
->pc
, orig_tb
->cs_base
, orig_tb
->flags
,
111 env
->current_tb
= tb
;
112 /* execute the generated code */
113 next_tb
= tcg_qemu_tb_exec(tb
->tc_ptr
);
115 if ((next_tb
& 3) == 2) {
116 /* Restore PC. This may happen if async event occurs before
117 the TB starts executing. */
118 cpu_pc_from_tb(env
, tb
);
120 tb_phys_invalidate(tb
, -1);
124 static TranslationBlock
*tb_find_slow(target_ulong pc
,
125 target_ulong cs_base
,
128 TranslationBlock
*tb
, **ptb1
;
130 target_ulong phys_pc
, phys_page1
, phys_page2
, virt_page2
;
132 tb_invalidated_flag
= 0;
134 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
136 /* find translated block using physical mappings */
137 phys_pc
= get_phys_addr_code(env
, pc
);
138 phys_page1
= phys_pc
& TARGET_PAGE_MASK
;
140 h
= tb_phys_hash_func(phys_pc
);
141 ptb1
= &tb_phys_hash
[h
];
147 tb
->page_addr
[0] == phys_page1
&&
148 tb
->cs_base
== cs_base
&&
149 tb
->flags
== flags
) {
150 /* check next page if needed */
151 if (tb
->page_addr
[1] != -1) {
152 virt_page2
= (pc
& TARGET_PAGE_MASK
) +
154 phys_page2
= get_phys_addr_code(env
, virt_page2
);
155 if (tb
->page_addr
[1] == phys_page2
)
161 ptb1
= &tb
->phys_hash_next
;
164 /* if no translated code available, then translate it now */
165 tb
= tb_gen_code(env
, pc
, cs_base
, flags
, 0);
168 /* we add the TB in the virtual pc hash table */
169 env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)] = tb
;
173 static inline TranslationBlock
*tb_find_fast(void)
175 TranslationBlock
*tb
;
176 target_ulong cs_base
, pc
;
179 /* we record a subset of the CPU state. It will
180 always be the same before a given translated block
182 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &flags
);
183 tb
= env
->tb_jmp_cache
[tb_jmp_cache_hash_func(pc
)];
184 if (unlikely(!tb
|| tb
->pc
!= pc
|| tb
->cs_base
!= cs_base
||
185 tb
->flags
!= flags
)) {
186 tb
= tb_find_slow(pc
, cs_base
, flags
);
191 static CPUDebugExcpHandler
*debug_excp_handler
;
193 CPUDebugExcpHandler
*cpu_set_debug_excp_handler(CPUDebugExcpHandler
*handler
)
195 CPUDebugExcpHandler
*old_handler
= debug_excp_handler
;
197 debug_excp_handler
= handler
;
201 static void cpu_handle_debug_exception(CPUState
*env
)
205 if (!env
->watchpoint_hit
)
206 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
)
207 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
209 if (debug_excp_handler
)
210 debug_excp_handler(env
);
213 /* main execution loop */
215 int cpu_exec(CPUState
*env1
)
217 #define DECLARE_HOST_REGS 1
218 #include "hostregs_helper.h"
219 int ret
, interrupt_request
;
220 TranslationBlock
*tb
;
222 unsigned long next_tb
;
224 if (cpu_halted(env1
) == EXCP_HALTED
)
227 cpu_single_env
= env1
;
229 /* first we save global registers */
230 #define SAVE_HOST_REGS 1
231 #include "hostregs_helper.h"
235 #if defined(TARGET_I386)
236 /* put eflags in CPU temporary format */
237 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
238 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
239 CC_OP
= CC_OP_EFLAGS
;
240 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
241 #elif defined(TARGET_SPARC)
242 #elif defined(TARGET_M68K)
243 env
->cc_op
= CC_OP_FLAGS
;
244 env
->cc_dest
= env
->sr
& 0xf;
245 env
->cc_x
= (env
->sr
>> 4) & 1;
246 #elif defined(TARGET_ALPHA)
247 #elif defined(TARGET_ARM)
248 #elif defined(TARGET_PPC)
249 #elif defined(TARGET_MICROBLAZE)
250 #elif defined(TARGET_MIPS)
251 #elif defined(TARGET_SH4)
252 #elif defined(TARGET_CRIS)
255 #error unsupported target CPU
257 env
->exception_index
= -1;
259 /* prepare setjmp context for exception handling */
261 if (setjmp(env
->jmp_env
) == 0) {
262 #if defined(__sparc__) && !defined(HOST_SOLARIS)
264 env
= cpu_single_env
;
265 #define env cpu_single_env
267 env
->current_tb
= NULL
;
268 /* if an exception is pending, we execute it here */
269 if (env
->exception_index
>= 0) {
270 if (env
->exception_index
>= EXCP_INTERRUPT
) {
271 /* exit request from the cpu execution loop */
272 ret
= env
->exception_index
;
273 if (ret
== EXCP_DEBUG
)
274 cpu_handle_debug_exception(env
);
277 #if defined(CONFIG_USER_ONLY)
278 /* if user mode only, we simulate a fake exception
279 which will be handled outside the cpu execution
281 #if defined(TARGET_I386)
282 do_interrupt_user(env
->exception_index
,
283 env
->exception_is_int
,
285 env
->exception_next_eip
);
286 /* successfully delivered */
287 env
->old_exception
= -1;
289 ret
= env
->exception_index
;
292 #if defined(TARGET_I386)
293 /* simulate a real cpu exception. On i386, it can
294 trigger new exceptions, but we do not handle
295 double or triple faults yet. */
296 do_interrupt(env
->exception_index
,
297 env
->exception_is_int
,
299 env
->exception_next_eip
, 0);
300 /* successfully delivered */
301 env
->old_exception
= -1;
302 #elif defined(TARGET_PPC)
304 #elif defined(TARGET_MICROBLAZE)
306 #elif defined(TARGET_MIPS)
308 #elif defined(TARGET_SPARC)
310 #elif defined(TARGET_ARM)
312 #elif defined(TARGET_SH4)
314 #elif defined(TARGET_ALPHA)
316 #elif defined(TARGET_CRIS)
318 #elif defined(TARGET_M68K)
323 env
->exception_index
= -1;
326 if (kqemu_is_ok(env
) && env
->interrupt_request
== 0 && env
->exit_request
== 0) {
328 env
->eflags
= env
->eflags
| helper_cc_compute_all(CC_OP
) | (DF
& DF_MASK
);
329 ret
= kqemu_cpu_exec(env
);
330 /* put eflags in CPU temporary format */
331 CC_SRC
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
332 DF
= 1 - (2 * ((env
->eflags
>> 10) & 1));
333 CC_OP
= CC_OP_EFLAGS
;
334 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
337 longjmp(env
->jmp_env
, 1);
338 } else if (ret
== 2) {
339 /* softmmu execution needed */
341 if (env
->interrupt_request
!= 0 || env
->exit_request
!= 0) {
342 /* hardware interrupt will be executed just after */
344 /* otherwise, we restart */
345 longjmp(env
->jmp_env
, 1);
353 longjmp(env
->jmp_env
, 1);
356 next_tb
= 0; /* force lookup of first TB */
358 interrupt_request
= env
->interrupt_request
;
359 if (unlikely(interrupt_request
)) {
360 if (unlikely(env
->singlestep_enabled
& SSTEP_NOIRQ
)) {
361 /* Mask out external interrupts for this step. */
362 interrupt_request
&= ~(CPU_INTERRUPT_HARD
|
367 if (interrupt_request
& CPU_INTERRUPT_DEBUG
) {
368 env
->interrupt_request
&= ~CPU_INTERRUPT_DEBUG
;
369 env
->exception_index
= EXCP_DEBUG
;
372 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
373 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
374 defined(TARGET_MICROBLAZE)
375 if (interrupt_request
& CPU_INTERRUPT_HALT
) {
376 env
->interrupt_request
&= ~CPU_INTERRUPT_HALT
;
378 env
->exception_index
= EXCP_HLT
;
382 #if defined(TARGET_I386)
383 if (env
->hflags2
& HF2_GIF_MASK
) {
384 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
385 !(env
->hflags
& HF_SMM_MASK
)) {
386 svm_check_intercept(SVM_EXIT_SMI
);
387 env
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
390 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
391 !(env
->hflags2
& HF2_NMI_MASK
)) {
392 env
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
393 env
->hflags2
|= HF2_NMI_MASK
;
394 do_interrupt(EXCP02_NMI
, 0, 0, 0, 1);
396 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
397 (((env
->hflags2
& HF2_VINTR_MASK
) &&
398 (env
->hflags2
& HF2_HIF_MASK
)) ||
399 (!(env
->hflags2
& HF2_VINTR_MASK
) &&
400 (env
->eflags
& IF_MASK
&&
401 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
))))) {
403 svm_check_intercept(SVM_EXIT_INTR
);
404 env
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VIRQ
);
405 intno
= cpu_get_pic_interrupt(env
);
406 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Servicing hardware INT=0x%02x\n", intno
);
407 #if defined(__sparc__) && !defined(HOST_SOLARIS)
409 env
= cpu_single_env
;
410 #define env cpu_single_env
412 do_interrupt(intno
, 0, 0, 0, 1);
413 /* ensure that no TB jump will be modified as
414 the program flow was changed */
416 #if !defined(CONFIG_USER_ONLY)
417 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
418 (env
->eflags
& IF_MASK
) &&
419 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
421 /* FIXME: this should respect TPR */
422 svm_check_intercept(SVM_EXIT_VINTR
);
423 intno
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_vector
));
424 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Servicing virtual hardware INT=0x%02x\n", intno
);
425 do_interrupt(intno
, 0, 0, 0, 1);
426 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
431 #elif defined(TARGET_PPC)
433 if ((interrupt_request
& CPU_INTERRUPT_RESET
)) {
437 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
438 ppc_hw_interrupt(env
);
439 if (env
->pending_interrupts
== 0)
440 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
443 #elif defined(TARGET_MICROBLAZE)
444 if ((interrupt_request
& CPU_INTERRUPT_HARD
)
445 && (env
->sregs
[SR_MSR
] & MSR_IE
)
446 && !(env
->sregs
[SR_MSR
] & (MSR_EIP
| MSR_BIP
))
447 && !(env
->iflags
& (D_FLAG
| IMM_FLAG
))) {
448 env
->exception_index
= EXCP_IRQ
;
452 #elif defined(TARGET_MIPS)
453 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
454 (env
->CP0_Status
& env
->CP0_Cause
& CP0Ca_IP_mask
) &&
455 (env
->CP0_Status
& (1 << CP0St_IE
)) &&
456 !(env
->CP0_Status
& (1 << CP0St_EXL
)) &&
457 !(env
->CP0_Status
& (1 << CP0St_ERL
)) &&
458 !(env
->hflags
& MIPS_HFLAG_DM
)) {
460 env
->exception_index
= EXCP_EXT_INTERRUPT
;
465 #elif defined(TARGET_SPARC)
466 if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
468 int pil
= env
->interrupt_index
& 15;
469 int type
= env
->interrupt_index
& 0xf0;
471 if (((type
== TT_EXTINT
) &&
472 (pil
== 15 || pil
> env
->psrpil
)) ||
474 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
475 env
->exception_index
= env
->interrupt_index
;
477 env
->interrupt_index
= 0;
478 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
483 } else if (interrupt_request
& CPU_INTERRUPT_TIMER
) {
484 //do_interrupt(0, 0, 0, 0, 0);
485 env
->interrupt_request
&= ~CPU_INTERRUPT_TIMER
;
487 #elif defined(TARGET_ARM)
488 if (interrupt_request
& CPU_INTERRUPT_FIQ
489 && !(env
->uncached_cpsr
& CPSR_F
)) {
490 env
->exception_index
= EXCP_FIQ
;
494 /* ARMv7-M interrupt return works by loading a magic value
495 into the PC. On real hardware the load causes the
496 return to occur. The qemu implementation performs the
497 jump normally, then does the exception return when the
498 CPU tries to execute code at the magic address.
499 This will cause the magic PC value to be pushed to
500 the stack if an interrupt occured at the wrong time.
501 We avoid this by disabling interrupts when
502 pc contains a magic address. */
503 if (interrupt_request
& CPU_INTERRUPT_HARD
504 && ((IS_M(env
) && env
->regs
[15] < 0xfffffff0)
505 || !(env
->uncached_cpsr
& CPSR_I
))) {
506 env
->exception_index
= EXCP_IRQ
;
510 #elif defined(TARGET_SH4)
511 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
515 #elif defined(TARGET_ALPHA)
516 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
520 #elif defined(TARGET_CRIS)
521 if (interrupt_request
& CPU_INTERRUPT_HARD
522 && (env
->pregs
[PR_CCS
] & I_FLAG
)) {
523 env
->exception_index
= EXCP_IRQ
;
527 if (interrupt_request
& CPU_INTERRUPT_NMI
528 && (env
->pregs
[PR_CCS
] & M_FLAG
)) {
529 env
->exception_index
= EXCP_NMI
;
533 #elif defined(TARGET_M68K)
534 if (interrupt_request
& CPU_INTERRUPT_HARD
535 && ((env
->sr
& SR_I
) >> SR_I_SHIFT
)
536 < env
->pending_level
) {
537 /* Real hardware gets the interrupt vector via an
538 IACK cycle at this point. Current emulated
539 hardware doesn't rely on this, so we
540 provide/save the vector when the interrupt is
542 env
->exception_index
= env
->pending_vector
;
547 /* Don't use the cached interupt_request value,
548 do_interrupt may have updated the EXITTB flag. */
549 if (env
->interrupt_request
& CPU_INTERRUPT_EXITTB
) {
550 env
->interrupt_request
&= ~CPU_INTERRUPT_EXITTB
;
551 /* ensure that no TB jump will be modified as
552 the program flow was changed */
556 if (unlikely(env
->exit_request
)) {
557 env
->exit_request
= 0;
558 env
->exception_index
= EXCP_INTERRUPT
;
562 if (qemu_loglevel_mask(CPU_LOG_TB_CPU
)) {
563 /* restore flags in standard format */
565 #if defined(TARGET_I386)
566 env
->eflags
= env
->eflags
| helper_cc_compute_all(CC_OP
) | (DF
& DF_MASK
);
567 log_cpu_state(env
, X86_DUMP_CCOP
);
568 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
569 #elif defined(TARGET_ARM)
570 log_cpu_state(env
, 0);
571 #elif defined(TARGET_SPARC)
572 log_cpu_state(env
, 0);
573 #elif defined(TARGET_PPC)
574 log_cpu_state(env
, 0);
575 #elif defined(TARGET_M68K)
576 cpu_m68k_flush_flags(env
, env
->cc_op
);
577 env
->cc_op
= CC_OP_FLAGS
;
578 env
->sr
= (env
->sr
& 0xffe0)
579 | env
->cc_dest
| (env
->cc_x
<< 4);
580 log_cpu_state(env
, 0);
581 #elif defined(TARGET_MICROBLAZE)
582 log_cpu_state(env
, 0);
583 #elif defined(TARGET_MIPS)
584 log_cpu_state(env
, 0);
585 #elif defined(TARGET_SH4)
586 log_cpu_state(env
, 0);
587 #elif defined(TARGET_ALPHA)
588 log_cpu_state(env
, 0);
589 #elif defined(TARGET_CRIS)
590 log_cpu_state(env
, 0);
592 #error unsupported target CPU
598 /* Note: we do it here to avoid a gcc bug on Mac OS X when
599 doing it in tb_find_slow */
600 if (tb_invalidated_flag
) {
601 /* as some TB could have been invalidated because
602 of memory exceptions while generating the code, we
603 must recompute the hash index here */
605 tb_invalidated_flag
= 0;
608 qemu_log_mask(CPU_LOG_EXEC
, "Trace 0x%08lx [" TARGET_FMT_lx
"] %s\n",
609 (long)tb
->tc_ptr
, tb
->pc
,
610 lookup_symbol(tb
->pc
));
612 /* see if we can patch the calling TB. When the TB
613 spans two pages, we cannot safely do a direct
618 (env
->kqemu_enabled
!= 2) &&
620 tb
->page_addr
[1] == -1) {
621 tb_add_jump((TranslationBlock
*)(next_tb
& ~3), next_tb
& 3, tb
);
624 spin_unlock(&tb_lock
);
625 env
->current_tb
= tb
;
627 /* cpu_interrupt might be called while translating the
628 TB, but before it is linked into a potentially
629 infinite loop and becomes env->current_tb. Avoid
630 starting execution if there is a pending interrupt. */
631 if (unlikely (env
->exit_request
))
632 env
->current_tb
= NULL
;
634 while (env
->current_tb
) {
636 /* execute the generated code */
637 #if defined(__sparc__) && !defined(HOST_SOLARIS)
639 env
= cpu_single_env
;
640 #define env cpu_single_env
642 next_tb
= tcg_qemu_tb_exec(tc_ptr
);
643 env
->current_tb
= NULL
;
644 if ((next_tb
& 3) == 2) {
645 /* Instruction counter expired. */
647 tb
= (TranslationBlock
*)(long)(next_tb
& ~3);
649 cpu_pc_from_tb(env
, tb
);
650 insns_left
= env
->icount_decr
.u32
;
651 if (env
->icount_extra
&& insns_left
>= 0) {
652 /* Refill decrementer and continue execution. */
653 env
->icount_extra
+= insns_left
;
654 if (env
->icount_extra
> 0xffff) {
657 insns_left
= env
->icount_extra
;
659 env
->icount_extra
-= insns_left
;
660 env
->icount_decr
.u16
.low
= insns_left
;
662 if (insns_left
> 0) {
663 /* Execute remaining instructions. */
664 cpu_exec_nocache(insns_left
, tb
);
666 env
->exception_index
= EXCP_INTERRUPT
;
672 /* reset soft MMU for next block (it can currently
673 only be set by a memory fault) */
674 #if defined(CONFIG_KQEMU)
675 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
676 if (kqemu_is_ok(env
) &&
677 (cpu_get_time_fast() - env
->last_io_time
) >= MIN_CYCLE_BEFORE_SWITCH
) {
688 #if defined(TARGET_I386)
689 /* restore flags in standard format */
690 env
->eflags
= env
->eflags
| helper_cc_compute_all(CC_OP
) | (DF
& DF_MASK
);
691 #elif defined(TARGET_ARM)
692 /* XXX: Save/restore host fpu exception state?. */
693 #elif defined(TARGET_SPARC)
694 #elif defined(TARGET_PPC)
695 #elif defined(TARGET_M68K)
696 cpu_m68k_flush_flags(env
, env
->cc_op
);
697 env
->cc_op
= CC_OP_FLAGS
;
698 env
->sr
= (env
->sr
& 0xffe0)
699 | env
->cc_dest
| (env
->cc_x
<< 4);
700 #elif defined(TARGET_MICROBLAZE)
701 #elif defined(TARGET_MIPS)
702 #elif defined(TARGET_SH4)
703 #elif defined(TARGET_ALPHA)
704 #elif defined(TARGET_CRIS)
707 #error unsupported target CPU
710 /* restore global registers */
711 #include "hostregs_helper.h"
713 /* fail safe : never use cpu_single_env outside cpu_exec() */
714 cpu_single_env
= NULL
;
718 /* must only be called from the generated code as an exception can be
720 void tb_invalidate_page_range(target_ulong start
, target_ulong end
)
722 /* XXX: cannot enable it yet because it yields to MMU exception
723 where NIP != read address on PowerPC */
725 target_ulong phys_addr
;
726 phys_addr
= get_phys_addr_code(env
, start
);
727 tb_invalidate_phys_page_range(phys_addr
, phys_addr
+ end
- start
, 0);
731 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
733 void cpu_x86_load_seg(CPUX86State
*s
, int seg_reg
, int selector
)
735 CPUX86State
*saved_env
;
739 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
741 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
742 (selector
<< 4), 0xffff, 0);
744 helper_load_seg(seg_reg
, selector
);
749 void cpu_x86_fsave(CPUX86State
*s
, target_ulong ptr
, int data32
)
751 CPUX86State
*saved_env
;
756 helper_fsave(ptr
, data32
);
761 void cpu_x86_frstor(CPUX86State
*s
, target_ulong ptr
, int data32
)
763 CPUX86State
*saved_env
;
768 helper_frstor(ptr
, data32
);
773 #endif /* TARGET_I386 */
775 #if !defined(CONFIG_SOFTMMU)
777 #if defined(TARGET_I386)
779 /* 'pc' is the host PC at which the exception was raised. 'address' is
780 the effective address of the memory exception. 'is_write' is 1 if a
781 write caused the exception and otherwise 0'. 'old_set' is the
782 signal set which should be restored */
783 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
784 int is_write
, sigset_t
*old_set
,
787 TranslationBlock
*tb
;
791 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
792 #if defined(DEBUG_SIGNAL)
793 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
794 pc
, address
, is_write
, *(unsigned long *)old_set
);
796 /* XXX: locking issue */
797 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
801 /* see if it is an MMU fault */
802 ret
= cpu_x86_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
804 return 0; /* not an MMU fault */
806 return 1; /* the MMU fault was handled without causing real CPU fault */
807 /* now we have a real cpu fault */
810 /* the PC is inside the translated code. It means that we have
811 a virtual CPU fault */
812 cpu_restore_state(tb
, env
, pc
, puc
);
816 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
817 env
->eip
, env
->cr
[2], env
->error_code
);
819 /* we restore the process signal mask as the sigreturn should
820 do it (XXX: use sigsetjmp) */
821 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
822 raise_exception_err(env
->exception_index
, env
->error_code
);
824 /* activate soft MMU for this block */
825 env
->hflags
|= HF_SOFTMMU_MASK
;
826 cpu_resume_from_signal(env
, puc
);
828 /* never comes here */
832 #elif defined(TARGET_ARM)
833 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
834 int is_write
, sigset_t
*old_set
,
837 TranslationBlock
*tb
;
841 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
842 #if defined(DEBUG_SIGNAL)
843 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
844 pc
, address
, is_write
, *(unsigned long *)old_set
);
846 /* XXX: locking issue */
847 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
850 /* see if it is an MMU fault */
851 ret
= cpu_arm_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
853 return 0; /* not an MMU fault */
855 return 1; /* the MMU fault was handled without causing real CPU fault */
856 /* now we have a real cpu fault */
859 /* the PC is inside the translated code. It means that we have
860 a virtual CPU fault */
861 cpu_restore_state(tb
, env
, pc
, puc
);
863 /* we restore the process signal mask as the sigreturn should
864 do it (XXX: use sigsetjmp) */
865 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
867 /* never comes here */
870 #elif defined(TARGET_SPARC)
871 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
872 int is_write
, sigset_t
*old_set
,
875 TranslationBlock
*tb
;
879 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
880 #if defined(DEBUG_SIGNAL)
881 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
882 pc
, address
, is_write
, *(unsigned long *)old_set
);
884 /* XXX: locking issue */
885 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
888 /* see if it is an MMU fault */
889 ret
= cpu_sparc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
891 return 0; /* not an MMU fault */
893 return 1; /* the MMU fault was handled without causing real CPU fault */
894 /* now we have a real cpu fault */
897 /* the PC is inside the translated code. It means that we have
898 a virtual CPU fault */
899 cpu_restore_state(tb
, env
, pc
, puc
);
901 /* we restore the process signal mask as the sigreturn should
902 do it (XXX: use sigsetjmp) */
903 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
905 /* never comes here */
908 #elif defined (TARGET_PPC)
909 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
910 int is_write
, sigset_t
*old_set
,
913 TranslationBlock
*tb
;
917 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
918 #if defined(DEBUG_SIGNAL)
919 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
920 pc
, address
, is_write
, *(unsigned long *)old_set
);
922 /* XXX: locking issue */
923 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
927 /* see if it is an MMU fault */
928 ret
= cpu_ppc_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
930 return 0; /* not an MMU fault */
932 return 1; /* the MMU fault was handled without causing real CPU fault */
934 /* now we have a real cpu fault */
937 /* the PC is inside the translated code. It means that we have
938 a virtual CPU fault */
939 cpu_restore_state(tb
, env
, pc
, puc
);
943 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
944 env
->nip
, env
->error_code
, tb
);
946 /* we restore the process signal mask as the sigreturn should
947 do it (XXX: use sigsetjmp) */
948 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
951 /* activate soft MMU for this block */
952 cpu_resume_from_signal(env
, puc
);
954 /* never comes here */
958 #elif defined(TARGET_M68K)
959 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
960 int is_write
, sigset_t
*old_set
,
963 TranslationBlock
*tb
;
967 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
968 #if defined(DEBUG_SIGNAL)
969 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
970 pc
, address
, is_write
, *(unsigned long *)old_set
);
972 /* XXX: locking issue */
973 if (is_write
&& page_unprotect(address
, pc
, puc
)) {
976 /* see if it is an MMU fault */
977 ret
= cpu_m68k_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
979 return 0; /* not an MMU fault */
981 return 1; /* the MMU fault was handled without causing real CPU fault */
982 /* now we have a real cpu fault */
985 /* the PC is inside the translated code. It means that we have
986 a virtual CPU fault */
987 cpu_restore_state(tb
, env
, pc
, puc
);
989 /* we restore the process signal mask as the sigreturn should
990 do it (XXX: use sigsetjmp) */
991 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
993 /* never comes here */
997 #elif defined (TARGET_MIPS)
998 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
999 int is_write
, sigset_t
*old_set
,
1002 TranslationBlock
*tb
;
1006 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1007 #if defined(DEBUG_SIGNAL)
1008 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1009 pc
, address
, is_write
, *(unsigned long *)old_set
);
1011 /* XXX: locking issue */
1012 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1016 /* see if it is an MMU fault */
1017 ret
= cpu_mips_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1019 return 0; /* not an MMU fault */
1021 return 1; /* the MMU fault was handled without causing real CPU fault */
1023 /* now we have a real cpu fault */
1024 tb
= tb_find_pc(pc
);
1026 /* the PC is inside the translated code. It means that we have
1027 a virtual CPU fault */
1028 cpu_restore_state(tb
, env
, pc
, puc
);
1032 printf("PF exception: PC=0x" TARGET_FMT_lx
" error=0x%x %p\n",
1033 env
->PC
, env
->error_code
, tb
);
1035 /* we restore the process signal mask as the sigreturn should
1036 do it (XXX: use sigsetjmp) */
1037 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1040 /* activate soft MMU for this block */
1041 cpu_resume_from_signal(env
, puc
);
1043 /* never comes here */
1047 #elif defined (TARGET_MICROBLAZE)
1048 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1049 int is_write
, sigset_t
*old_set
,
1052 TranslationBlock
*tb
;
1056 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1057 #if defined(DEBUG_SIGNAL)
1058 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1059 pc
, address
, is_write
, *(unsigned long *)old_set
);
1061 /* XXX: locking issue */
1062 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1066 /* see if it is an MMU fault */
1067 ret
= cpu_mb_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1069 return 0; /* not an MMU fault */
1071 return 1; /* the MMU fault was handled without causing real CPU fault */
1073 /* now we have a real cpu fault */
1074 tb
= tb_find_pc(pc
);
1076 /* the PC is inside the translated code. It means that we have
1077 a virtual CPU fault */
1078 cpu_restore_state(tb
, env
, pc
, puc
);
1082 printf("PF exception: PC=0x" TARGET_FMT_lx
" error=0x%x %p\n",
1083 env
->PC
, env
->error_code
, tb
);
1085 /* we restore the process signal mask as the sigreturn should
1086 do it (XXX: use sigsetjmp) */
1087 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1090 /* activate soft MMU for this block */
1091 cpu_resume_from_signal(env
, puc
);
1093 /* never comes here */
1097 #elif defined (TARGET_SH4)
1098 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1099 int is_write
, sigset_t
*old_set
,
1102 TranslationBlock
*tb
;
1106 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1107 #if defined(DEBUG_SIGNAL)
1108 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1109 pc
, address
, is_write
, *(unsigned long *)old_set
);
1111 /* XXX: locking issue */
1112 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1116 /* see if it is an MMU fault */
1117 ret
= cpu_sh4_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1119 return 0; /* not an MMU fault */
1121 return 1; /* the MMU fault was handled without causing real CPU fault */
1123 /* now we have a real cpu fault */
1124 tb
= tb_find_pc(pc
);
1126 /* the PC is inside the translated code. It means that we have
1127 a virtual CPU fault */
1128 cpu_restore_state(tb
, env
, pc
, puc
);
1131 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1132 env
->nip
, env
->error_code
, tb
);
1134 /* we restore the process signal mask as the sigreturn should
1135 do it (XXX: use sigsetjmp) */
1136 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1138 /* never comes here */
1142 #elif defined (TARGET_ALPHA)
1143 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1144 int is_write
, sigset_t
*old_set
,
1147 TranslationBlock
*tb
;
1151 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1152 #if defined(DEBUG_SIGNAL)
1153 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1154 pc
, address
, is_write
, *(unsigned long *)old_set
);
1156 /* XXX: locking issue */
1157 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1161 /* see if it is an MMU fault */
1162 ret
= cpu_alpha_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1164 return 0; /* not an MMU fault */
1166 return 1; /* the MMU fault was handled without causing real CPU fault */
1168 /* now we have a real cpu fault */
1169 tb
= tb_find_pc(pc
);
1171 /* the PC is inside the translated code. It means that we have
1172 a virtual CPU fault */
1173 cpu_restore_state(tb
, env
, pc
, puc
);
1176 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1177 env
->nip
, env
->error_code
, tb
);
1179 /* we restore the process signal mask as the sigreturn should
1180 do it (XXX: use sigsetjmp) */
1181 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1183 /* never comes here */
1186 #elif defined (TARGET_CRIS)
1187 static inline int handle_cpu_signal(unsigned long pc
, unsigned long address
,
1188 int is_write
, sigset_t
*old_set
,
1191 TranslationBlock
*tb
;
1195 env
= cpu_single_env
; /* XXX: find a correct solution for multithread */
1196 #if defined(DEBUG_SIGNAL)
1197 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1198 pc
, address
, is_write
, *(unsigned long *)old_set
);
1200 /* XXX: locking issue */
1201 if (is_write
&& page_unprotect(h2g(address
), pc
, puc
)) {
1205 /* see if it is an MMU fault */
1206 ret
= cpu_cris_handle_mmu_fault(env
, address
, is_write
, MMU_USER_IDX
, 0);
1208 return 0; /* not an MMU fault */
1210 return 1; /* the MMU fault was handled without causing real CPU fault */
1212 /* now we have a real cpu fault */
1213 tb
= tb_find_pc(pc
);
1215 /* the PC is inside the translated code. It means that we have
1216 a virtual CPU fault */
1217 cpu_restore_state(tb
, env
, pc
, puc
);
1219 /* we restore the process signal mask as the sigreturn should
1220 do it (XXX: use sigsetjmp) */
1221 sigprocmask(SIG_SETMASK
, old_set
, NULL
);
1223 /* never comes here */
1228 #error unsupported target CPU
1231 #if defined(__i386__)
1233 #if defined(__APPLE__)
1234 # include <sys/ucontext.h>
1236 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1237 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1238 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1239 # define MASK_sig(context) ((context)->uc_sigmask)
1240 #elif defined(__OpenBSD__)
1241 # define EIP_sig(context) ((context)->sc_eip)
1242 # define TRAP_sig(context) ((context)->sc_trapno)
1243 # define ERROR_sig(context) ((context)->sc_err)
1244 # define MASK_sig(context) ((context)->sc_mask)
1246 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1247 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1248 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1249 # define MASK_sig(context) ((context)->uc_sigmask)
1252 int cpu_signal_handler(int host_signum
, void *pinfo
,
1255 siginfo_t
*info
= pinfo
;
1256 #if defined(__OpenBSD__)
1257 struct sigcontext
*uc
= puc
;
1259 struct ucontext
*uc
= puc
;
1268 #define REG_TRAPNO TRAPNO
1271 trapno
= TRAP_sig(uc
);
1272 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1274 (ERROR_sig(uc
) >> 1) & 1 : 0,
1275 &MASK_sig(uc
), puc
);
1278 #elif defined(__x86_64__)
1281 #define PC_sig(context) _UC_MACHINE_PC(context)
1282 #define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
1283 #define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
1284 #define MASK_sig(context) ((context)->uc_sigmask)
1285 #elif defined(__OpenBSD__)
1286 #define PC_sig(context) ((context)->sc_rip)
1287 #define TRAP_sig(context) ((context)->sc_trapno)
1288 #define ERROR_sig(context) ((context)->sc_err)
1289 #define MASK_sig(context) ((context)->sc_mask)
1291 #define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP])
1292 #define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1293 #define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1294 #define MASK_sig(context) ((context)->uc_sigmask)
1297 int cpu_signal_handler(int host_signum
, void *pinfo
,
1300 siginfo_t
*info
= pinfo
;
1303 ucontext_t
*uc
= puc
;
1304 #elif defined(__OpenBSD__)
1305 struct sigcontext
*uc
= puc
;
1307 struct ucontext
*uc
= puc
;
1311 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1312 TRAP_sig(uc
) == 0xe ?
1313 (ERROR_sig(uc
) >> 1) & 1 : 0,
1314 &MASK_sig(uc
), puc
);
1317 #elif defined(_ARCH_PPC)
1319 /***********************************************************************
1320 * signal context platform-specific definitions
1324 /* All Registers access - only for local access */
1325 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1326 /* Gpr Registers access */
1327 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1328 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1329 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1330 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1331 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1332 # define LR_sig(context) REG_sig(link, context) /* Link register */
1333 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1334 /* Float Registers access */
1335 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1336 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1337 /* Exception Registers access */
1338 # define DAR_sig(context) REG_sig(dar, context)
1339 # define DSISR_sig(context) REG_sig(dsisr, context)
1340 # define TRAP_sig(context) REG_sig(trap, context)
1344 # include <sys/ucontext.h>
1345 typedef struct ucontext SIGCONTEXT
;
1346 /* All Registers access - only for local access */
1347 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1348 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1349 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1350 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1351 /* Gpr Registers access */
1352 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1353 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1354 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1355 # define CTR_sig(context) REG_sig(ctr, context)
1356 # define XER_sig(context) REG_sig(xer, context) /* Link register */
1357 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1358 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1359 /* Float Registers access */
1360 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1361 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1362 /* Exception Registers access */
1363 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1364 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1365 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1366 #endif /* __APPLE__ */
1368 int cpu_signal_handler(int host_signum
, void *pinfo
,
1371 siginfo_t
*info
= pinfo
;
1372 struct ucontext
*uc
= puc
;
1380 if (DSISR_sig(uc
) & 0x00800000)
1383 if (TRAP_sig(uc
) != 0x400 && (DSISR_sig(uc
) & 0x02000000))
1386 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1387 is_write
, &uc
->uc_sigmask
, puc
);
1390 #elif defined(__alpha__)
1392 int cpu_signal_handler(int host_signum
, void *pinfo
,
1395 siginfo_t
*info
= pinfo
;
1396 struct ucontext
*uc
= puc
;
1397 uint32_t *pc
= uc
->uc_mcontext
.sc_pc
;
1398 uint32_t insn
= *pc
;
1401 /* XXX: need kernel patch to get write flag faster */
1402 switch (insn
>> 26) {
1417 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1418 is_write
, &uc
->uc_sigmask
, puc
);
1420 #elif defined(__sparc__)
1422 int cpu_signal_handler(int host_signum
, void *pinfo
,
1425 siginfo_t
*info
= pinfo
;
1428 #if !defined(__arch64__) || defined(HOST_SOLARIS)
1429 uint32_t *regs
= (uint32_t *)(info
+ 1);
1430 void *sigmask
= (regs
+ 20);
1431 /* XXX: is there a standard glibc define ? */
1432 unsigned long pc
= regs
[1];
1435 struct sigcontext
*sc
= puc
;
1436 unsigned long pc
= sc
->sigc_regs
.tpc
;
1437 void *sigmask
= (void *)sc
->sigc_mask
;
1438 #elif defined(__OpenBSD__)
1439 struct sigcontext
*uc
= puc
;
1440 unsigned long pc
= uc
->sc_pc
;
1441 void *sigmask
= (void *)(long)uc
->sc_mask
;
1445 /* XXX: need kernel patch to get write flag faster */
1447 insn
= *(uint32_t *)pc
;
1448 if ((insn
>> 30) == 3) {
1449 switch((insn
>> 19) & 0x3f) {
1473 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1474 is_write
, sigmask
, NULL
);
1477 #elif defined(__arm__)
1479 int cpu_signal_handler(int host_signum
, void *pinfo
,
1482 siginfo_t
*info
= pinfo
;
1483 struct ucontext
*uc
= puc
;
1487 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1488 pc
= uc
->uc_mcontext
.gregs
[R15
];
1490 pc
= uc
->uc_mcontext
.arm_pc
;
1492 /* XXX: compute is_write */
1494 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1496 &uc
->uc_sigmask
, puc
);
1499 #elif defined(__mc68000)
1501 int cpu_signal_handler(int host_signum
, void *pinfo
,
1504 siginfo_t
*info
= pinfo
;
1505 struct ucontext
*uc
= puc
;
1509 pc
= uc
->uc_mcontext
.gregs
[16];
1510 /* XXX: compute is_write */
1512 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1514 &uc
->uc_sigmask
, puc
);
1517 #elif defined(__ia64)
1520 /* This ought to be in <bits/siginfo.h>... */
1521 # define __ISR_VALID 1
1524 int cpu_signal_handler(int host_signum
, void *pinfo
, void *puc
)
1526 siginfo_t
*info
= pinfo
;
1527 struct ucontext
*uc
= puc
;
1531 ip
= uc
->uc_mcontext
.sc_ip
;
1532 switch (host_signum
) {
1538 if (info
->si_code
&& (info
->si_segvflags
& __ISR_VALID
))
1539 /* ISR.W (write-access) is bit 33: */
1540 is_write
= (info
->si_isr
>> 33) & 1;
1546 return handle_cpu_signal(ip
, (unsigned long)info
->si_addr
,
1548 &uc
->uc_sigmask
, puc
);
1551 #elif defined(__s390__)
1553 int cpu_signal_handler(int host_signum
, void *pinfo
,
1556 siginfo_t
*info
= pinfo
;
1557 struct ucontext
*uc
= puc
;
1561 pc
= uc
->uc_mcontext
.psw
.addr
;
1562 /* XXX: compute is_write */
1564 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1565 is_write
, &uc
->uc_sigmask
, puc
);
1568 #elif defined(__mips__)
1570 int cpu_signal_handler(int host_signum
, void *pinfo
,
1573 siginfo_t
*info
= pinfo
;
1574 struct ucontext
*uc
= puc
;
1575 greg_t pc
= uc
->uc_mcontext
.pc
;
1578 /* XXX: compute is_write */
1580 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1581 is_write
, &uc
->uc_sigmask
, puc
);
1584 #elif defined(__hppa__)
1586 int cpu_signal_handler(int host_signum
, void *pinfo
,
1589 struct siginfo
*info
= pinfo
;
1590 struct ucontext
*uc
= puc
;
1594 pc
= uc
->uc_mcontext
.sc_iaoq
[0];
1595 /* FIXME: compute is_write */
1597 return handle_cpu_signal(pc
, (unsigned long)info
->si_addr
,
1599 &uc
->uc_sigmask
, puc
);
1604 #error host CPU specific signal handler needed
1608 #endif /* !defined(CONFIG_SOFTMMU) */