/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
#if defined(TARGET_ARM) || defined(TARGET_SPARC)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    /* XXX: use siglongjmp ? */
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
    longjmp(env->jmp_env, 1);
}
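/* Illustrative note (a sketch, not code from this file): both helpers
 * above rely on the setjmp() performed in cpu_exec() below. Any code
 * that must abandon the current translated block just records the
 * pending exception and longjmp()s back, e.g.:
 *
 *     env->exception_index = EXCP0D_GPF;   // exception number illustrative
 *     cpu_loop_exit();                     // longjmp(env->jmp_env, 1)
 *
 * Control then resumes at the setjmp(env->jmp_env) test in cpu_exec(),
 * which dispatches the pending exception. */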
/* main execution loop */

int cpu_exec(CPUState *env1)
{
    int saved_T0, saved_T1, saved_T2;
    CPUState *saved_env;
#ifdef __sparc__
    int saved_i7, tmp_T0;
#endif
    int code_gen_size, ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb, **ptb;
    uint8_t *tc_ptr, *cs_base, *pc;
    unsigned int flags;

    /* first we save global registers */
    saved_env = env;
    env = env1;
    saved_T0 = T0;
    saved_T1 = T1;
    saved_T2 = T2;
#ifdef __sparc__
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif
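/* Background sketch for the saves above (an assumption, not from this
 * file): T0/T1/T2 are global temporaries that the generated code expects
 * to live in fixed host registers, declared roughly as
 *
 *     register unsigned long T0 asm("ebx");   // hypothetical mapping
 *
 * so cpu_exec() must spill them to locals here and restore them before
 * returning, since its caller may use those host registers freely. */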
#if defined(TARGET_I386)
    EAX = env->regs[R_EAX];
    ECX = env->regs[R_ECX];
    EDX = env->regs[R_EDX];
    EBX = env->regs[R_EBX];
    ESP = env->regs[R_ESP];
    EBP = env->regs[R_EBP];
    ESI = env->regs[R_ESI];
    EDI = env->regs[R_EDI];

    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
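    /* Worked example of the DF encoding above: bit 10 of EFLAGS is the
       x86 direction flag, and the translated code wants a signed step for
       string operations, so
           eflags bit 10 == 0  ->  DF = 1 - 2*0 = +1  (addresses ascend)
           eflags bit 10 == 1  ->  DF = 1 - 2*1 = -1  (addresses descend)
       DF_MASK (0x400) is cleared from env->eflags while the flag lives in
       this temporary form. */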
#elif defined(TARGET_ARM)
    {
        unsigned int psr;
        psr = env->cpsr;
        env->CF = (psr >> 29) & 1;
        env->NZF = (psr & 0xc0000000) ^ 0x40000000;
        env->VF = (psr << 3) & 0x80000000;
        env->cpsr = psr & ~0xf0000000;
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }

            T0 = 0; /* force lookup of first TB */
            for(;;) {
#ifdef __sparc__
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)) {
#if defined(TARGET_I386)
                    /* if hardware interrupt pending, we execute it */
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
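                    /* How CPU_INTERRUPT_HARD gets raised (sketch; the
                       device side lives outside this file): a PIC model
                       asserting an IRQ line ends up calling something like
                           cpu_interrupt(env, CPU_INTERRUPT_HARD);
                       which sets the bit in env->interrupt_request that
                       this loop polls between translated blocks. */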
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_RESET;
                    }
                    if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                        env->exception_index = EXCP_EXTERNAL;
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                    } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
                        env->exception_index = EXCP_DECR;
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if (loglevel & CPU_LOG_EXEC) {
#if defined(TARGET_I386)
                    /* restore flags in standard format */
                    env->regs[R_EAX] = EAX;
                    env->regs[R_EBX] = EBX;
                    env->regs[R_ECX] = ECX;
                    env->regs[R_EDX] = EDX;
                    env->regs[R_ESI] = ESI;
                    env->regs[R_EDI] = EDI;
                    env->regs[R_EBP] = EBP;
                    env->regs[R_ESP] = ESP;
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    env->cpsr = compute_cpsr();
                    cpu_arm_dump_state(env, logfile, 0);
                    env->cpsr &= ~0xf0000000;
#elif defined(TARGET_SPARC)
                    cpu_sparc_dump_state(env, logfile, 0);
#elif defined(TARGET_PPC)
                    cpu_ppc_dump_state(env, logfile, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
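                /* Lazy condition codes, as consumed above: rather than
                   computing EFLAGS after every instruction, the translated
                   code records the operands of the last flag-setting op in
                   CC_SRC/CC_DST and an opcode in CC_OP;
                   cc_table[CC_OP].compute_all() reconstructs the full flag
                   set only when something (here, the debug dump) needs it. */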
                /* we record a subset of the CPU state. It will
                   always be the same before a given translated block
                   is executed. */
#if defined(TARGET_I386)
                flags = env->hflags;
                flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                cs_base = env->segs[R_CS].base;
                pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
                flags = 0;
                cs_base = 0;
                pc = (uint8_t *)env->regs[15];
#elif defined(TARGET_SPARC)
                flags = 0;
                cs_base = (uint8_t *)env->npc;
                pc = (uint8_t *)env->pc;
#elif defined(TARGET_PPC)
                flags = 0;
                cs_base = 0;
                pc = (uint8_t *)env->nip;
#else
#error unsupported CPU
#endif
                tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
                             flags);
                if (!tb) {
                    TranslationBlock **ptb1;
                    unsigned int h;
                    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

                    spin_lock(&tb_lock);

                    tb_invalidated_flag = 0;

                    /* find translated block using physical mappings */
                    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
                    phys_page1 = phys_pc & TARGET_PAGE_MASK;
                    phys_page2 = -1;
                    h = tb_phys_hash_func(phys_pc);
                    ptb1 = &tb_phys_hash[h];
                    for(;;) {
                        tb = *ptb1;
                        if (!tb)
                            goto not_found;
                        if (tb->pc == (unsigned long)pc &&
                            tb->page_addr[0] == phys_page1 &&
                            tb->cs_base == (unsigned long)cs_base &&
                            tb->flags == flags) {
                            /* check next page if needed */
                            if (tb->page_addr[1] != -1) {
                                virt_page2 = ((unsigned long)pc & TARGET_PAGE_MASK) +
                                    TARGET_PAGE_SIZE;
                                phys_page2 = get_phys_addr_code(env, virt_page2);
                                if (tb->page_addr[1] == phys_page2)
                                    goto found;
                            } else {
                                goto found;
                            }
                        }
                        ptb1 = &tb->phys_hash_next;
                    }
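                    /* Example for the two-page check above, assuming 4 KB
                       target pages: a block whose code starts at virtual
                       0x00400ffa and is longer than 6 bytes spills into the
                       page at 0x00401000, so a cached TB is only reusable if
                       both physical pages still map the same code. */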
                not_found:
                    /* if no translated code available, then translate it now */
                    tb = tb_alloc((unsigned long)pc);
                    if (!tb) {
                        /* flush must be done */
                        tb_flush(env);
                        /* cannot fail at this point */
                        tb = tb_alloc((unsigned long)pc);
                        /* don't forget to invalidate previous TB info */
                        ptb = &tb_hash[tb_hash_func((unsigned long)pc)];
                        T0 = 0;
                    }
                    tc_ptr = code_gen_ptr;
                    tb->tc_ptr = tc_ptr;
                    tb->cs_base = (unsigned long)cs_base;
                    tb->flags = flags;
                    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
                    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr +
                                             code_gen_size + CODE_GEN_ALIGN - 1) &
                                            ~(CODE_GEN_ALIGN - 1));
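                    /* The rounding above is the usual align-up idiom; e.g.
                       assuming CODE_GEN_ALIGN is 16, code_gen_ptr == 0x1003
                       and code_gen_size == 0x21:
                           (0x1003 + 0x21 + 15) & ~15 == 0x1030,
                       the next 16-byte boundary after the emitted code. */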
                    /* check next page if needed */
                    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
                    phys_page2 = -1;
                    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
                        phys_page2 = get_phys_addr_code(env, virt_page2);
                    }
                    tb_link_phys(tb, phys_pc, phys_page2);
                found:
                    if (tb_invalidated_flag) {
                        /* as some TB could have been invalidated because
                           of memory exceptions while generating the code, we
                           must recompute the hash index here */
                        ptb = &tb_hash[tb_hash_func((unsigned long)pc)];
                        while (*ptb != NULL)
                            ptb = &(*ptb)->hash_next;
                        T0 = 0;
                    }
                    /* we add the TB in the virtual pc hash table */
                    *ptb = tb;
                    tb->hash_next = NULL;
                    tb_link(tb);
                    spin_unlock(&tb_lock);
                }
#ifdef DEBUG_EXEC
                if (loglevel & CPU_LOG_EXEC) {
                    fprintf(logfile, "Trace 0x%08lx [0x%08lx] %s\n",
                            (long)tb->tc_ptr, (long)tb->pc,
                            lookup_symbol((void *)tb->pc));
                }
#endif
#ifdef __sparc__
                T0 = tmp_T0;
#endif
                /* see if we can patch the calling TB. */
                if (T0 != 0
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                    && (tb->cflags & CF_CODE_COPY) ==
                    (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
                    ) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
                    /* propagates the FP use info */
                    ((TranslationBlock *)(T0 & ~3))->cflags |=
                        (tb->cflags & CF_FP_USED);
#endif
                    spin_unlock(&tb_lock);
                }
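                /* T0's convention in the chaining test above: after a
                   translated block exits, T0 holds the address of the
                   TranslationBlock it came from, with the jump slot number
                   (0 or 1, one per possible block exit) packed into the low
                   two bits -- hence (TranslationBlock *)(T0 & ~3) and
                   (T0 & 3), and T0 == 0 meaning "do not chain". */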
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
                if (!(tb->cflags & CF_CODE_COPY)) {
                    if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
                        save_native_fp_state(env);
                    }
                    gen_func();
                } else {
                    if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
                        restore_native_fp_state(env);
                    }
                    /* we work with native eflags */
                    CC_SRC = cc_table[CC_OP].compute_all();
                    CC_OP = CC_OP_EFLAGS;
                    asm(".globl exec_loop\n"
                        "    fs movl %11, %%eax\n"
                        "    andl $0x400, %%eax\n"
                        "    fs orl %8, %%eax\n"
                        "    fs movl %%esp, %12\n"
                        "    fs movl %0, %%eax\n"
                        "    fs movl %1, %%ecx\n"
                        "    fs movl %2, %%edx\n"
                        "    fs movl %3, %%ebx\n"
                        "    fs movl %4, %%esp\n"
                        "    fs movl %5, %%ebp\n"
                        "    fs movl %6, %%esi\n"
                        "    fs movl %7, %%edi\n"
                        "    fs movl %%esp, %4\n"
                        "    fs movl %12, %%esp\n"
                        "    fs movl %%eax, %0\n"
                        "    fs movl %%ecx, %1\n"
                        "    fs movl %%edx, %2\n"
                        "    fs movl %%ebx, %3\n"
                        "    fs movl %%ebp, %5\n"
                        "    fs movl %%esi, %6\n"
                        "    fs movl %%edi, %7\n"
                        "    movl %%eax, %%ecx\n"
                        "    andl $0x400, %%ecx\n"
                        "    andl $0x8d5, %%eax\n"
                        "    fs movl %%eax, %8\n"
                        "    subl %%ecx, %%eax\n"
                        "    fs movl %%eax, %11\n"
                        "    fs movl %9, %%ebx\n" /* get T0 value */
                        : /* no outputs */
                        : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
                          "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
                          "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
                          "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
                          "a" (gen_func), /* %10: generated-code entry point; constraint assumed */
                          "m" (*(uint8_t *)offsetof(CPUState, df)),
                          "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
                        );
                }
#else
                gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
            } /* inner for(;;) */
        }
    } /* outer for(;;) */

#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    }
#endif
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);

    /* restore global registers */
    env->regs[R_EAX] = EAX;
    env->regs[R_EBX] = EBX;
    env->regs[R_ECX] = ECX;
    env->regs[R_EDX] = EDX;
    env->regs[R_ESI] = ESI;
    env->regs[R_EDI] = EDI;
    env->regs[R_EBP] = EBP;
    env->regs[R_ESP] = ESP;
#elif defined(TARGET_ARM)
    env->cpsr = compute_cpsr();
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#else
#error unsupported target CPU
#endif
#ifdef __sparc__
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
    T0 = saved_T0;
    T1 = saved_T1;
    T2 = saved_T2;
    env = saved_env;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
}
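/* Hedged usage sketch (addresses are illustrative): generated code that
 * detects a store into a page holding translated code could call
 *
 *     tb_invalidate_page_range(addr, addr + 4);
 *
 * to drop the TBs overlapping the 4 bytes just written. */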
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (uint8_t *)(selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    helper_fsave(ptr, data32);
    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    helper_frstor(ptr, data32);
    env = saved_env;
}

#endif /* TARGET_I386 */
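/* Worked example for the real-mode branch in cpu_x86_load_seg() above:
 * loading DS with selector 0x1234 while CR0.PE is clear (or in VM86 mode)
 * yields base = 0x1234 << 4 = 0x12340 with a 64 KB limit and no
 * protected-mode descriptor checks. */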
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    /* XXX: do more */
    return 0;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    return 0;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
{
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    }
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);
}
#endif
int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = uc->uc_mcontext.gregs[REG_EIP];
    trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}
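/* Sketch of how a handler with this shape is typically installed; the
 * registration lives elsewhere and 'host_segv_handler' is only an
 * illustrative wrapper (sa_sigaction returns void, so it cannot be
 * cpu_signal_handler itself):
 *
 *     struct sigaction act;
 *     sigfillset(&act.sa_mask);
 *     act.sa_flags = SA_SIGINFO;
 *     act.sa_sigaction = host_segv_handler;  // calls cpu_signal_handler()
 *     sigaction(SIGSEGV, &act, NULL);
 *
 * The third argument the kernel passes (the ucontext) is what arrives
 * here as 'puc'. */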
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    struct pt_regs *regs = uc->uc_mcontext.regs;
    unsigned long pc;
    int is_write;

    pc = regs->nip;
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (regs->dsisr & 0x00800000)
        is_write = 1;
#else
    if (regs->trap != 0x400 && (regs->dsisr & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: case 0x0e: case 0x0f:            /* stw, stb, stq_u */
    case 0x24: case 0x25: case 0x26: case 0x27: /* stf, stg, sts, stt */
    case 0x2c: case 0x2d: case 0x2e: case 0x2f: /* stl, stq, stl_c, stq_c */
        is_write = 1;
        break;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x04: case 0x05: case 0x06: case 0x07: /* st, stb, sth, std */
        case 0x24: case 0x25: case 0x27:            /* stf, stfsr, stdf */
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */