/*
 *  Copyright (c) 2003-2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/syscall.h>
#include <sys/resource.h>

#include "qemu-common.h"
#include "qemu/cache-utils.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
const char *cpu_model;
unsigned long mmap_min_addr;
#if defined(CONFIG_USE_GUEST_BASE)
unsigned long guest_base;

#if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
/*
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
 *
 * This way we will never overlap with our own libraries or binaries or stack
 * or anything else that QEMU maps.
 */
# ifdef TARGET_MIPS
/* MIPS only supports 31 bits of virtual address space for user space */
unsigned long reserved_va = 0x77000000;
# else
unsigned long reserved_va = 0xf7000000;
# endif
#else
unsigned long reserved_va;
#endif
#endif /* CONFIG_USE_GUEST_BASE */

static void usage(void);

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release = CONFIG_UNAME_RELEASE;

/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
   we allocate a bigger stack.  Need a better solution, for example
   by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;
void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)
{
    return -1;
}
#endif
/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work.  */
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
static int pending_cpus;
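/*
 * As the helpers below use it: pending_cpus is zero while no exclusive
 * section is active.  start_exclusive() sets it to 1 plus the number of
 * vCPUs that still have to leave guest code, each stopping vCPU drops it
 * by one in cpu_exec_end(), and end_exclusive() resets it to zero.
 */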
/* Make sure everything is in a consistent state for calling fork().  */
void fork_start(void)
{
    pthread_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    pthread_mutex_lock(&exclusive_lock);
    mmap_fork_start();
}

void fork_end(int child)
{
    mmap_fork_end(child);
    if (child) {
        CPUState *cpu, *next_cpu;
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads.  */
        CPU_FOREACH_SAFE(cpu, next_cpu) {
            if (cpu != thread_cpu) {
                /* drop every vCPU except the current thread's */
                QTAILQ_REMOVE(&cpus, cpu, node);
            }
        }
        pending_cpus = 0;
        pthread_mutex_init(&exclusive_lock, NULL);
        pthread_mutex_init(&cpu_list_mutex, NULL);
        pthread_cond_init(&exclusive_cond, NULL);
        pthread_cond_init(&exclusive_resume, NULL);
        pthread_mutex_init(&tcg_ctx.tb_ctx.tb_lock, NULL);
        gdbserver_fork((CPUArchState *)thread_cpu->env_ptr);
    } else {
        pthread_mutex_unlock(&exclusive_lock);
        pthread_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
    }
}
/* Wait for pending exclusive operations to complete.  The exclusive lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
    }
}

/* Start an exclusive operation.
   Must only be called from outside cpu_arm_exec.  */
static inline void start_exclusive(void)
{
    CPUState *other_cpu;

    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();

    pending_cpus = 1;
    /* Make all other cpus stop executing.  */
    CPU_FOREACH(other_cpu) {
        if (other_cpu->running) {
            pending_cpus++;
            cpu_exit(other_cpu);
        }
    }
    if (pending_cpus > 1) {
        pthread_cond_wait(&exclusive_cond, &exclusive_lock);
    }
}

/* Finish an exclusive operation.  */
static inline void end_exclusive(void)
{
    pending_cpus = 0;
    pthread_cond_broadcast(&exclusive_resume);
    pthread_mutex_unlock(&exclusive_lock);
}

/* Wait for exclusive ops to finish, and begin cpu execution.  */
static inline void cpu_exec_start(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();
    cpu->running = true;
    pthread_mutex_unlock(&exclusive_lock);
}

/* Mark cpu as not executing, and release pending exclusive ops.  */
static inline void cpu_exec_end(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    cpu->running = false;
    if (pending_cpus > 1) {
        pending_cpus--;
        if (pending_cpus == 1) {
            pthread_cond_signal(&exclusive_cond);
        }
    }
    exclusive_idle();
    pthread_mutex_unlock(&exclusive_lock);
}

void cpu_list_lock(void)
{
    pthread_mutex_lock(&cpu_list_mutex);
}

void cpu_list_unlock(void)
{
    pthread_mutex_unlock(&cpu_list_mutex);
}
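/*
 * Usage sketch (illustration only, not code from this file): a target helper
 * that must not race with other vCPUs brackets its critical section with the
 * primitives above, e.g.
 *
 *     start_exclusive();      // park every other vCPU outside guest code
 *     update_guest_memory();  // hypothetical callback standing in for the work
 *     end_exclusive();        // let the stopped vCPUs resume
 *
 * do_strex() further down follows this pattern.
 */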
/***********************************************************/
/* CPUX86 core interface */

void cpu_smm_update(CPUX86State *env)
{
}

uint64_t cpu_get_tsc(CPUX86State *env)
{
    return cpu_get_real_ticks();
}

static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
                     int flags)
{
    unsigned int e1, e2;
    uint32_t *p;

    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
    e2 |= flags;
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}

static uint64_t *idt_table;

#ifdef TARGET_X86_64
static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;

    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
    p[2] = tswap32(addr >> 32);
    p[3] = 0;
}

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
}
#else
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;

    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate(idt_table + n, 0, dpl, 0, 0);
}
#endif
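/*
 * Sketch (assumption, not shown in this excerpt): whoever allocates idt_table
 * is expected to mark the vectors that guest code may raise, for instance
 *
 *     for (i = 0; i < 20; i++) {
 *         set_idt(i, 0);    // hardware exceptions stay at DPL 0
 *     }
 *     set_idt(0x80, 3);     // the Linux int $0x80 gate must be callable from CPL 3
 *
 * so that the cpu_loop() below can observe trap 0x80 as a system call.
 */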
void cpu_loop(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    int trapnr;
    abi_ulong pc;
    target_siginfo_t info;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_x86_exec(env);
        cpu_exec_end(cs);
        switch (trapnr) {
        case 0x80:
            /* linux syscall from int $0x80 */
            env->regs[R_EAX] = do_syscall(env,
                                          env->regs[R_EAX],
                                          env->regs[R_EBX],
                                          env->regs[R_ECX],
                                          env->regs[R_EDX],
                                          env->regs[R_ESI],
                                          env->regs[R_EDI],
                                          env->regs[R_EBP],
                                          0, 0);
            break;
#ifndef TARGET_ABI32
        case EXCP_SYSCALL:
            /* linux syscall from syscall instruction */
            env->regs[R_EAX] = do_syscall(env,
                                          env->regs[R_EAX],
                                          env->regs[R_EDI],
                                          env->regs[R_ESI],
                                          env->regs[R_EDX],
                                          env->regs[10],
                                          env->regs[8],
                                          env->regs[9],
                                          0, 0);
            env->eip = env->exception_next_eip;
            break;
#endif
        case EXCP0B_NOSEG:
        case EXCP0C_STACK:
            info.si_signo = SIGBUS;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP0D_GPF:
            /* XXX: potential problem if ABI32 */
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_fault(env);
            } else
#endif
            {
                info.si_signo = SIGSEGV;
                info.si_code = TARGET_SI_KERNEL;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, &info);
            }
            break;
        case EXCP0E_PAGE:
            info.si_signo = SIGSEGV;
            if (!(env->error_code & 1))
                info.si_code = TARGET_SEGV_MAPERR;
            else
                info.si_code = TARGET_SEGV_ACCERR;
            info._sifields._sigfault._addr = env->cr[2];
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP00_DIVZ:
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            } else
#endif
            {
                /* division by zero */
                info.si_signo = SIGFPE;
                info.si_code = TARGET_FPE_INTDIV;
                info._sifields._sigfault._addr = env->eip;
                queue_signal(env, info.si_signo, &info);
            }
            break;
        case EXCP01_DB:
        case EXCP03_INT3:
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            } else
#endif
            {
                info.si_signo = SIGTRAP;
                if (trapnr == EXCP01_DB) {
                    info.si_code = TARGET_TRAP_BRKPT;
                    info._sifields._sigfault._addr = env->eip;
                } else {
                    info.si_code = TARGET_SI_KERNEL;
                    info._sifields._sigfault._addr = 0;
                }
                queue_signal(env, info.si_signo, &info);
            }
            break;
        case EXCP04_INTO:
        case EXCP05_BOUND:
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            } else
#endif
            {
                info.si_signo = SIGSEGV;
                info.si_code = TARGET_SI_KERNEL;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, &info);
            }
            break;
        case EXCP06_ILLOP:
            info.si_signo = SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, &info);
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_DEBUG:
            {
                int sig;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                if (sig) {
                    info.si_signo = sig;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
                }
            }
            break;
        default:
            pc = env->segs[R_CS].base + env->eip;
            fprintf(stderr, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
                    (long)pc, trapnr);
            abort();
        }
        process_pending_signals(env);
    }
}
#define get_user_code_u32(x, gaddr, doswap)             \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && (doswap)) {                         \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_code_u16(x, gaddr, doswap)             \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && (doswap)) {                         \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })
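/*
 * These macros are used further down whenever a trapping guest instruction
 * has to be re-read from memory, e.g. in the ARM loop:
 *
 *     get_user_code_u32(opcode, env->regs[15], env->bswap_code);
 *
 * where env->bswap_code indicates that instruction words need byte-swapping
 * after the load before they can be decoded.
 */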
/* Commpage handling -- there is no commpage for AArch64 */

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 *
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
    uint64_t oldval, newval, val;
    uint32_t addr, cpsr;
    target_siginfo_t info;

    /* Based on the 32 bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations. However things like ldrex/strex are much harder so
       there's not much point trying.  */
    start_exclusive();
    cpsr = cpsr_read(env);
    addr = env->regs[2];

    if (get_user_u64(oldval, env->regs[0])) {
        env->cp15.c6_data = env->regs[0];
        goto segv;
    }

    if (get_user_u64(newval, env->regs[1])) {
        env->cp15.c6_data = env->regs[1];
        goto segv;
    }

    if (get_user_u64(val, addr)) {
        env->cp15.c6_data = addr;
        goto segv;
    }

    if (val == oldval) {
        val = newval;

        if (put_user_u64(val, addr)) {
            env->cp15.c6_data = addr;
            goto segv;
        }

        env->regs[0] = 0;
        cpsr |= CPSR_C;
    } else {
        env->regs[0] = -1;
        cpsr &= ~CPSR_C;
    }

    cpsr_write(env, cpsr, CPSR_C);
    end_exclusive();
    return;

segv:
    end_exclusive();
    /* We get the PC of the entry address - which is as good as anything,
       on a real kernel what you get depends on which mode it uses. */
    info.si_signo = SIGSEGV;
    info.si_errno = 0;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->cp15.c6_data;
    queue_signal(env, info.si_signo, &info);
}
/* Handle a jump to the kernel code page.  */
static int do_kernel_trap(CPUARMState *env)
{
    uint32_t addr;
    uint32_t cpsr;
    uint32_t val;

    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        /* ??? No-op. Will need to do better for SMP.  */
        break;
    case 0xffff0fc0: /* __kernel_cmpxchg */
        /* XXX: This only works between threads, not between processes.
           It's probably possible to implement this with native host
           operations. However things like ldrex/strex are much harder so
           there's not much point trying.  */
        start_exclusive();
        cpsr = cpsr_read(env);
        addr = env->regs[2];
        /* FIXME: This should SEGV if the access fails.  */
        if (get_user_u32(val, addr)) {
            val = ~env->regs[0];
        }
        if (val == env->regs[0]) {
            val = env->regs[1];
            /* FIXME: Check for segfaults.  */
            put_user_u32(val, addr);
            env->regs[0] = 0;
            cpsr |= CPSR_C;
        } else {
            env->regs[0] = -1;
            cpsr &= ~CPSR_C;
        }
        cpsr_write(env, cpsr, CPSR_C);
        end_exclusive();
        break;
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = env->cp15.c13_tls2;
        break;
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);
        break;
    default:
        return 1;
    }
    /* Jump back to the caller.  */
    addr = env->regs[14];
    if (addr & 1) {
        env->thumb = 1;
        addr &= ~1;
    }
    env->regs[15] = addr;

    return 0;
}
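/*
 * env->exclusive_info, as decoded by do_strex() below (layout inferred from
 * the shifts and masks in the code itself):
 *   bits  0-3  access size code: 0 = byte, 1 = halfword, 2 = word, 3 = word pair
 *   bits  4-7  register that receives the strex status (0 on success, 1 on failure)
 *   bits  8-11 register holding the value to store
 *   bits 12-15 register holding the high word of a strexd pair
 */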
static int do_strex(CPUARMState *env)
{
    uint32_t val;
    int size;
    int rc = 1;
    int segv = 0;
    uint32_t addr;

    start_exclusive();
    addr = env->exclusive_addr;
    if (addr != env->exclusive_test) {
        goto fail;
    }
    size = env->exclusive_info & 0xf;
    switch (size) {
    case 0:
        segv = get_user_u8(val, addr);
        break;
    case 1:
        segv = get_user_u16(val, addr);
        break;
    case 2:
    case 3:
        segv = get_user_u32(val, addr);
        break;
    default:
        abort();
    }
    if (segv) {
        env->cp15.c6_data = addr;
        goto done;
    }
    if (val != env->exclusive_val) {
        goto fail;
    }
    if (size == 3) {
        segv = get_user_u32(val, addr + 4);
        if (segv) {
            env->cp15.c6_data = addr + 4;
            goto done;
        }
        if (val != env->exclusive_high) {
            goto fail;
        }
    }
    val = env->regs[(env->exclusive_info >> 8) & 0xf];
    switch (size) {
    case 0:
        segv = put_user_u8(val, addr);
        break;
    case 1:
        segv = put_user_u16(val, addr);
        break;
    case 2:
    case 3:
        segv = put_user_u32(val, addr);
        break;
    }
    if (segv) {
        env->cp15.c6_data = addr;
        goto done;
    }
    if (size == 3) {
        val = env->regs[(env->exclusive_info >> 12) & 0xf];
        segv = put_user_u32(val, addr + 4);
        if (segv) {
            env->cp15.c6_data = addr + 4;
            goto done;
        }
    }
    rc = 0;
fail:
    env->regs[15] += 4;
    env->regs[(env->exclusive_info >> 4) & 0xf] = rc;
done:
    end_exclusive();
    return segv;
}
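/*
 * A non-zero return from do_strex() means one of the guest accesses faulted;
 * env->cp15.c6_data already holds the offending address, and the ARM
 * cpu_loop() below is expected to turn that into a SIGSEGV much like a
 * regular data abort.
 */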
669 void cpu_loop(CPUARMState
*env
)
671 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
673 unsigned int n
, insn
;
674 target_siginfo_t info
;
679 trapnr
= cpu_arm_exec(env
);
684 TaskState
*ts
= env
->opaque
;
688 /* we handle the FPU emulation here, as Linux */
689 /* we get the opcode */
690 /* FIXME - what to do if get_user() fails? */
691 get_user_code_u32(opcode
, env
->regs
[15], env
->bswap_code
);
693 rc
= EmulateAll(opcode
, &ts
->fpa
, env
);
694 if (rc
== 0) { /* illegal instruction */
695 info
.si_signo
= SIGILL
;
697 info
.si_code
= TARGET_ILL_ILLOPN
;
698 info
._sifields
._sigfault
._addr
= env
->regs
[15];
699 queue_signal(env
, info
.si_signo
, &info
);
700 } else if (rc
< 0) { /* FP exception */
703 /* translate softfloat flags to FPSR flags */
704 if (-rc
& float_flag_invalid
)
706 if (-rc
& float_flag_divbyzero
)
708 if (-rc
& float_flag_overflow
)
710 if (-rc
& float_flag_underflow
)
712 if (-rc
& float_flag_inexact
)
715 FPSR fpsr
= ts
->fpa
.fpsr
;
716 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
718 if (fpsr
& (arm_fpe
<< 16)) { /* exception enabled? */
719 info
.si_signo
= SIGFPE
;
722 /* ordered by priority, least first */
723 if (arm_fpe
& BIT_IXC
) info
.si_code
= TARGET_FPE_FLTRES
;
724 if (arm_fpe
& BIT_UFC
) info
.si_code
= TARGET_FPE_FLTUND
;
725 if (arm_fpe
& BIT_OFC
) info
.si_code
= TARGET_FPE_FLTOVF
;
726 if (arm_fpe
& BIT_DZC
) info
.si_code
= TARGET_FPE_FLTDIV
;
727 if (arm_fpe
& BIT_IOC
) info
.si_code
= TARGET_FPE_FLTINV
;
729 info
._sifields
._sigfault
._addr
= env
->regs
[15];
730 queue_signal(env
, info
.si_signo
, &info
);
735 /* accumulate unenabled exceptions */
736 if ((!(fpsr
& BIT_IXE
)) && (arm_fpe
& BIT_IXC
))
738 if ((!(fpsr
& BIT_UFE
)) && (arm_fpe
& BIT_UFC
))
740 if ((!(fpsr
& BIT_OFE
)) && (arm_fpe
& BIT_OFC
))
742 if ((!(fpsr
& BIT_DZE
)) && (arm_fpe
& BIT_DZC
))
744 if ((!(fpsr
& BIT_IOE
)) && (arm_fpe
& BIT_IOC
))
747 } else { /* everything OK */
758 if (trapnr
== EXCP_BKPT
) {
760 /* FIXME - what to do if get_user() fails? */
761 get_user_code_u16(insn
, env
->regs
[15], env
->bswap_code
);
765 /* FIXME - what to do if get_user() fails? */
766 get_user_code_u32(insn
, env
->regs
[15], env
->bswap_code
);
767 n
= (insn
& 0xf) | ((insn
>> 4) & 0xff0);
772 /* FIXME - what to do if get_user() fails? */
773 get_user_code_u16(insn
, env
->regs
[15] - 2,
777 /* FIXME - what to do if get_user() fails? */
778 get_user_code_u32(insn
, env
->regs
[15] - 4,
784 if (n
== ARM_NR_cacheflush
) {
786 } else if (n
== ARM_NR_semihosting
787 || n
== ARM_NR_thumb_semihosting
) {
788 env
->regs
[0] = do_arm_semihosting (env
);
789 } else if (n
== 0 || n
>= ARM_SYSCALL_BASE
|| env
->thumb
) {
791 if (env
->thumb
|| n
== 0) {
794 n
-= ARM_SYSCALL_BASE
;
797 if ( n
> ARM_NR_BASE
) {
799 case ARM_NR_cacheflush
:
803 cpu_set_tls(env
, env
->regs
[0]);
807 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
809 env
->regs
[0] = -TARGET_ENOSYS
;
813 env
->regs
[0] = do_syscall(env
,
829 /* just indicate that signals should be handled asap */
831 case EXCP_PREFETCH_ABORT
:
832 addr
= env
->cp15
.c6_insn
;
834 case EXCP_DATA_ABORT
:
835 addr
= env
->cp15
.c6_data
;
838 info
.si_signo
= SIGSEGV
;
840 /* XXX: check env->error_code */
841 info
.si_code
= TARGET_SEGV_MAPERR
;
842 info
._sifields
._sigfault
._addr
= addr
;
843 queue_signal(env
, info
.si_signo
, &info
);
850 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
855 info
.si_code
= TARGET_TRAP_BRKPT
;
856 queue_signal(env
, info
.si_signo
, &info
);
860 case EXCP_KERNEL_TRAP
:
861 if (do_kernel_trap(env
))
866 addr
= env
->cp15
.c6_data
;
872 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
874 cpu_dump_state(cs
, stderr
, fprintf
, 0);
877 process_pending_signals(env
);
883 /* AArch64 main loop */
884 void cpu_loop(CPUARMState
*env
)
886 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
888 target_siginfo_t info
;
893 trapnr
= cpu_arm_exec(env
);
898 env
->xregs
[0] = do_syscall(env
,
909 /* just indicate that signals should be handled asap */
912 info
.si_signo
= SIGILL
;
914 info
.si_code
= TARGET_ILL_ILLOPN
;
915 info
._sifields
._sigfault
._addr
= env
->pc
;
916 queue_signal(env
, info
.si_signo
, &info
);
918 case EXCP_PREFETCH_ABORT
:
919 addr
= env
->cp15
.c6_insn
;
921 case EXCP_DATA_ABORT
:
922 addr
= env
->cp15
.c6_data
;
924 info
.si_signo
= SIGSEGV
;
926 /* XXX: check env->error_code */
927 info
.si_code
= TARGET_SEGV_MAPERR
;
928 info
._sifields
._sigfault
._addr
= addr
;
929 queue_signal(env
, info
.si_signo
, &info
);
933 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
937 info
.si_code
= TARGET_TRAP_BRKPT
;
938 queue_signal(env
, info
.si_signo
, &info
);
943 addr
= env
->cp15
.c6_data
;
948 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
950 cpu_dump_state(cs
, stderr
, fprintf
, 0);
953 process_pending_signals(env
);
956 #endif /* ndef TARGET_ABI32 */
960 #ifdef TARGET_UNICORE32
962 void cpu_loop(CPUUniCore32State
*env
)
964 CPUState
*cs
= CPU(uc32_env_get_cpu(env
));
966 unsigned int n
, insn
;
967 target_siginfo_t info
;
971 trapnr
= uc32_cpu_exec(env
);
977 get_user_u32(insn
, env
->regs
[31] - 4);
980 if (n
>= UC32_SYSCALL_BASE
) {
982 n
-= UC32_SYSCALL_BASE
;
983 if (n
== UC32_SYSCALL_NR_set_tls
) {
984 cpu_set_tls(env
, env
->regs
[0]);
987 env
->regs
[0] = do_syscall(env
,
1002 case UC32_EXCP_DTRAP
:
1003 case UC32_EXCP_ITRAP
:
1004 info
.si_signo
= SIGSEGV
;
1006 /* XXX: check env->error_code */
1007 info
.si_code
= TARGET_SEGV_MAPERR
;
1008 info
._sifields
._sigfault
._addr
= env
->cp0
.c4_faultaddr
;
1009 queue_signal(env
, info
.si_signo
, &info
);
1011 case EXCP_INTERRUPT
:
1012 /* just indicate that signals should be handled asap */
1018 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1020 info
.si_signo
= sig
;
1022 info
.si_code
= TARGET_TRAP_BRKPT
;
1023 queue_signal(env
, info
.si_signo
, &info
);
1030 process_pending_signals(env
);
1034 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1035 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1041 #define SPARC64_STACK_BIAS 2047
1045 /* WARNING: dealing with register windows _is_ complicated. More info
1046 can be found at http://www.sics.se/~psm/sparcstack.html */
1047 static inline int get_reg_index(CPUSPARCState
*env
, int cwp
, int index
)
1049 index
= (index
+ cwp
* 16) % (16 * env
->nwindows
);
1050 /* wrap handling : if cwp is on the last window, then we use the
1051 registers 'after' the end */
1052 if (index
< 8 && env
->cwp
== env
->nwindows
- 1)
1053 index
+= 16 * env
->nwindows
;
1057 /* save the register window 'cwp1' */
1058 static inline void save_window_offset(CPUSPARCState
*env
, int cwp1
)
1063 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1064 #ifdef TARGET_SPARC64
1066 sp_ptr
+= SPARC64_STACK_BIAS
;
1068 #if defined(DEBUG_WIN)
1069 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" save_cwp=%d\n",
1072 for(i
= 0; i
< 16; i
++) {
1073 /* FIXME - what to do if put_user() fails? */
1074 put_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1075 sp_ptr
+= sizeof(abi_ulong
);
1079 static void save_window(CPUSPARCState
*env
)
1081 #ifndef TARGET_SPARC64
1082 unsigned int new_wim
;
1083 new_wim
= ((env
->wim
>> 1) | (env
->wim
<< (env
->nwindows
- 1))) &
1084 ((1LL << env
->nwindows
) - 1);
1085 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1088 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1094 static void restore_window(CPUSPARCState
*env
)
1096 #ifndef TARGET_SPARC64
1097 unsigned int new_wim
;
1099 unsigned int i
, cwp1
;
1102 #ifndef TARGET_SPARC64
1103 new_wim
= ((env
->wim
<< 1) | (env
->wim
>> (env
->nwindows
- 1))) &
1104 ((1LL << env
->nwindows
) - 1);
1107 /* restore the invalid window */
1108 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1109 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1110 #ifdef TARGET_SPARC64
1112 sp_ptr
+= SPARC64_STACK_BIAS
;
1114 #if defined(DEBUG_WIN)
1115 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" load_cwp=%d\n",
1118 for(i
= 0; i
< 16; i
++) {
1119 /* FIXME - what to do if get_user() fails? */
1120 get_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1121 sp_ptr
+= sizeof(abi_ulong
);
1123 #ifdef TARGET_SPARC64
1125 if (env
->cleanwin
< env
->nwindows
- 1)
1133 static void flush_windows(CPUSPARCState
*env
)
1139 /* if restore would invoke restore_window(), then we can stop */
1140 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ offset
);
1141 #ifndef TARGET_SPARC64
1142 if (env
->wim
& (1 << cwp1
))
1145 if (env
->canrestore
== 0)
1150 save_window_offset(env
, cwp1
);
1153 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1154 #ifndef TARGET_SPARC64
1155 /* set wim so that restore will reload the registers */
1156 env
->wim
= 1 << cwp1
;
1158 #if defined(DEBUG_WIN)
1159 printf("flush_windows: nb=%d\n", offset
- 1);
1163 void cpu_loop (CPUSPARCState
*env
)
1165 CPUState
*cs
= CPU(sparc_env_get_cpu(env
));
1168 target_siginfo_t info
;
1171 trapnr
= cpu_sparc_exec (env
);
1173 /* Compute PSR before exposing state. */
1174 if (env
->cc_op
!= CC_OP_FLAGS
) {
1179 #ifndef TARGET_SPARC64
1186 ret
= do_syscall (env
, env
->gregs
[1],
1187 env
->regwptr
[0], env
->regwptr
[1],
1188 env
->regwptr
[2], env
->regwptr
[3],
1189 env
->regwptr
[4], env
->regwptr
[5],
1191 if ((abi_ulong
)ret
>= (abi_ulong
)(-515)) {
1192 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1193 env
->xcc
|= PSR_CARRY
;
1195 env
->psr
|= PSR_CARRY
;
1199 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1200 env
->xcc
&= ~PSR_CARRY
;
1202 env
->psr
&= ~PSR_CARRY
;
1205 env
->regwptr
[0] = ret
;
1206 /* next instruction */
1208 env
->npc
= env
->npc
+ 4;
1210 case 0x83: /* flush windows */
1215 /* next instruction */
1217 env
->npc
= env
->npc
+ 4;
1219 #ifndef TARGET_SPARC64
1220 case TT_WIN_OVF
: /* window overflow */
1223 case TT_WIN_UNF
: /* window underflow */
1224 restore_window(env
);
1229 info
.si_signo
= TARGET_SIGSEGV
;
1231 /* XXX: check env->error_code */
1232 info
.si_code
= TARGET_SEGV_MAPERR
;
1233 info
._sifields
._sigfault
._addr
= env
->mmuregs
[4];
1234 queue_signal(env
, info
.si_signo
, &info
);
1238 case TT_SPILL
: /* window overflow */
1241 case TT_FILL
: /* window underflow */
1242 restore_window(env
);
1247 info
.si_signo
= TARGET_SIGSEGV
;
1249 /* XXX: check env->error_code */
1250 info
.si_code
= TARGET_SEGV_MAPERR
;
1251 if (trapnr
== TT_DFAULT
)
1252 info
._sifields
._sigfault
._addr
= env
->dmmuregs
[4];
1254 info
._sifields
._sigfault
._addr
= cpu_tsptr(env
)->tpc
;
1255 queue_signal(env
, info
.si_signo
, &info
);
1258 #ifndef TARGET_ABI32
1261 sparc64_get_context(env
);
1265 sparc64_set_context(env
);
1269 case EXCP_INTERRUPT
:
1270 /* just indicate that signals should be handled asap */
1274 info
.si_signo
= TARGET_SIGILL
;
1276 info
.si_code
= TARGET_ILL_ILLOPC
;
1277 info
._sifields
._sigfault
._addr
= env
->pc
;
1278 queue_signal(env
, info
.si_signo
, &info
);
1285 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1288 info
.si_signo
= sig
;
1290 info
.si_code
= TARGET_TRAP_BRKPT
;
1291 queue_signal(env
, info
.si_signo
, &info
);
1296 printf ("Unhandled trap: 0x%x\n", trapnr
);
1297 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1300 process_pending_signals (env
);
1307 static inline uint64_t cpu_ppc_get_tb(CPUPPCState
*env
)
1313 uint64_t cpu_ppc_load_tbl(CPUPPCState
*env
)
1315 return cpu_ppc_get_tb(env
);
1318 uint32_t cpu_ppc_load_tbu(CPUPPCState
*env
)
1320 return cpu_ppc_get_tb(env
) >> 32;
1323 uint64_t cpu_ppc_load_atbl(CPUPPCState
*env
)
1325 return cpu_ppc_get_tb(env
);
1328 uint32_t cpu_ppc_load_atbu(CPUPPCState
*env
)
1330 return cpu_ppc_get_tb(env
) >> 32;
1333 uint32_t cpu_ppc601_load_rtcu(CPUPPCState
*env
)
1334 __attribute__ (( alias ("cpu_ppc_load_tbu") ));
1336 uint32_t cpu_ppc601_load_rtcl(CPUPPCState
*env
)
1338 return cpu_ppc_load_tbl(env
) & 0x3FFFFF80;
1341 /* XXX: to be fixed */
1342 int ppc_dcr_read (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t *valp
)
1347 int ppc_dcr_write (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t val
)
1352 #define EXCP_DUMP(env, fmt, ...) \
1354 CPUState *cs = ENV_GET_CPU(env); \
1355 fprintf(stderr, fmt , ## __VA_ARGS__); \
1356 cpu_dump_state(cs, stderr, fprintf, 0); \
1357 qemu_log(fmt, ## __VA_ARGS__); \
1358 if (qemu_log_enabled()) { \
1359 log_cpu_state(cs, 0); \
1363 static int do_store_exclusive(CPUPPCState
*env
)
1366 target_ulong page_addr
;
1371 addr
= env
->reserve_ea
;
1372 page_addr
= addr
& TARGET_PAGE_MASK
;
1375 flags
= page_get_flags(page_addr
);
1376 if ((flags
& PAGE_READ
) == 0) {
1379 int reg
= env
->reserve_info
& 0x1f;
1380 int size
= (env
->reserve_info
>> 5) & 0xf;
1383 if (addr
== env
->reserve_addr
) {
1385 case 1: segv
= get_user_u8(val
, addr
); break;
1386 case 2: segv
= get_user_u16(val
, addr
); break;
1387 case 4: segv
= get_user_u32(val
, addr
); break;
1388 #if defined(TARGET_PPC64)
1389 case 8: segv
= get_user_u64(val
, addr
); break;
1393 if (!segv
&& val
== env
->reserve_val
) {
1394 val
= env
->gpr
[reg
];
1396 case 1: segv
= put_user_u8(val
, addr
); break;
1397 case 2: segv
= put_user_u16(val
, addr
); break;
1398 case 4: segv
= put_user_u32(val
, addr
); break;
1399 #if defined(TARGET_PPC64)
1400 case 8: segv
= put_user_u64(val
, addr
); break;
1409 env
->crf
[0] = (stored
<< 1) | xer_so
;
1410 env
->reserve_addr
= (target_ulong
)-1;
1420 void cpu_loop(CPUPPCState
*env
)
1422 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
1423 target_siginfo_t info
;
1429 trapnr
= cpu_ppc_exec(env
);
1432 case POWERPC_EXCP_NONE
:
1435 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1436 cpu_abort(env
, "Critical interrupt while in user mode. "
1439 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1440 cpu_abort(env
, "Machine check exception while in user mode. "
1443 case POWERPC_EXCP_DSI
: /* Data storage exception */
1444 EXCP_DUMP(env
, "Invalid data memory access: 0x" TARGET_FMT_lx
"\n",
1446 /* XXX: check this. Seems bugged */
1447 switch (env
->error_code
& 0xFF000000) {
1449 info
.si_signo
= TARGET_SIGSEGV
;
1451 info
.si_code
= TARGET_SEGV_MAPERR
;
1454 info
.si_signo
= TARGET_SIGILL
;
1456 info
.si_code
= TARGET_ILL_ILLADR
;
1459 info
.si_signo
= TARGET_SIGSEGV
;
1461 info
.si_code
= TARGET_SEGV_ACCERR
;
1464 /* Let's send a regular segfault... */
1465 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1467 info
.si_signo
= TARGET_SIGSEGV
;
1469 info
.si_code
= TARGET_SEGV_MAPERR
;
1472 info
._sifields
._sigfault
._addr
= env
->nip
;
1473 queue_signal(env
, info
.si_signo
, &info
);
1475 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1476 EXCP_DUMP(env
, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1477 "\n", env
->spr
[SPR_SRR0
]);
1478 /* XXX: check this */
1479 switch (env
->error_code
& 0xFF000000) {
1481 info
.si_signo
= TARGET_SIGSEGV
;
1483 info
.si_code
= TARGET_SEGV_MAPERR
;
1487 info
.si_signo
= TARGET_SIGSEGV
;
1489 info
.si_code
= TARGET_SEGV_ACCERR
;
1492 /* Let's send a regular segfault... */
1493 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1495 info
.si_signo
= TARGET_SIGSEGV
;
1497 info
.si_code
= TARGET_SEGV_MAPERR
;
1500 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1501 queue_signal(env
, info
.si_signo
, &info
);
1503 case POWERPC_EXCP_EXTERNAL
: /* External input */
1504 cpu_abort(env
, "External interrupt while in user mode. "
1507 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1508 EXCP_DUMP(env
, "Unaligned memory access\n");
1509 /* XXX: check this */
1510 info
.si_signo
= TARGET_SIGBUS
;
1512 info
.si_code
= TARGET_BUS_ADRALN
;
1513 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1514 queue_signal(env
, info
.si_signo
, &info
);
1516 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1517 /* XXX: check this */
1518 switch (env
->error_code
& ~0xF) {
1519 case POWERPC_EXCP_FP
:
1520 EXCP_DUMP(env
, "Floating point program exception\n");
1521 info
.si_signo
= TARGET_SIGFPE
;
1523 switch (env
->error_code
& 0xF) {
1524 case POWERPC_EXCP_FP_OX
:
1525 info
.si_code
= TARGET_FPE_FLTOVF
;
1527 case POWERPC_EXCP_FP_UX
:
1528 info
.si_code
= TARGET_FPE_FLTUND
;
1530 case POWERPC_EXCP_FP_ZX
:
1531 case POWERPC_EXCP_FP_VXZDZ
:
1532 info
.si_code
= TARGET_FPE_FLTDIV
;
1534 case POWERPC_EXCP_FP_XX
:
1535 info
.si_code
= TARGET_FPE_FLTRES
;
1537 case POWERPC_EXCP_FP_VXSOFT
:
1538 info
.si_code
= TARGET_FPE_FLTINV
;
1540 case POWERPC_EXCP_FP_VXSNAN
:
1541 case POWERPC_EXCP_FP_VXISI
:
1542 case POWERPC_EXCP_FP_VXIDI
:
1543 case POWERPC_EXCP_FP_VXIMZ
:
1544 case POWERPC_EXCP_FP_VXVC
:
1545 case POWERPC_EXCP_FP_VXSQRT
:
1546 case POWERPC_EXCP_FP_VXCVI
:
1547 info
.si_code
= TARGET_FPE_FLTSUB
;
1550 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1555 case POWERPC_EXCP_INVAL
:
1556 EXCP_DUMP(env
, "Invalid instruction\n");
1557 info
.si_signo
= TARGET_SIGILL
;
1559 switch (env
->error_code
& 0xF) {
1560 case POWERPC_EXCP_INVAL_INVAL
:
1561 info
.si_code
= TARGET_ILL_ILLOPC
;
1563 case POWERPC_EXCP_INVAL_LSWX
:
1564 info
.si_code
= TARGET_ILL_ILLOPN
;
1566 case POWERPC_EXCP_INVAL_SPR
:
1567 info
.si_code
= TARGET_ILL_PRVREG
;
1569 case POWERPC_EXCP_INVAL_FP
:
1570 info
.si_code
= TARGET_ILL_COPROC
;
1573 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1574 env
->error_code
& 0xF);
1575 info
.si_code
= TARGET_ILL_ILLADR
;
1579 case POWERPC_EXCP_PRIV
:
1580 EXCP_DUMP(env
, "Privilege violation\n");
1581 info
.si_signo
= TARGET_SIGILL
;
1583 switch (env
->error_code
& 0xF) {
1584 case POWERPC_EXCP_PRIV_OPC
:
1585 info
.si_code
= TARGET_ILL_PRVOPC
;
1587 case POWERPC_EXCP_PRIV_REG
:
1588 info
.si_code
= TARGET_ILL_PRVREG
;
1591 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1592 env
->error_code
& 0xF);
1593 info
.si_code
= TARGET_ILL_PRVOPC
;
1597 case POWERPC_EXCP_TRAP
:
1598 cpu_abort(env
, "Tried to call a TRAP\n");
1601 /* Should not happen ! */
1602 cpu_abort(env
, "Unknown program exception (%02x)\n",
1606 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1607 queue_signal(env
, info
.si_signo
, &info
);
1609 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1610 EXCP_DUMP(env
, "No floating point allowed\n");
1611 info
.si_signo
= TARGET_SIGILL
;
1613 info
.si_code
= TARGET_ILL_COPROC
;
1614 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1615 queue_signal(env
, info
.si_signo
, &info
);
1617 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1618 cpu_abort(env
, "Syscall exception while in user mode. "
1621 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1622 EXCP_DUMP(env
, "No APU instruction allowed\n");
1623 info
.si_signo
= TARGET_SIGILL
;
1625 info
.si_code
= TARGET_ILL_COPROC
;
1626 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1627 queue_signal(env
, info
.si_signo
, &info
);
1629 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1630 cpu_abort(env
, "Decrementer interrupt while in user mode. "
1633 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1634 cpu_abort(env
, "Fix interval timer interrupt while in user mode. "
1637 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1638 cpu_abort(env
, "Watchdog timer interrupt while in user mode. "
1641 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1642 cpu_abort(env
, "Data TLB exception while in user mode. "
1645 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1646 cpu_abort(env
, "Instruction TLB exception while in user mode. "
1649 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1650 EXCP_DUMP(env
, "No SPE/floating-point instruction allowed\n");
1651 info
.si_signo
= TARGET_SIGILL
;
1653 info
.si_code
= TARGET_ILL_COPROC
;
1654 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1655 queue_signal(env
, info
.si_signo
, &info
);
1657 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1658 cpu_abort(env
, "Embedded floating-point data IRQ not handled\n");
1660 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1661 cpu_abort(env
, "Embedded floating-point round IRQ not handled\n");
1663 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1664 cpu_abort(env
, "Performance monitor exception not handled\n");
1666 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1667 cpu_abort(env
, "Doorbell interrupt while in user mode. "
1670 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1671 cpu_abort(env
, "Doorbell critical interrupt while in user mode. "
1674 case POWERPC_EXCP_RESET
: /* System reset exception */
1675 cpu_abort(env
, "Reset interrupt while in user mode. "
1678 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1679 cpu_abort(env
, "Data segment exception while in user mode. "
1682 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1683 cpu_abort(env
, "Instruction segment exception "
1684 "while in user mode. Aborting\n");
1686 /* PowerPC 64 with hypervisor mode support */
1687 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1688 cpu_abort(env
, "Hypervisor decrementer interrupt "
1689 "while in user mode. Aborting\n");
1691 case POWERPC_EXCP_TRACE
: /* Trace exception */
1693 * we use this exception to emulate step-by-step execution mode.
1696 /* PowerPC 64 with hypervisor mode support */
1697 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1698 cpu_abort(env
, "Hypervisor data storage exception "
1699 "while in user mode. Aborting\n");
1701 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1702 cpu_abort(env
, "Hypervisor instruction storage exception "
1703 "while in user mode. Aborting\n");
1705 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1706 cpu_abort(env
, "Hypervisor data segment exception "
1707 "while in user mode. Aborting\n");
1709 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1710 cpu_abort(env
, "Hypervisor instruction segment exception "
1711 "while in user mode. Aborting\n");
1713 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1714 EXCP_DUMP(env
, "No Altivec instructions allowed\n");
1715 info
.si_signo
= TARGET_SIGILL
;
1717 info
.si_code
= TARGET_ILL_COPROC
;
1718 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1719 queue_signal(env
, info
.si_signo
, &info
);
1721 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1722 cpu_abort(env
, "Programmable interval timer interrupt "
1723 "while in user mode. Aborting\n");
1725 case POWERPC_EXCP_IO
: /* IO error exception */
1726 cpu_abort(env
, "IO error exception while in user mode. "
1729 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1730 cpu_abort(env
, "Run mode exception while in user mode. "
1733 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1734 cpu_abort(env
, "Emulation trap exception not handled\n");
1736 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1737 cpu_abort(env
, "Instruction fetch TLB exception "
1738 "while in user-mode. Aborting");
1740 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1741 cpu_abort(env
, "Data load TLB exception while in user-mode. "
1744 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
1745 cpu_abort(env
, "Data store TLB exception while in user-mode. "
1748 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
1749 cpu_abort(env
, "Floating-point assist exception not handled\n");
1751 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1752 cpu_abort(env
, "Instruction address breakpoint exception "
1755 case POWERPC_EXCP_SMI
: /* System management interrupt */
1756 cpu_abort(env
, "System management interrupt while in user mode. "
1759 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1760 cpu_abort(env
, "Thermal interrupt interrupt while in user mode. "
1763 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
1764 cpu_abort(env
, "Performance monitor exception not handled\n");
1766 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1767 cpu_abort(env
, "Vector assist exception not handled\n");
1769 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
1770 cpu_abort(env
, "Soft patch exception not handled\n");
1772 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1773 cpu_abort(env
, "Maintenance exception while in user mode. "
1776 case POWERPC_EXCP_STOP
: /* stop translation */
1777 /* We did invalidate the instruction cache. Go on */
1779 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
1780 /* We just stopped because of a branch. Go on */
1782 case POWERPC_EXCP_SYSCALL_USER
:
1783 /* system call in user-mode emulation */
1785 * PPC ABI uses overflow flag in cr0 to signal an error
1788 env
->crf
[0] &= ~0x1;
1789 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
1790 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
1792 if (ret
== (target_ulong
)(-TARGET_QEMU_ESIGRETURN
)) {
1793 /* Returning from a successful sigreturn syscall.
1794 Avoid corrupting register state. */
1797 if (ret
> (target_ulong
)(-515)) {
1803 case POWERPC_EXCP_STCX
:
1804 if (do_store_exclusive(env
)) {
1805 info
.si_signo
= TARGET_SIGSEGV
;
1807 info
.si_code
= TARGET_SEGV_MAPERR
;
1808 info
._sifields
._sigfault
._addr
= env
->nip
;
1809 queue_signal(env
, info
.si_signo
, &info
);
1816 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1818 info
.si_signo
= sig
;
1820 info
.si_code
= TARGET_TRAP_BRKPT
;
1821 queue_signal(env
, info
.si_signo
, &info
);
1825 case EXCP_INTERRUPT
:
1826 /* just indicate that signals should be handled asap */
1829 cpu_abort(env
, "Unknown exception 0x%d. Aborting\n", trapnr
);
1832 process_pending_signals(env
);
1839 # ifdef TARGET_ABI_MIPSO32
1840 # define MIPS_SYS(name, args) args,
1841 static const uint8_t mips_syscall_args
[] = {
1842 MIPS_SYS(sys_syscall
, 8) /* 4000 */
1843 MIPS_SYS(sys_exit
, 1)
1844 MIPS_SYS(sys_fork
, 0)
1845 MIPS_SYS(sys_read
, 3)
1846 MIPS_SYS(sys_write
, 3)
1847 MIPS_SYS(sys_open
, 3) /* 4005 */
1848 MIPS_SYS(sys_close
, 1)
1849 MIPS_SYS(sys_waitpid
, 3)
1850 MIPS_SYS(sys_creat
, 2)
1851 MIPS_SYS(sys_link
, 2)
1852 MIPS_SYS(sys_unlink
, 1) /* 4010 */
1853 MIPS_SYS(sys_execve
, 0)
1854 MIPS_SYS(sys_chdir
, 1)
1855 MIPS_SYS(sys_time
, 1)
1856 MIPS_SYS(sys_mknod
, 3)
1857 MIPS_SYS(sys_chmod
, 2) /* 4015 */
1858 MIPS_SYS(sys_lchown
, 3)
1859 MIPS_SYS(sys_ni_syscall
, 0)
1860 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_stat */
1861 MIPS_SYS(sys_lseek
, 3)
1862 MIPS_SYS(sys_getpid
, 0) /* 4020 */
1863 MIPS_SYS(sys_mount
, 5)
1864 MIPS_SYS(sys_oldumount
, 1)
1865 MIPS_SYS(sys_setuid
, 1)
1866 MIPS_SYS(sys_getuid
, 0)
1867 MIPS_SYS(sys_stime
, 1) /* 4025 */
1868 MIPS_SYS(sys_ptrace
, 4)
1869 MIPS_SYS(sys_alarm
, 1)
1870 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_fstat */
1871 MIPS_SYS(sys_pause
, 0)
1872 MIPS_SYS(sys_utime
, 2) /* 4030 */
1873 MIPS_SYS(sys_ni_syscall
, 0)
1874 MIPS_SYS(sys_ni_syscall
, 0)
1875 MIPS_SYS(sys_access
, 2)
1876 MIPS_SYS(sys_nice
, 1)
1877 MIPS_SYS(sys_ni_syscall
, 0) /* 4035 */
1878 MIPS_SYS(sys_sync
, 0)
1879 MIPS_SYS(sys_kill
, 2)
1880 MIPS_SYS(sys_rename
, 2)
1881 MIPS_SYS(sys_mkdir
, 2)
1882 MIPS_SYS(sys_rmdir
, 1) /* 4040 */
1883 MIPS_SYS(sys_dup
, 1)
1884 MIPS_SYS(sys_pipe
, 0)
1885 MIPS_SYS(sys_times
, 1)
1886 MIPS_SYS(sys_ni_syscall
, 0)
1887 MIPS_SYS(sys_brk
, 1) /* 4045 */
1888 MIPS_SYS(sys_setgid
, 1)
1889 MIPS_SYS(sys_getgid
, 0)
1890 MIPS_SYS(sys_ni_syscall
, 0) /* was signal(2) */
1891 MIPS_SYS(sys_geteuid
, 0)
1892 MIPS_SYS(sys_getegid
, 0) /* 4050 */
1893 MIPS_SYS(sys_acct
, 0)
1894 MIPS_SYS(sys_umount
, 2)
1895 MIPS_SYS(sys_ni_syscall
, 0)
1896 MIPS_SYS(sys_ioctl
, 3)
1897 MIPS_SYS(sys_fcntl
, 3) /* 4055 */
1898 MIPS_SYS(sys_ni_syscall
, 2)
1899 MIPS_SYS(sys_setpgid
, 2)
1900 MIPS_SYS(sys_ni_syscall
, 0)
1901 MIPS_SYS(sys_olduname
, 1)
1902 MIPS_SYS(sys_umask
, 1) /* 4060 */
1903 MIPS_SYS(sys_chroot
, 1)
1904 MIPS_SYS(sys_ustat
, 2)
1905 MIPS_SYS(sys_dup2
, 2)
1906 MIPS_SYS(sys_getppid
, 0)
1907 MIPS_SYS(sys_getpgrp
, 0) /* 4065 */
1908 MIPS_SYS(sys_setsid
, 0)
1909 MIPS_SYS(sys_sigaction
, 3)
1910 MIPS_SYS(sys_sgetmask
, 0)
1911 MIPS_SYS(sys_ssetmask
, 1)
1912 MIPS_SYS(sys_setreuid
, 2) /* 4070 */
1913 MIPS_SYS(sys_setregid
, 2)
1914 MIPS_SYS(sys_sigsuspend
, 0)
1915 MIPS_SYS(sys_sigpending
, 1)
1916 MIPS_SYS(sys_sethostname
, 2)
1917 MIPS_SYS(sys_setrlimit
, 2) /* 4075 */
1918 MIPS_SYS(sys_getrlimit
, 2)
1919 MIPS_SYS(sys_getrusage
, 2)
1920 MIPS_SYS(sys_gettimeofday
, 2)
1921 MIPS_SYS(sys_settimeofday
, 2)
1922 MIPS_SYS(sys_getgroups
, 2) /* 4080 */
1923 MIPS_SYS(sys_setgroups
, 2)
1924 MIPS_SYS(sys_ni_syscall
, 0) /* old_select */
1925 MIPS_SYS(sys_symlink
, 2)
1926 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_lstat */
1927 MIPS_SYS(sys_readlink
, 3) /* 4085 */
1928 MIPS_SYS(sys_uselib
, 1)
1929 MIPS_SYS(sys_swapon
, 2)
1930 MIPS_SYS(sys_reboot
, 3)
1931 MIPS_SYS(old_readdir
, 3)
1932 MIPS_SYS(old_mmap
, 6) /* 4090 */
1933 MIPS_SYS(sys_munmap
, 2)
1934 MIPS_SYS(sys_truncate
, 2)
1935 MIPS_SYS(sys_ftruncate
, 2)
1936 MIPS_SYS(sys_fchmod
, 2)
1937 MIPS_SYS(sys_fchown
, 3) /* 4095 */
1938 MIPS_SYS(sys_getpriority
, 2)
1939 MIPS_SYS(sys_setpriority
, 3)
1940 MIPS_SYS(sys_ni_syscall
, 0)
1941 MIPS_SYS(sys_statfs
, 2)
1942 MIPS_SYS(sys_fstatfs
, 2) /* 4100 */
1943 MIPS_SYS(sys_ni_syscall
, 0) /* was ioperm(2) */
1944 MIPS_SYS(sys_socketcall
, 2)
1945 MIPS_SYS(sys_syslog
, 3)
1946 MIPS_SYS(sys_setitimer
, 3)
1947 MIPS_SYS(sys_getitimer
, 2) /* 4105 */
1948 MIPS_SYS(sys_newstat
, 2)
1949 MIPS_SYS(sys_newlstat
, 2)
1950 MIPS_SYS(sys_newfstat
, 2)
1951 MIPS_SYS(sys_uname
, 1)
1952 MIPS_SYS(sys_ni_syscall
, 0) /* 4110 was iopl(2) */
1953 MIPS_SYS(sys_vhangup
, 0)
1954 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_idle() */
1955 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_vm86 */
1956 MIPS_SYS(sys_wait4
, 4)
1957 MIPS_SYS(sys_swapoff
, 1) /* 4115 */
1958 MIPS_SYS(sys_sysinfo
, 1)
1959 MIPS_SYS(sys_ipc
, 6)
1960 MIPS_SYS(sys_fsync
, 1)
1961 MIPS_SYS(sys_sigreturn
, 0)
1962 MIPS_SYS(sys_clone
, 6) /* 4120 */
1963 MIPS_SYS(sys_setdomainname
, 2)
1964 MIPS_SYS(sys_newuname
, 1)
1965 MIPS_SYS(sys_ni_syscall
, 0) /* sys_modify_ldt */
1966 MIPS_SYS(sys_adjtimex
, 1)
1967 MIPS_SYS(sys_mprotect
, 3) /* 4125 */
1968 MIPS_SYS(sys_sigprocmask
, 3)
1969 MIPS_SYS(sys_ni_syscall
, 0) /* was create_module */
1970 MIPS_SYS(sys_init_module
, 5)
1971 MIPS_SYS(sys_delete_module
, 1)
1972 MIPS_SYS(sys_ni_syscall
, 0) /* 4130 was get_kernel_syms */
1973 MIPS_SYS(sys_quotactl
, 0)
1974 MIPS_SYS(sys_getpgid
, 1)
1975 MIPS_SYS(sys_fchdir
, 1)
1976 MIPS_SYS(sys_bdflush
, 2)
1977 MIPS_SYS(sys_sysfs
, 3) /* 4135 */
1978 MIPS_SYS(sys_personality
, 1)
1979 MIPS_SYS(sys_ni_syscall
, 0) /* for afs_syscall */
1980 MIPS_SYS(sys_setfsuid
, 1)
1981 MIPS_SYS(sys_setfsgid
, 1)
1982 MIPS_SYS(sys_llseek
, 5) /* 4140 */
1983 MIPS_SYS(sys_getdents
, 3)
1984 MIPS_SYS(sys_select
, 5)
1985 MIPS_SYS(sys_flock
, 2)
1986 MIPS_SYS(sys_msync
, 3)
1987 MIPS_SYS(sys_readv
, 3) /* 4145 */
1988 MIPS_SYS(sys_writev
, 3)
1989 MIPS_SYS(sys_cacheflush
, 3)
1990 MIPS_SYS(sys_cachectl
, 3)
1991 MIPS_SYS(sys_sysmips
, 4)
1992 MIPS_SYS(sys_ni_syscall
, 0) /* 4150 */
1993 MIPS_SYS(sys_getsid
, 1)
1994 MIPS_SYS(sys_fdatasync
, 0)
1995 MIPS_SYS(sys_sysctl
, 1)
1996 MIPS_SYS(sys_mlock
, 2)
1997 MIPS_SYS(sys_munlock
, 2) /* 4155 */
1998 MIPS_SYS(sys_mlockall
, 1)
1999 MIPS_SYS(sys_munlockall
, 0)
2000 MIPS_SYS(sys_sched_setparam
, 2)
2001 MIPS_SYS(sys_sched_getparam
, 2)
2002 MIPS_SYS(sys_sched_setscheduler
, 3) /* 4160 */
2003 MIPS_SYS(sys_sched_getscheduler
, 1)
2004 MIPS_SYS(sys_sched_yield
, 0)
2005 MIPS_SYS(sys_sched_get_priority_max
, 1)
2006 MIPS_SYS(sys_sched_get_priority_min
, 1)
2007 MIPS_SYS(sys_sched_rr_get_interval
, 2) /* 4165 */
2008 MIPS_SYS(sys_nanosleep
, 2)
2009 MIPS_SYS(sys_mremap
, 5)
2010 MIPS_SYS(sys_accept
, 3)
2011 MIPS_SYS(sys_bind
, 3)
2012 MIPS_SYS(sys_connect
, 3) /* 4170 */
2013 MIPS_SYS(sys_getpeername
, 3)
2014 MIPS_SYS(sys_getsockname
, 3)
2015 MIPS_SYS(sys_getsockopt
, 5)
2016 MIPS_SYS(sys_listen
, 2)
2017 MIPS_SYS(sys_recv
, 4) /* 4175 */
2018 MIPS_SYS(sys_recvfrom
, 6)
2019 MIPS_SYS(sys_recvmsg
, 3)
2020 MIPS_SYS(sys_send
, 4)
2021 MIPS_SYS(sys_sendmsg
, 3)
2022 MIPS_SYS(sys_sendto
, 6) /* 4180 */
2023 MIPS_SYS(sys_setsockopt
, 5)
2024 MIPS_SYS(sys_shutdown
, 2)
2025 MIPS_SYS(sys_socket
, 3)
2026 MIPS_SYS(sys_socketpair
, 4)
2027 MIPS_SYS(sys_setresuid
, 3) /* 4185 */
2028 MIPS_SYS(sys_getresuid
, 3)
2029 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_query_module */
2030 MIPS_SYS(sys_poll
, 3)
2031 MIPS_SYS(sys_nfsservctl
, 3)
2032 MIPS_SYS(sys_setresgid
, 3) /* 4190 */
2033 MIPS_SYS(sys_getresgid
, 3)
2034 MIPS_SYS(sys_prctl
, 5)
2035 MIPS_SYS(sys_rt_sigreturn
, 0)
2036 MIPS_SYS(sys_rt_sigaction
, 4)
2037 MIPS_SYS(sys_rt_sigprocmask
, 4) /* 4195 */
2038 MIPS_SYS(sys_rt_sigpending
, 2)
2039 MIPS_SYS(sys_rt_sigtimedwait
, 4)
2040 MIPS_SYS(sys_rt_sigqueueinfo
, 3)
2041 MIPS_SYS(sys_rt_sigsuspend
, 0)
2042 MIPS_SYS(sys_pread64
, 6) /* 4200 */
2043 MIPS_SYS(sys_pwrite64
, 6)
2044 MIPS_SYS(sys_chown
, 3)
2045 MIPS_SYS(sys_getcwd
, 2)
2046 MIPS_SYS(sys_capget
, 2)
2047 MIPS_SYS(sys_capset
, 2) /* 4205 */
2048 MIPS_SYS(sys_sigaltstack
, 2)
2049 MIPS_SYS(sys_sendfile
, 4)
2050 MIPS_SYS(sys_ni_syscall
, 0)
2051 MIPS_SYS(sys_ni_syscall
, 0)
2052 MIPS_SYS(sys_mmap2
, 6) /* 4210 */
2053 MIPS_SYS(sys_truncate64
, 4)
2054 MIPS_SYS(sys_ftruncate64
, 4)
2055 MIPS_SYS(sys_stat64
, 2)
2056 MIPS_SYS(sys_lstat64
, 2)
2057 MIPS_SYS(sys_fstat64
, 2) /* 4215 */
2058 MIPS_SYS(sys_pivot_root
, 2)
2059 MIPS_SYS(sys_mincore
, 3)
2060 MIPS_SYS(sys_madvise
, 3)
2061 MIPS_SYS(sys_getdents64
, 3)
2062 MIPS_SYS(sys_fcntl64
, 3) /* 4220 */
2063 MIPS_SYS(sys_ni_syscall
, 0)
2064 MIPS_SYS(sys_gettid
, 0)
2065 MIPS_SYS(sys_readahead
, 5)
2066 MIPS_SYS(sys_setxattr
, 5)
2067 MIPS_SYS(sys_lsetxattr
, 5) /* 4225 */
2068 MIPS_SYS(sys_fsetxattr
, 5)
2069 MIPS_SYS(sys_getxattr
, 4)
2070 MIPS_SYS(sys_lgetxattr
, 4)
2071 MIPS_SYS(sys_fgetxattr
, 4)
2072 MIPS_SYS(sys_listxattr
, 3) /* 4230 */
2073 MIPS_SYS(sys_llistxattr
, 3)
2074 MIPS_SYS(sys_flistxattr
, 3)
2075 MIPS_SYS(sys_removexattr
, 2)
2076 MIPS_SYS(sys_lremovexattr
, 2)
2077 MIPS_SYS(sys_fremovexattr
, 2) /* 4235 */
2078 MIPS_SYS(sys_tkill
, 2)
2079 MIPS_SYS(sys_sendfile64
, 5)
2080 MIPS_SYS(sys_futex
, 6)
2081 MIPS_SYS(sys_sched_setaffinity
, 3)
2082 MIPS_SYS(sys_sched_getaffinity
, 3) /* 4240 */
2083 MIPS_SYS(sys_io_setup
, 2)
2084 MIPS_SYS(sys_io_destroy
, 1)
2085 MIPS_SYS(sys_io_getevents
, 5)
2086 MIPS_SYS(sys_io_submit
, 3)
2087 MIPS_SYS(sys_io_cancel
, 3) /* 4245 */
2088 MIPS_SYS(sys_exit_group
, 1)
2089 MIPS_SYS(sys_lookup_dcookie
, 3)
2090 MIPS_SYS(sys_epoll_create
, 1)
2091 MIPS_SYS(sys_epoll_ctl
, 4)
2092 MIPS_SYS(sys_epoll_wait
, 3) /* 4250 */
2093 MIPS_SYS(sys_remap_file_pages
, 5)
2094 MIPS_SYS(sys_set_tid_address
, 1)
2095 MIPS_SYS(sys_restart_syscall
, 0)
2096 MIPS_SYS(sys_fadvise64_64
, 7)
2097 MIPS_SYS(sys_statfs64
, 3) /* 4255 */
2098 MIPS_SYS(sys_fstatfs64
, 2)
2099 MIPS_SYS(sys_timer_create
, 3)
2100 MIPS_SYS(sys_timer_settime
, 4)
2101 MIPS_SYS(sys_timer_gettime
, 2)
2102 MIPS_SYS(sys_timer_getoverrun
, 1) /* 4260 */
2103 MIPS_SYS(sys_timer_delete
, 1)
2104 MIPS_SYS(sys_clock_settime
, 2)
2105 MIPS_SYS(sys_clock_gettime
, 2)
2106 MIPS_SYS(sys_clock_getres
, 2)
2107 MIPS_SYS(sys_clock_nanosleep
, 4) /* 4265 */
2108 MIPS_SYS(sys_tgkill
, 3)
2109 MIPS_SYS(sys_utimes
, 2)
2110 MIPS_SYS(sys_mbind
, 4)
2111 MIPS_SYS(sys_ni_syscall
, 0) /* sys_get_mempolicy */
2112 MIPS_SYS(sys_ni_syscall
, 0) /* 4270 sys_set_mempolicy */
2113 MIPS_SYS(sys_mq_open
, 4)
2114 MIPS_SYS(sys_mq_unlink
, 1)
2115 MIPS_SYS(sys_mq_timedsend
, 5)
2116 MIPS_SYS(sys_mq_timedreceive
, 5)
2117 MIPS_SYS(sys_mq_notify
, 2) /* 4275 */
2118 MIPS_SYS(sys_mq_getsetattr
, 3)
2119 MIPS_SYS(sys_ni_syscall
, 0) /* sys_vserver */
2120 MIPS_SYS(sys_waitid
, 4)
2121 MIPS_SYS(sys_ni_syscall
, 0) /* available, was setaltroot */
2122 MIPS_SYS(sys_add_key
, 5)
2123 MIPS_SYS(sys_request_key
, 4)
2124 MIPS_SYS(sys_keyctl
, 5)
2125 MIPS_SYS(sys_set_thread_area
, 1)
2126 MIPS_SYS(sys_inotify_init
, 0)
2127 MIPS_SYS(sys_inotify_add_watch
, 3) /* 4285 */
2128 MIPS_SYS(sys_inotify_rm_watch
, 2)
2129 MIPS_SYS(sys_migrate_pages
, 4)
2130 MIPS_SYS(sys_openat
, 4)
2131 MIPS_SYS(sys_mkdirat
, 3)
2132 MIPS_SYS(sys_mknodat
, 4) /* 4290 */
2133 MIPS_SYS(sys_fchownat
, 5)
2134 MIPS_SYS(sys_futimesat
, 3)
2135 MIPS_SYS(sys_fstatat64
, 4)
2136 MIPS_SYS(sys_unlinkat
, 3)
2137 MIPS_SYS(sys_renameat
, 4) /* 4295 */
2138 MIPS_SYS(sys_linkat
, 5)
2139 MIPS_SYS(sys_symlinkat
, 3)
2140 MIPS_SYS(sys_readlinkat
, 4)
2141 MIPS_SYS(sys_fchmodat
, 3)
2142 MIPS_SYS(sys_faccessat
, 3) /* 4300 */
2143 MIPS_SYS(sys_pselect6
, 6)
2144 MIPS_SYS(sys_ppoll
, 5)
2145 MIPS_SYS(sys_unshare
, 1)
2146 MIPS_SYS(sys_splice
, 6)
2147 MIPS_SYS(sys_sync_file_range
, 7) /* 4305 */
2148 MIPS_SYS(sys_tee
, 4)
2149 MIPS_SYS(sys_vmsplice
, 4)
2150 MIPS_SYS(sys_move_pages
, 6)
2151 MIPS_SYS(sys_set_robust_list
, 2)
2152 MIPS_SYS(sys_get_robust_list
, 3) /* 4310 */
2153 MIPS_SYS(sys_kexec_load
, 4)
2154 MIPS_SYS(sys_getcpu
, 3)
2155 MIPS_SYS(sys_epoll_pwait
, 6)
2156 MIPS_SYS(sys_ioprio_set
, 3)
2157 MIPS_SYS(sys_ioprio_get
, 2)
2158 MIPS_SYS(sys_utimensat
, 4)
2159 MIPS_SYS(sys_signalfd
, 3)
2160 MIPS_SYS(sys_ni_syscall
, 0) /* was timerfd */
2161 MIPS_SYS(sys_eventfd
, 1)
2162 MIPS_SYS(sys_fallocate
, 6) /* 4320 */
2163 MIPS_SYS(sys_timerfd_create
, 2)
2164 MIPS_SYS(sys_timerfd_gettime
, 2)
2165 MIPS_SYS(sys_timerfd_settime
, 4)
2166 MIPS_SYS(sys_signalfd4
, 4)
2167 MIPS_SYS(sys_eventfd2
, 2) /* 4325 */
2168 MIPS_SYS(sys_epoll_create1
, 1)
2169 MIPS_SYS(sys_dup3
, 3)
2170 MIPS_SYS(sys_pipe2
, 2)
2171 MIPS_SYS(sys_inotify_init1
, 1)
2172 MIPS_SYS(sys_preadv
, 6) /* 4330 */
2173 MIPS_SYS(sys_pwritev
, 6)
2174 MIPS_SYS(sys_rt_tgsigqueueinfo
, 4)
2175 MIPS_SYS(sys_perf_event_open
, 5)
2176 MIPS_SYS(sys_accept4
, 4)
2177 MIPS_SYS(sys_recvmmsg
, 5) /* 4335 */
2178 MIPS_SYS(sys_fanotify_init
, 2)
2179 MIPS_SYS(sys_fanotify_mark
, 6)
2180 MIPS_SYS(sys_prlimit64
, 4)
2181 MIPS_SYS(sys_name_to_handle_at
, 5)
2182 MIPS_SYS(sys_open_by_handle_at
, 3) /* 4340 */
2183 MIPS_SYS(sys_clock_adjtime
, 2)
2184 MIPS_SYS(sys_syncfs
, 1)
2189 static int do_store_exclusive(CPUMIPSState
*env
)
2192 target_ulong page_addr
;
2200 page_addr
= addr
& TARGET_PAGE_MASK
;
2203 flags
= page_get_flags(page_addr
);
2204 if ((flags
& PAGE_READ
) == 0) {
2207 reg
= env
->llreg
& 0x1f;
2208 d
= (env
->llreg
& 0x20) != 0;
2210 segv
= get_user_s64(val
, addr
);
2212 segv
= get_user_s32(val
, addr
);
2215 if (val
!= env
->llval
) {
2216 env
->active_tc
.gpr
[reg
] = 0;
2219 segv
= put_user_u64(env
->llnewval
, addr
);
2221 segv
= put_user_u32(env
->llnewval
, addr
);
2224 env
->active_tc
.gpr
[reg
] = 1;
2231 env
->active_tc
.PC
+= 4;
2244 static int do_break(CPUMIPSState
*env
, target_siginfo_t
*info
,
2252 info
->si_signo
= TARGET_SIGFPE
;
2254 info
->si_code
= (code
== BRK_OVERFLOW
) ? FPE_INTOVF
: FPE_INTDIV
;
2255 queue_signal(env
, info
->si_signo
, &*info
);
2265 void cpu_loop(CPUMIPSState
*env
)
2267 CPUState
*cs
= CPU(mips_env_get_cpu(env
));
2268 target_siginfo_t info
;
2271 # ifdef TARGET_ABI_MIPSO32
2272 unsigned int syscall_num
;
2277 trapnr
= cpu_mips_exec(env
);
2281 env
->active_tc
.PC
+= 4;
2282 # ifdef TARGET_ABI_MIPSO32
2283 syscall_num
= env
->active_tc
.gpr
[2] - 4000;
2284 if (syscall_num
>= sizeof(mips_syscall_args
)) {
2285 ret
= -TARGET_ENOSYS
;
2289 abi_ulong arg5
= 0, arg6
= 0, arg7
= 0, arg8
= 0;
2291 nb_args
= mips_syscall_args
[syscall_num
];
2292 sp_reg
= env
->active_tc
.gpr
[29];
2294 /* these arguments are taken from the stack */
2296 if ((ret
= get_user_ual(arg8
, sp_reg
+ 28)) != 0) {
2300 if ((ret
= get_user_ual(arg7
, sp_reg
+ 24)) != 0) {
2304 if ((ret
= get_user_ual(arg6
, sp_reg
+ 20)) != 0) {
2308 if ((ret
= get_user_ual(arg5
, sp_reg
+ 16)) != 0) {
2314 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2315 env
->active_tc
.gpr
[4],
2316 env
->active_tc
.gpr
[5],
2317 env
->active_tc
.gpr
[6],
2318 env
->active_tc
.gpr
[7],
2319 arg5
, arg6
, arg7
, arg8
);
2323 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2324 env
->active_tc
.gpr
[4], env
->active_tc
.gpr
[5],
2325 env
->active_tc
.gpr
[6], env
->active_tc
.gpr
[7],
2326 env
->active_tc
.gpr
[8], env
->active_tc
.gpr
[9],
2327 env
->active_tc
.gpr
[10], env
->active_tc
.gpr
[11]);
2329 if (ret
== -TARGET_QEMU_ESIGRETURN
) {
2330 /* Returning from a successful sigreturn syscall.
2331 Avoid clobbering register state. */
2334 if ((abi_ulong
)ret
>= (abi_ulong
)-1133) {
2335 env
->active_tc
.gpr
[7] = 1; /* error flag */
2338 env
->active_tc
.gpr
[7] = 0; /* error flag */
2340 env
->active_tc
.gpr
[2] = ret
;
2346 info
.si_signo
= TARGET_SIGSEGV
;
2348 /* XXX: check env->error_code */
2349 info
.si_code
= TARGET_SEGV_MAPERR
;
2350 info
._sifields
._sigfault
._addr
= env
->CP0_BadVAddr
;
2351 queue_signal(env
, info
.si_signo
, &info
);
2355 info
.si_signo
= TARGET_SIGILL
;
2358 queue_signal(env
, info
.si_signo
, &info
);
2360 case EXCP_INTERRUPT
:
2361 /* just indicate that signals should be handled asap */
2367 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2370 info
.si_signo
= sig
;
2372 info
.si_code
= TARGET_TRAP_BRKPT
;
2373 queue_signal(env
, info
.si_signo
, &info
);
2378 if (do_store_exclusive(env
)) {
2379 info
.si_signo
= TARGET_SIGSEGV
;
2381 info
.si_code
= TARGET_SEGV_MAPERR
;
2382 info
._sifields
._sigfault
._addr
= env
->active_tc
.PC
;
2383 queue_signal(env
, info
.si_signo
, &info
);
2387 info
.si_signo
= TARGET_SIGILL
;
2389 info
.si_code
= TARGET_ILL_ILLOPC
;
2390 queue_signal(env
, info
.si_signo
, &info
);
2392 /* The code below was inspired by the MIPS Linux kernel trap
2393 * handling code in arch/mips/kernel/traps.c.
2397 abi_ulong trap_instr
;
2400 if (env
->hflags
& MIPS_HFLAG_M16
) {
2401 if (env
->insn_flags
& ASE_MICROMIPS
) {
2402 /* microMIPS mode */
2405 ret
= get_user_u16(instr
[0], env
->active_tc
.PC
) ||
2406 get_user_u16(instr
[1], env
->active_tc
.PC
+ 2);
2408 trap_instr
= (instr
[0] << 16) | instr
[1];
2411 ret
= get_user_u16(trap_instr
, env
->active_tc
.PC
);
2415 code
= (trap_instr
>> 6) & 0x3f;
2416 if (do_break(env
, &info
, code
) != 0) {
2422 ret
= get_user_ual(trap_instr
, env
->active_tc
.PC
);
2429 /* As described in the original Linux kernel code, the
2430 * below checks on 'code' are to work around an old
2433 code
= ((trap_instr
>> 6) & ((1 << 20) - 1));
2434 if (code
>= (1 << 10)) {
2438 if (do_break(env
, &info
, code
) != 0) {
2445 abi_ulong trap_instr
;
2446 unsigned int code
= 0;
2448 if (env
->hflags
& MIPS_HFLAG_M16
) {
2449 /* microMIPS mode */
2452 ret
= get_user_u16(instr
[0], env
->active_tc
.PC
) ||
2453 get_user_u16(instr
[1], env
->active_tc
.PC
+ 2);
2455 trap_instr
= (instr
[0] << 16) | instr
[1];
2457 ret
= get_user_ual(trap_instr
, env
->active_tc
.PC
);
2464 /* The immediate versions don't provide a code. */
2465 if (!(trap_instr
& 0xFC000000)) {
2466 if (env
->hflags
& MIPS_HFLAG_M16
) {
2467 /* microMIPS mode */
2468 code
= ((trap_instr
>> 12) & ((1 << 4) - 1));
2470 code
= ((trap_instr
>> 6) & ((1 << 10) - 1));
2474 if (do_break(env
, &info
, code
) != 0) {
2481 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
2483 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2486 process_pending_signals(env
);
2491 #ifdef TARGET_OPENRISC
2493 void cpu_loop(CPUOpenRISCState
*env
)
2495 CPUState
*cs
= CPU(openrisc_env_get_cpu(env
));
2499 trapnr
= cpu_exec(env
);
2504 qemu_log("\nReset request, exit, pc is %#x\n", env
->pc
);
2508 qemu_log("\nBus error, exit, pc is %#x\n", env
->pc
);
2513 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2514 gdbsig
= TARGET_SIGSEGV
;
2517 qemu_log("\nTick time interrupt pc is %#x\n", env
->pc
);
2520 qemu_log("\nAlignment pc is %#x\n", env
->pc
);
qemu_log("\nIllegal instruction, pc is %#x\n", env->pc);
qemu_log("\nExternal interrupt, pc is %#x\n", env->pc);
2532 qemu_log("\nTLB miss\n");
2535 qemu_log("\nRange\n");
2539 env
->pc
+= 4; /* 0xc00; */
2540 env
->gpr
[11] = do_syscall(env
,
2541 env
->gpr
[11], /* return value */
2542 env
->gpr
[3], /* r3 - r7 are params */
2550 qemu_log("\nFloating point error\n");
2553 qemu_log("\nTrap\n");
2560 qemu_log("\nqemu: unhandled CPU exception %#x - aborting\n",
2562 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2563 gdbsig
= TARGET_SIGILL
;
2567 gdb_handlesig(cs
, gdbsig
);
2568 if (gdbsig
!= TARGET_SIGTRAP
) {
2573 process_pending_signals(env
);
2577 #endif /* TARGET_OPENRISC */
void cpu_loop(CPUSH4State *env)
CPUState *cs = CPU(sh_env_get_cpu(env));
target_siginfo_t info;
trapnr = cpu_sh4_exec(env);
ret = do_syscall(env,
env->gregs[0] = ret;
case EXCP_INTERRUPT:
    /* just indicate that signals should be handled asap */
sig = gdb_handlesig(cs, TARGET_SIGTRAP);
info.si_signo = sig;
info.si_code = TARGET_TRAP_BRKPT;
queue_signal(env, info.si_signo, &info);
info.si_signo = SIGSEGV;
info.si_code = TARGET_SEGV_MAPERR;
info._sifields._sigfault._addr = env->tea;
queue_signal(env, info.si_signo, &info);
printf("Unhandled trap: 0x%x\n", trapnr);
cpu_dump_state(cs, stderr, fprintf, 0);
process_pending_signals(env);
void cpu_loop(CPUCRISState *env)
CPUState *cs = CPU(cris_env_get_cpu(env));
target_siginfo_t info;
trapnr = cpu_cris_exec(env);
info.si_signo = SIGSEGV;
/* XXX: check env->error_code */
info.si_code = TARGET_SEGV_MAPERR;
info._sifields._sigfault._addr = env->pregs[PR_EDA];
queue_signal(env, info.si_signo, &info);
case EXCP_INTERRUPT:
    /* just indicate that signals should be handled asap */
ret = do_syscall(env,
env->regs[10] = ret;
sig = gdb_handlesig(cs, TARGET_SIGTRAP);
info.si_signo = sig;
info.si_code = TARGET_TRAP_BRKPT;
queue_signal(env, info.si_signo, &info);
printf("Unhandled trap: 0x%x\n", trapnr);
cpu_dump_state(cs, stderr, fprintf, 0);
process_pending_signals(env);
#ifdef TARGET_MICROBLAZE
void cpu_loop(CPUMBState *env)
CPUState *cs = CPU(mb_env_get_cpu(env));
target_siginfo_t info;
trapnr = cpu_mb_exec(env);
info.si_signo = SIGSEGV;
/* XXX: check env->error_code */
info.si_code = TARGET_SEGV_MAPERR;
info._sifields._sigfault._addr = 0;
queue_signal(env, info.si_signo, &info);
case EXCP_INTERRUPT:
    /* just indicate that signals should be handled asap */
/* Return address is 4 bytes after the call. */
env->sregs[SR_PC] = env->regs[14];
ret = do_syscall(env,
env->regs[17] = env->sregs[SR_PC] + 4;
if (env->iflags & D_FLAG) {
    env->sregs[SR_ESR] |= 1 << 12;
    env->sregs[SR_PC] -= 4;
    /* FIXME: if branch was immed, replay the imm as well. */
env->iflags &= ~(IMM_FLAG | D_FLAG);
switch (env->sregs[SR_ESR] & 31) {
case ESR_EC_DIVZERO:
    info.si_signo = SIGFPE;
    info.si_code = TARGET_FPE_FLTDIV;
    info._sifields._sigfault._addr = 0;
    queue_signal(env, info.si_signo, &info);
    info.si_signo = SIGFPE;
    if (env->sregs[SR_FSR] & FSR_IO) {
        info.si_code = TARGET_FPE_FLTINV;
    if (env->sregs[SR_FSR] & FSR_DZ) {
        info.si_code = TARGET_FPE_FLTDIV;
    info._sifields._sigfault._addr = 0;
    queue_signal(env, info.si_signo, &info);
printf("Unhandled hw-exception: 0x%x\n",
       env->sregs[SR_ESR] & ESR_EC_MASK);
cpu_dump_state(cs, stderr, fprintf, 0);
sig = gdb_handlesig(cs, TARGET_SIGTRAP);
info.si_signo = sig;
info.si_code = TARGET_TRAP_BRKPT;
queue_signal(env, info.si_signo, &info);
printf("Unhandled trap: 0x%x\n", trapnr);
cpu_dump_state(cs, stderr, fprintf, 0);
process_pending_signals(env);
void cpu_loop(CPUM68KState *env)
CPUState *cs = CPU(m68k_env_get_cpu(env));
target_siginfo_t info;
TaskState *ts = env->opaque;
trapnr = cpu_m68k_exec(env);
if (ts->sim_syscalls) {
    nr = lduw(env->pc + 2);
    do_m68k_simcall(env, nr);
case EXCP_HALT_INSN:
    /* Semihosting syscall.  */
    do_m68k_semihosting(env, env->dregs[0]);
case EXCP_UNSUPPORTED:
info.si_signo = SIGILL;
info.si_code = TARGET_ILL_ILLOPN;
info._sifields._sigfault._addr = env->pc;
queue_signal(env, info.si_signo, &info);
ts->sim_syscalls = 0;
env->dregs[0] = do_syscall(env,
case EXCP_INTERRUPT:
    /* just indicate that signals should be handled asap */
info.si_signo = SIGSEGV;
/* XXX: check env->error_code */
info.si_code = TARGET_SEGV_MAPERR;
info._sifields._sigfault._addr = env->mmu.ar;
queue_signal(env, info.si_signo, &info);
sig = gdb_handlesig(cs, TARGET_SIGTRAP);
info.si_signo = sig;
info.si_code = TARGET_TRAP_BRKPT;
queue_signal(env, info.si_signo, &info);
fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
cpu_dump_state(cs, stderr, fprintf, 0);
process_pending_signals(env);
#endif /* TARGET_M68K */
static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)
target_ulong addr, val, tmp;
target_siginfo_t info;
addr = env->lock_addr;
tmp = env->lock_st_addr;
env->lock_addr = -1;
env->lock_st_addr = 0;
if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
if (val == env->lock_value) {
    if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {
info.si_signo = TARGET_SIGSEGV;
info.si_code = TARGET_SEGV_MAPERR;
info._sifields._sigfault._addr = addr;
queue_signal(env, TARGET_SIGSEGV, &info);
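/*
 * Editor's note (not part of the original source): the abridged helper
 * above implements the store-conditional half of Alpha's LDx_L/STx_C
 * pair.  Sketch of the intent, using only the names visible above:
 *
 *     re-read the word at addr (the recorded lock_addr);
 *     if it still equals env->lock_value, write back tmp
 *         (the pending store value taken from lock_st_addr);
 *     presumably the destination register then receives the usual
 *         STx_C 1/0 success flag (that code is elided here).
 *
 * Either guest access faulting takes the TARGET_SIGSEGV path shown above.
 */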
void cpu_loop(CPUAlphaState *env)
CPUState *cs = CPU(alpha_env_get_cpu(env));
target_siginfo_t info;
trapnr = cpu_alpha_exec(env);
/* All of the traps imply a transition through PALcode, which
   implies an REI instruction has been executed.  Which means
   that the intr_flag should be cleared.  */
fprintf(stderr, "Reset requested. Exit\n");
fprintf(stderr, "Machine check exception. Exit\n");
case EXCP_SMP_INTERRUPT:
case EXCP_CLK_INTERRUPT:
case EXCP_DEV_INTERRUPT:
    fprintf(stderr, "External interrupt. Exit\n");
env->lock_addr = -1;
info.si_signo = TARGET_SIGSEGV;
info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
                ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
info._sifields._sigfault._addr = env->trap_arg0;
queue_signal(env, info.si_signo, &info);
env->lock_addr = -1;
info.si_signo = TARGET_SIGBUS;
info.si_code = TARGET_BUS_ADRALN;
info._sifields._sigfault._addr = env->trap_arg0;
queue_signal(env, info.si_signo, &info);
env->lock_addr = -1;
info.si_signo = TARGET_SIGILL;
info.si_code = TARGET_ILL_ILLOPC;
info._sifields._sigfault._addr = env->pc;
queue_signal(env, info.si_signo, &info);
env->lock_addr = -1;
info.si_signo = TARGET_SIGFPE;
info.si_code = TARGET_FPE_FLTINV;
info._sifields._sigfault._addr = env->pc;
queue_signal(env, info.si_signo, &info);
/* No-op.  Linux simply re-enables the FPU.  */
env->lock_addr = -1;
switch (env->error_code) {
info.si_signo = TARGET_SIGTRAP;
info.si_code = TARGET_TRAP_BRKPT;
info._sifields._sigfault._addr = env->pc;
queue_signal(env, info.si_signo, &info);
info.si_signo = TARGET_SIGTRAP;
info._sifields._sigfault._addr = env->pc;
queue_signal(env, info.si_signo, &info);
trapnr = env->ir[IR_V0];
sysret = do_syscall(env, trapnr,
                    env->ir[IR_A0], env->ir[IR_A1],
                    env->ir[IR_A2], env->ir[IR_A3],
                    env->ir[IR_A4], env->ir[IR_A5],
if (trapnr == TARGET_NR_sigreturn
    || trapnr == TARGET_NR_rt_sigreturn) {
/* Syscall writes 0 to V0 to bypass error check, similar
   to how this is handled internal to Linux kernel.
   (Ab)use trapnr temporarily as boolean indicating error.  */
trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
env->ir[IR_V0] = (trapnr ? -sysret : sysret);
env->ir[IR_A3] = trapnr;
/* ??? We can probably elide the code using page_unprotect
   that is checking for self-modifying code.  Instead we
   could simply call tb_flush here.  Until we work out the
   changes required to turn off the extra write protection,
   this can be a no-op.  */
/* Handled in the translator for usermode.  */
/* Handled in the translator for usermode.  */
info.si_signo = TARGET_SIGFPE;
switch (env->ir[IR_A0]) {
case TARGET_GEN_INTOVF:
    info.si_code = TARGET_FPE_INTOVF;
case TARGET_GEN_INTDIV:
    info.si_code = TARGET_FPE_INTDIV;
case TARGET_GEN_FLTOVF:
    info.si_code = TARGET_FPE_FLTOVF;
case TARGET_GEN_FLTUND:
    info.si_code = TARGET_FPE_FLTUND;
case TARGET_GEN_FLTINV:
    info.si_code = TARGET_FPE_FLTINV;
case TARGET_GEN_FLTINE:
    info.si_code = TARGET_FPE_FLTRES;
case TARGET_GEN_ROPRAND:
info.si_signo = TARGET_SIGTRAP;
info._sifields._sigfault._addr = env->pc;
queue_signal(env, info.si_signo, &info);
info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
if (info.si_signo) {
    env->lock_addr = -1;
    info.si_code = TARGET_TRAP_BRKPT;
    queue_signal(env, info.si_signo, &info);
do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
case EXCP_INTERRUPT:
    /* Just indicate that signals should be handled asap.  */
printf("Unhandled trap: 0x%x\n", trapnr);
cpu_dump_state(cs, stderr, fprintf, 0);
process_pending_signals(env);
#endif /* TARGET_ALPHA */
void cpu_loop(CPUS390XState *env)
CPUState *cs = CPU(s390_env_get_cpu(env));
target_siginfo_t info;
trapnr = cpu_s390x_exec(env);
case EXCP_INTERRUPT:
    /* Just indicate that signals should be handled asap.  */
n = env->int_svc_code;
/* syscalls > 255 */
env->psw.addr += env->int_svc_ilen;
env->regs[2] = do_syscall(env, n, env->regs[2], env->regs[3],
                          env->regs[4], env->regs[5],
                          env->regs[6], env->regs[7], 0, 0);
sig = gdb_handlesig(cs, TARGET_SIGTRAP);
n = TARGET_TRAP_BRKPT;
n = env->int_pgm_code;
case PGM_PRIVILEGED:
    n = TARGET_ILL_ILLOPC;
case PGM_PROTECTION:
case PGM_ADDRESSING:
    /* XXX: check env->error_code */
    n = TARGET_SEGV_MAPERR;
    addr = env->__excp_addr;
case PGM_SPECIFICATION:
case PGM_SPECIAL_OP:
    n = TARGET_ILL_ILLOPN;
case PGM_FIXPT_OVERFLOW:
    n = TARGET_FPE_INTOVF;
case PGM_FIXPT_DIVIDE:
    n = TARGET_FPE_INTDIV;
n = (env->fpc >> 8) & 0xff;
/* compare-and-trap */
/* An IEEE exception, simulated or otherwise.  */
n = TARGET_FPE_FLTINV;
} else if (n & 0x40) {
    n = TARGET_FPE_FLTDIV;
} else if (n & 0x20) {
    n = TARGET_FPE_FLTOVF;
} else if (n & 0x10) {
    n = TARGET_FPE_FLTUND;
} else if (n & 0x08) {
    n = TARGET_FPE_FLTRES;
/* ??? Quantum exception; BFP, DFP error.  */
fprintf(stderr, "Unhandled program exception: %#x\n", n);
cpu_dump_state(cs, stderr, fprintf, 0);
addr = env->psw.addr;
info.si_signo = sig;
info._sifields._sigfault._addr = addr;
queue_signal(env, info.si_signo, &info);
fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
cpu_dump_state(cs, stderr, fprintf, 0);
process_pending_signals(env);
#endif /* TARGET_S390X */
THREAD CPUState *thread_cpu;

void task_settid(TaskState *ts)
if (ts->ts_tid == 0) {
    ts->ts_tid = (pid_t)syscall(SYS_gettid);

void stop_all_tasks(void)
/*
 * We trust that when using NPTL, start_exclusive()
 * handles thread stopping correctly.
 */

/* Assumes contents are already zeroed.  */
void init_task_state(TaskState *ts)
ts->first_free = ts->sigqueue_table;
for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
    ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1];
ts->sigqueue_table[i].next = NULL;
static void handle_arg_help(const char *arg)

static void handle_arg_log(const char *arg)
mask = qemu_str_to_log_mask(arg);
qemu_print_log_usage(stdout);

static void handle_arg_log_filename(const char *arg)
qemu_set_log_filename(arg);

static void handle_arg_set_env(const char *arg)
char *r, *p, *token;
r = p = strdup(arg);
while ((token = strsep(&p, ",")) != NULL) {
    if (envlist_setenv(envlist, token) != 0) {

static void handle_arg_unset_env(const char *arg)
char *r, *p, *token;
r = p = strdup(arg);
while ((token = strsep(&p, ",")) != NULL) {
    if (envlist_unsetenv(envlist, token) != 0) {

static void handle_arg_argv0(const char *arg)
argv0 = strdup(arg);

static void handle_arg_stack_size(const char *arg)
guest_stack_size = strtoul(arg, &p, 0);
if (guest_stack_size == 0) {
guest_stack_size *= 1024 * 1024;
} else if (*p == 'k' || *p == 'K') {
    guest_stack_size *= 1024;
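/*
 * Editor's note (not part of the original source): with the suffix
 * handling above (the elided first branch presumably tests for 'M'),
 * "-s 8M" or QEMU_STACK_SIZE=8M gives guest_stack_size = 8 * 1024 * 1024,
 * "-s 512k" gives 512 * 1024, and a bare number is taken as bytes.
 */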
static void handle_arg_ld_prefix(const char *arg)
interp_prefix = strdup(arg);

static void handle_arg_pagesize(const char *arg)
qemu_host_page_size = atoi(arg);
if (qemu_host_page_size == 0 ||
    (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
    fprintf(stderr, "page size must be a power of two\n");

static void handle_arg_gdb(const char *arg)
gdbstub_port = atoi(arg);

static void handle_arg_uname(const char *arg)
qemu_uname_release = strdup(arg);

static void handle_arg_cpu(const char *arg)
cpu_model = strdup(arg);
if (cpu_model == NULL || is_help_option(cpu_model)) {
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(stdout, &fprintf);
#if defined(CONFIG_USE_GUEST_BASE)
static void handle_arg_guest_base(const char *arg)
guest_base = strtol(arg, NULL, 0);
have_guest_base = 1;

static void handle_arg_reserved_va(const char *arg)
reserved_va = strtoul(arg, &p, 0);
unsigned long unshifted = reserved_va;
reserved_va <<= shift;
if (((reserved_va >> shift) != unshifted)
#if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
    || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
fprintf(stderr, "Reserved virtual address too big\n");
fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
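/*
 * Editor's note (not part of the original source): the elided suffix
 * branches above presumably map 'k'/'M'/'G' on -R to shifts of 10/20/30,
 * so e.g. "-R 256M" yields reserved_va = 256 << 20.  The
 * (reserved_va >> shift) != unshifted test catches the shift overflowing
 * the host's unsigned long, and the TARGET_VIRT_ADDR_SPACE_BITS check
 * rejects reservations larger than the guest's virtual address space.
 */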
static void handle_arg_singlestep(const char *arg)

static void handle_arg_strace(const char *arg)

static void handle_arg_version(const char *arg)
printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION
       ", Copyright (c) 2003-2008 Fabrice Bellard\n");
struct qemu_argument {
    void (*handle_opt)(const char *arg);
    const char *example;

static const struct qemu_argument arg_table[] = {
    {"h",          "",                 false, handle_arg_help,
     "",           "print this help"},
    {"g",          "QEMU_GDB",         true,  handle_arg_gdb,
     "port",       "wait for a gdb connection on 'port'"},
    {"L",          "QEMU_LD_PREFIX",   true,  handle_arg_ld_prefix,
     "path",       "set the elf interpreter prefix to 'path'"},
    {"s",          "QEMU_STACK_SIZE",  true,  handle_arg_stack_size,
     "size",       "set the stack size to 'size' bytes"},
    {"cpu",        "QEMU_CPU",         true,  handle_arg_cpu,
     "model",      "select CPU (-cpu help for list)"},
    {"E",          "QEMU_SET_ENV",     true,  handle_arg_set_env,
     "var=value",  "sets the target's environment variable (see below)"},
    {"U",          "QEMU_UNSET_ENV",   true,  handle_arg_unset_env,
     "var",        "unsets the target's environment variable (see below)"},
    {"0",          "QEMU_ARGV0",       true,  handle_arg_argv0,
     "argv0",      "forces target process argv[0] to be 'argv0'"},
    {"r",          "QEMU_UNAME",       true,  handle_arg_uname,
     "uname",      "set qemu uname release string to 'uname'"},
#if defined(CONFIG_USE_GUEST_BASE)
    {"B",          "QEMU_GUEST_BASE",  true,  handle_arg_guest_base,
     "address",    "set guest_base address to 'address'"},
    {"R",          "QEMU_RESERVED_VA", true,  handle_arg_reserved_va,
     "size",       "reserve 'size' bytes for guest virtual address space"},
    {"d",          "QEMU_LOG",         true,  handle_arg_log,
     "item[,...]", "enable logging of specified items "
                   "(use '-d help' for a list of items)"},
    {"D",          "QEMU_LOG_FILENAME", true, handle_arg_log_filename,
     "logfile",    "write logs to 'logfile' (default stderr)"},
    {"p",          "QEMU_PAGESIZE",    true,  handle_arg_pagesize,
     "pagesize",   "set the host page size to 'pagesize'"},
    {"singlestep", "QEMU_SINGLESTEP",  false, handle_arg_singlestep,
     "",           "run in singlestep mode"},
    {"strace",     "QEMU_STRACE",      false, handle_arg_strace,
     "",           "log system calls"},
    {"version",    "QEMU_VERSION",     false, handle_arg_version,
     "",           "display version information and exit"},
    {NULL, NULL, false, NULL, NULL, NULL}
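/*
 * Editor's sketch (not part of the original source): arg_table drives both
 * parse_args() and usage(), so a hypothetical new switch only needs a
 * handler with the common signature plus one row before the NULL
 * terminator, e.g.
 *
 *     static void handle_arg_example(const char *arg) { ... }
 *
 *     {"example", "QEMU_EXAMPLE", true, handle_arg_example,
 *      "value",   "illustrative entry only"},
 *
 * Both the command-line form (-example value) and the environment
 * variable form (QEMU_EXAMPLE=value) are then picked up automatically.
 */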
static void usage(void)
const struct qemu_argument *arginfo;

printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n"
       "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n"
       "Options and associated environment variables:\n"

/* Calculate column widths. We must always have at least enough space
 * for the column header.
 */
maxarglen = strlen("Argument");
maxenvlen = strlen("Env-variable");

for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
    int arglen = strlen(arginfo->argv);
    if (arginfo->has_arg) {
        arglen += strlen(arginfo->example) + 1;
    if (strlen(arginfo->env) > maxenvlen) {
        maxenvlen = strlen(arginfo->env);
    if (arglen > maxarglen) {

printf("%-*s %-*s Description\n", maxarglen + 1, "Argument",
       maxenvlen, "Env-variable");

for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
    if (arginfo->has_arg) {
        printf("-%s %-*s %-*s %s\n", arginfo->argv,
               (int)(maxarglen - strlen(arginfo->argv) - 1),
               arginfo->example, maxenvlen, arginfo->env, arginfo->help);
        printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv,
               maxenvlen, arginfo->env,

       "QEMU_LD_PREFIX  = %s\n"
       "QEMU_STACK_SIZE = %ld byte\n",

       "You can use -E and -U options or the QEMU_SET_ENV and\n"
       "QEMU_UNSET_ENV environment variables to set and unset\n"
       "environment variables for the target process.\n"
       "It is possible to provide several variables by separating them\n"
       "by commas in getsubopt(3) style. Additionally it is possible to\n"
       "provide the -E and -U options multiple times.\n"
       "The following lines are equivalent:\n"
       "    -E var1=val1 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
       "    -E var1=val1,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
       "    QEMU_SET_ENV=var1=val1,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
       "Note that if you provide several changes to a single variable\n"
       "the last change will stay in effect.\n");
static int parse_args(int argc, char **argv)
const struct qemu_argument *arginfo;

for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
    if (arginfo->env == NULL) {
    r = getenv(arginfo->env);
    arginfo->handle_opt(r);

if (optind >= argc) {
if (!strcmp(r, "-")) {
for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
    if (!strcmp(r, arginfo->argv)) {
        if (arginfo->has_arg) {
            if (optind >= argc) {
            arginfo->handle_opt(argv[optind]);
        arginfo->handle_opt(NULL);

/* no option matched the current argv */
if (arginfo->handle_opt == NULL) {

if (optind >= argc) {

filename = argv[optind];
exec_path = argv[optind];
int main(int argc, char **argv, char **envp)
struct target_pt_regs regs1, *regs = &regs1;
struct image_info info1, *info = &info1;
struct linux_binprm bprm;
char **target_environ, **wrk;

module_call_init(MODULE_INIT_QOM);
qemu_cache_utils_init(envp);

if ((envlist = envlist_create()) == NULL) {
    (void) fprintf(stderr, "Unable to allocate envlist\n");

/* add current environment into the list */
for (wrk = environ; *wrk != NULL; wrk++) {
    (void) envlist_setenv(envlist, *wrk);

/* Read the stack limit from the kernel.  If it's "unlimited",
   then we can do little else besides use the default.  */
if (getrlimit(RLIMIT_STACK, &lim) == 0
    && lim.rlim_cur != RLIM_INFINITY
    && lim.rlim_cur == (target_long)lim.rlim_cur) {
    guest_stack_size = lim.rlim_cur;

#if defined(cpudef_setup)
cpudef_setup(); /* parse cpu definitions in target config file (TBD) */

optind = parse_args(argc, argv);

memset(regs, 0, sizeof(struct target_pt_regs));

/* Zero out image_info */
memset(info, 0, sizeof(struct image_info));

memset(&bprm, 0, sizeof(bprm));

/* Scan interp_prefix dir for replacement files. */
init_paths(interp_prefix);

init_qemu_uname_release();

if (cpu_model == NULL) {
#if defined(TARGET_I386)
#ifdef TARGET_X86_64
    cpu_model = "qemu64";
    cpu_model = "qemu32";
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_M68K)
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    cpu_model = "TI UltraSparc II";
    cpu_model = "Fujitsu MB86904";
#elif defined(TARGET_MIPS)
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
#elif defined TARGET_OPENRISC
    cpu_model = "or1200";
#elif defined(TARGET_PPC)
    cpu_model = "970fx";

cpu_exec_init_all();
/* NOTE: we need to init the CPU at this stage to get
   qemu_host_page_size */
env = cpu_init(cpu_model);
fprintf(stderr, "Unable to find CPU definition\n");
cpu = ENV_GET_CPU(env);
if (getenv("QEMU_STRACE")) {

target_environ = envlist_to_environ(envlist, NULL);
envlist_free(envlist);

#if defined(CONFIG_USE_GUEST_BASE)
/*
 * Now that page sizes are configured in cpu_init() we can do
 * proper page alignment for guest_base.
 */
guest_base = HOST_PAGE_ALIGN(guest_base);

if (reserved_va || have_guest_base) {
    guest_base = init_guest_space(guest_base, reserved_va, 0,
    if (guest_base == (unsigned long)-1) {
        fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
                "space for use as guest address space (check your virtual "
                "memory ulimit setting or reserve less using -R option)\n",
    mmap_next_start = reserved_va;
#endif /* CONFIG_USE_GUEST_BASE */

/*
 * Read in mmap_min_addr kernel parameter.  This value is used
 * when loading the ELF image to determine whether guest_base
 * is needed.  It is also used in mmap_find_vma.
 */
if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
    if (fscanf(fp, "%lu", &tmp) == 1) {
        mmap_min_addr = tmp;
        qemu_log("host mmap_min_addr=0x%lx\n", mmap_min_addr);

/*
 * Prepare copy of argv vector for target.
 */
target_argc = argc - optind;
target_argv = calloc(target_argc + 1, sizeof(char *));
if (target_argv == NULL) {
    (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");

/*
 * If argv0 is specified (using '-0' switch) we replace
 * argv[0] pointer with the given one.
 */
if (argv0 != NULL) {
    target_argv[i++] = strdup(argv0);
for (; i < target_argc; i++) {
    target_argv[i] = strdup(argv[optind + i]);
target_argv[target_argc] = NULL;

ts = g_malloc0(sizeof(TaskState));
init_task_state(ts);
/* build Task State */

ret = loader_exec(filename, target_argv, target_environ, regs,
printf("Error while loading %s: %s\n", filename, strerror(-ret));

for (wrk = target_environ; *wrk; wrk++) {
free(target_environ);

if (qemu_log_enabled()) {
#if defined(CONFIG_USE_GUEST_BASE)
    qemu_log("guest_base  0x%lx\n", guest_base);
    qemu_log("start_brk   0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
    qemu_log("end_code    0x" TARGET_ABI_FMT_lx "\n", info->end_code);
    qemu_log("start_code  0x" TARGET_ABI_FMT_lx "\n",
    qemu_log("start_data  0x" TARGET_ABI_FMT_lx "\n",
    qemu_log("end_data    0x" TARGET_ABI_FMT_lx "\n", info->end_data);
    qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
    qemu_log("brk         0x" TARGET_ABI_FMT_lx "\n", info->brk);
    qemu_log("entry       0x" TARGET_ABI_FMT_lx "\n", info->entry);

target_set_brk(info->brk);

#if defined(CONFIG_USE_GUEST_BASE)
/* Now that we've loaded the binary, GUEST_BASE is fixed.  Delay
   generating the prologue until now so that the prologue can take
   the real value of GUEST_BASE into account.  */
tcg_prologue_init(&tcg_ctx);
#if defined(TARGET_I386)
cpu_x86_set_cpl(env, 3);

env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
env->hflags |= HF_PE_MASK;
if (env->features[FEAT_1_EDX] & CPUID_SSE) {
    env->cr[4] |= CR4_OSFXSR_MASK;
    env->hflags |= HF_OSFXSR_MASK;

#ifndef TARGET_ABI32
/* enable 64 bit mode if possible */
if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
    fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
env->cr[4] |= CR4_PAE_MASK;
env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
env->hflags |= HF_LMA_MASK;

/* flags setup : we activate the IRQs by default as in user mode */
env->eflags |= IF_MASK;

/* linux register setup */
#ifndef TARGET_ABI32
env->regs[R_EAX] = regs->rax;
env->regs[R_EBX] = regs->rbx;
env->regs[R_ECX] = regs->rcx;
env->regs[R_EDX] = regs->rdx;
env->regs[R_ESI] = regs->rsi;
env->regs[R_EDI] = regs->rdi;
env->regs[R_EBP] = regs->rbp;
env->regs[R_ESP] = regs->rsp;
env->eip = regs->rip;
env->regs[R_EAX] = regs->eax;
env->regs[R_EBX] = regs->ebx;
env->regs[R_ECX] = regs->ecx;
env->regs[R_EDX] = regs->edx;
env->regs[R_ESI] = regs->esi;
env->regs[R_EDI] = regs->edi;
env->regs[R_EBP] = regs->ebp;
env->regs[R_ESP] = regs->esp;
env->eip = regs->eip;

/* linux interrupt setup */
#ifndef TARGET_ABI32
env->idt.limit = 511;
env->idt.limit = 255;
env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
                            PROT_READ|PROT_WRITE,
                            MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
idt_table = g2h(env->idt.base);

/* linux segment setup */
uint64_t *gdt_table;
env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
                            PROT_READ|PROT_WRITE,
                            MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
gdt_table = g2h(env->gdt.base);
write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
         DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
         (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
/* 64 bit code segment */
write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
         DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
         (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
         DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
         (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
cpu_x86_load_seg(env, R_CS, __USER_CS);
cpu_x86_load_seg(env, R_SS, __USER_DS);
cpu_x86_load_seg(env, R_DS, __USER_DS);
cpu_x86_load_seg(env, R_ES, __USER_DS);
cpu_x86_load_seg(env, R_FS, __USER_DS);
cpu_x86_load_seg(env, R_GS, __USER_DS);
/* This hack makes Wine work... */
env->segs[R_FS].selector = 0;
cpu_x86_load_seg(env, R_DS, 0);
cpu_x86_load_seg(env, R_ES, 0);
cpu_x86_load_seg(env, R_FS, 0);
cpu_x86_load_seg(env, R_GS, 0);
#elif defined(TARGET_AARCH64)
if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
            "The selected ARM CPU does not support 64 bit mode\n");
for (i = 0; i < 31; i++) {
    env->xregs[i] = regs->regs[i];
env->xregs[31] = regs->sp;

#elif defined(TARGET_ARM)
cpsr_write(env, regs->uregs[16], 0xffffffff);
for (i = 0; i < 16; i++) {
    env->regs[i] = regs->uregs[i];
if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
    && (info->elf_flags & EF_ARM_BE8)) {
    env->bswap_code = 1;

#elif defined(TARGET_UNICORE32)
cpu_asr_write(env, regs->uregs[32], 0xffffffff);
for (i = 0; i < 32; i++) {
    env->regs[i] = regs->uregs[i];

#elif defined(TARGET_SPARC)
env->npc = regs->npc;
for (i = 0; i < 8; i++)
    env->gregs[i] = regs->u_regs[i];
for (i = 0; i < 8; i++)
    env->regwptr[i] = regs->u_regs[i + 8];
#elif defined(TARGET_PPC)
#if defined(TARGET_PPC64)
#if defined(TARGET_ABI32)
env->msr &= ~((target_ulong)1 << MSR_SF);
env->msr |= (target_ulong)1 << MSR_SF;
env->nip = regs->nip;
for (i = 0; i < 32; i++) {
    env->gpr[i] = regs->gpr[i];

#elif defined(TARGET_M68K)
env->dregs[0] = regs->d0;
env->dregs[1] = regs->d1;
env->dregs[2] = regs->d2;
env->dregs[3] = regs->d3;
env->dregs[4] = regs->d4;
env->dregs[5] = regs->d5;
env->dregs[6] = regs->d6;
env->dregs[7] = regs->d7;
env->aregs[0] = regs->a0;
env->aregs[1] = regs->a1;
env->aregs[2] = regs->a2;
env->aregs[3] = regs->a3;
env->aregs[4] = regs->a4;
env->aregs[5] = regs->a5;
env->aregs[6] = regs->a6;
env->aregs[7] = regs->usp;
ts->sim_syscalls = 1;

#elif defined(TARGET_MICROBLAZE)
env->regs[0] = regs->r0;
env->regs[1] = regs->r1;
env->regs[2] = regs->r2;
env->regs[3] = regs->r3;
env->regs[4] = regs->r4;
env->regs[5] = regs->r5;
env->regs[6] = regs->r6;
env->regs[7] = regs->r7;
env->regs[8] = regs->r8;
env->regs[9] = regs->r9;
env->regs[10] = regs->r10;
env->regs[11] = regs->r11;
env->regs[12] = regs->r12;
env->regs[13] = regs->r13;
env->regs[14] = regs->r14;
env->regs[15] = regs->r15;
env->regs[16] = regs->r16;
env->regs[17] = regs->r17;
env->regs[18] = regs->r18;
env->regs[19] = regs->r19;
env->regs[20] = regs->r20;
env->regs[21] = regs->r21;
env->regs[22] = regs->r22;
env->regs[23] = regs->r23;
env->regs[24] = regs->r24;
env->regs[25] = regs->r25;
env->regs[26] = regs->r26;
env->regs[27] = regs->r27;
env->regs[28] = regs->r28;
env->regs[29] = regs->r29;
env->regs[30] = regs->r30;
env->regs[31] = regs->r31;
env->sregs[SR_PC] = regs->pc;
#elif defined(TARGET_MIPS)
for (i = 0; i < 32; i++) {
    env->active_tc.gpr[i] = regs->regs[i];
env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
if (regs->cp0_epc & 1) {
    env->hflags |= MIPS_HFLAG_M16;

#elif defined(TARGET_OPENRISC)
for (i = 0; i < 32; i++) {
    env->gpr[i] = regs->gpr[i];

#elif defined(TARGET_SH4)
for (i = 0; i < 16; i++) {
    env->gregs[i] = regs->regs[i];

#elif defined(TARGET_ALPHA)
for (i = 0; i < 28; i++) {
    env->ir[i] = ((abi_ulong *)regs)[i];
env->ir[IR_SP] = regs->usp;

#elif defined(TARGET_CRIS)
env->regs[0] = regs->r0;
env->regs[1] = regs->r1;
env->regs[2] = regs->r2;
env->regs[3] = regs->r3;
env->regs[4] = regs->r4;
env->regs[5] = regs->r5;
env->regs[6] = regs->r6;
env->regs[7] = regs->r7;
env->regs[8] = regs->r8;
env->regs[9] = regs->r9;
env->regs[10] = regs->r10;
env->regs[11] = regs->r11;
env->regs[12] = regs->r12;
env->regs[13] = regs->r13;
env->regs[14] = info->start_stack;
env->regs[15] = regs->acr;
env->pc = regs->erp;

#elif defined(TARGET_S390X)
for (i = 0; i < 16; i++) {
    env->regs[i] = regs->gprs[i];
env->psw.mask = regs->psw.mask;
env->psw.addr = regs->psw.addr;
#error unsupported target CPU

#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
ts->stack_base = info->start_stack;
ts->heap_base = info->brk;
/* This will be filled in on the first SYS_HEAPINFO call.  */

if (gdbserver_start(gdbstub_port) < 0) {
    fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
gdb_handlesig(cpu, 0);