4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
21 #include <sys/syscall.h>
22 #include <sys/resource.h>
25 #include "qemu-common.h"
27 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
31 #include "qemu/timer.h"
32 #include "qemu/envlist.h"
/* CPU model name selected on the command line; NULL selects the
   target's default model. */
static const char *cpu_model;
/* Lowest address the host kernel allows us to mmap (read from
   /proc/sys/vm/mmap_min_addr at startup; 0 until initialised). */
unsigned long mmap_min_addr;
#if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
/*
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
 *
 * This way we will never overlap with our own libraries or binaries or stack
 * or anything else that QEMU maps.
 */
/* NOTE(review): the inner preprocessor guards were lost in extraction;
   reconstructed per upstream QEMU — confirm the exact condition. */
# ifdef TARGET_MIPS
/* MIPS only supports 31 bits of virtual address space for user space */
uintptr_t reserved_va = 0x77000000;
# else
uintptr_t reserved_va = 0xf7000000;
# endif
#else
uintptr_t reserved_va;
#endif
64 static void usage(void);
66 static const char *interp_prefix
= CONFIG_QEMU_INTERP_PREFIX
;
67 const char *qemu_uname_release
;
69 /* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
70 we allocate a bigger stack. Need a better solution, for example
71 by remapping the process stack directly at the right place */
72 unsigned long guest_stack_size
= 8 * 1024 * 1024UL;
/* printf-style diagnostic logging for the user-mode emulator;
   writes to stderr. */
void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}
#if defined(TARGET_I386)
/* User-mode emulation has no PIC; report "no interrupt pending". */
int cpu_get_pic_interrupt(CPUX86State *env)
{
    return -1;
}
#endif
/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work.  */
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
/* Number of CPUs involved in the current exclusive section; 0 when no
   exclusive operation is in progress. */
static int pending_cpus;
103 /* Make sure everything is in a consistent state for calling fork(). */
104 void fork_start(void)
106 pthread_mutex_lock(&tcg_ctx
.tb_ctx
.tb_lock
);
107 pthread_mutex_lock(&exclusive_lock
);
111 void fork_end(int child
)
113 mmap_fork_end(child
);
115 CPUState
*cpu
, *next_cpu
;
116 /* Child processes created by fork() only have a single thread.
117 Discard information about the parent threads. */
118 CPU_FOREACH_SAFE(cpu
, next_cpu
) {
119 if (cpu
!= thread_cpu
) {
120 QTAILQ_REMOVE(&cpus
, thread_cpu
, node
);
124 pthread_mutex_init(&exclusive_lock
, NULL
);
125 pthread_mutex_init(&cpu_list_mutex
, NULL
);
126 pthread_cond_init(&exclusive_cond
, NULL
);
127 pthread_cond_init(&exclusive_resume
, NULL
);
128 pthread_mutex_init(&tcg_ctx
.tb_ctx
.tb_lock
, NULL
);
129 gdbserver_fork(thread_cpu
);
131 pthread_mutex_unlock(&exclusive_lock
);
132 pthread_mutex_unlock(&tcg_ctx
.tb_ctx
.tb_lock
);
136 /* Wait for pending exclusive operations to complete. The exclusive lock
138 static inline void exclusive_idle(void)
140 while (pending_cpus
) {
141 pthread_cond_wait(&exclusive_resume
, &exclusive_lock
);
145 /* Start an exclusive operation.
146 Must only be called from outside cpu_arm_exec. */
147 static inline void start_exclusive(void)
151 pthread_mutex_lock(&exclusive_lock
);
155 /* Make all other cpus stop executing. */
156 CPU_FOREACH(other_cpu
) {
157 if (other_cpu
->running
) {
162 if (pending_cpus
> 1) {
163 pthread_cond_wait(&exclusive_cond
, &exclusive_lock
);
167 /* Finish an exclusive operation. */
168 static inline void __attribute__((unused
)) end_exclusive(void)
171 pthread_cond_broadcast(&exclusive_resume
);
172 pthread_mutex_unlock(&exclusive_lock
);
175 /* Wait for exclusive ops to finish, and begin cpu execution. */
176 static inline void cpu_exec_start(CPUState
*cpu
)
178 pthread_mutex_lock(&exclusive_lock
);
181 pthread_mutex_unlock(&exclusive_lock
);
184 /* Mark cpu as not executing, and release pending exclusive ops. */
185 static inline void cpu_exec_end(CPUState
*cpu
)
187 pthread_mutex_lock(&exclusive_lock
);
188 cpu
->running
= false;
189 if (pending_cpus
> 1) {
191 if (pending_cpus
== 1) {
192 pthread_cond_signal(&exclusive_cond
);
196 pthread_mutex_unlock(&exclusive_lock
);
199 void cpu_list_lock(void)
201 pthread_mutex_lock(&cpu_list_mutex
);
204 void cpu_list_unlock(void)
206 pthread_mutex_unlock(&cpu_list_mutex
);
211 /***********************************************************/
212 /* CPUX86 core interface */
214 uint64_t cpu_get_tsc(CPUX86State
*env
)
216 return cpu_get_real_ticks();
/* Encode a segment descriptor (base/limit/flags) into the two 32-bit
   words of a descriptor-table entry at ptr, in guest byte order. */
static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
                     int flags)
{
    unsigned int e1, e2;
    uint32_t *p;
    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
    e2 |= flags;
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}
static uint64_t *idt_table;
/* NOTE(review): the #ifdef TARGET_X86_64 / #else / #endif structure below
   was lost in extraction (two set_idt definitions with no guard);
   restored per the 64-bit (16-byte gate) vs 32-bit (8-byte gate)
   layouts the two bodies implement. */
#ifdef TARGET_X86_64
/* Encode a 16-byte long-mode IDT gate (present, DPL, type, offset). */
static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
    p[2] = tswap32(addr >> 32);
    p[3] = 0;
}
/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    /* 16-byte gates: each entry occupies two uint64_t slots. */
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
}
#else
/* Encode an 8-byte protected-mode IDT gate. */
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}
/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate(idt_table + n, 0, dpl, 0, 0);
}
#endif
270 void cpu_loop(CPUX86State
*env
)
272 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
275 target_siginfo_t info
;
283 trapnr
= cpu_x86_exec(cs
);
287 /* linux syscall from int $0x80 */
288 env
->regs
[R_EAX
] = do_syscall(env
,
300 /* linux syscall from syscall instruction */
301 env
->regs
[R_EAX
] = do_syscall(env
,
315 case TARGET_VSYSCALL_ADDR(__NR_vgettimeofday
):
316 syscall_num
= __NR_gettimeofday
;
318 case TARGET_VSYSCALL_ADDR(__NR_vtime
):
320 syscall_num
= __NR_time
;
322 /* XXX: not yet implemented (arm eabi host) */
323 cpu_abort(cs
, "Unimplemented vsyscall vtime");
326 case TARGET_VSYSCALL_ADDR(__NR_vgetcpu
):
327 /* XXX: not yet implemented */
328 cpu_abort(cs
, "Unimplemented vsyscall vgetcpu");
332 "Invalid vsyscall to address " TARGET_FMT_lx
"\n",
335 env
->regs
[R_EAX
] = do_syscall(env
,
345 get_user_u64(val
, env
->regs
[R_ESP
]);
347 env
->regs
[R_ESP
] += 8;
352 info
.si_signo
= TARGET_SIGBUS
;
354 info
.si_code
= TARGET_SI_KERNEL
;
355 info
._sifields
._sigfault
._addr
= 0;
356 queue_signal(env
, info
.si_signo
, &info
);
359 /* XXX: potential problem if ABI32 */
360 #ifndef TARGET_X86_64
361 if (env
->eflags
& VM_MASK
) {
362 handle_vm86_fault(env
);
366 info
.si_signo
= TARGET_SIGSEGV
;
368 info
.si_code
= TARGET_SI_KERNEL
;
369 info
._sifields
._sigfault
._addr
= 0;
370 queue_signal(env
, info
.si_signo
, &info
);
374 info
.si_signo
= TARGET_SIGSEGV
;
376 if (!(env
->error_code
& 1))
377 info
.si_code
= TARGET_SEGV_MAPERR
;
379 info
.si_code
= TARGET_SEGV_ACCERR
;
380 info
._sifields
._sigfault
._addr
= env
->cr
[2];
381 queue_signal(env
, info
.si_signo
, &info
);
384 #ifndef TARGET_X86_64
385 if (env
->eflags
& VM_MASK
) {
386 handle_vm86_trap(env
, trapnr
);
390 /* division by zero */
391 info
.si_signo
= TARGET_SIGFPE
;
393 info
.si_code
= TARGET_FPE_INTDIV
;
394 info
._sifields
._sigfault
._addr
= env
->eip
;
395 queue_signal(env
, info
.si_signo
, &info
);
400 #ifndef TARGET_X86_64
401 if (env
->eflags
& VM_MASK
) {
402 handle_vm86_trap(env
, trapnr
);
406 info
.si_signo
= TARGET_SIGTRAP
;
408 if (trapnr
== EXCP01_DB
) {
409 info
.si_code
= TARGET_TRAP_BRKPT
;
410 info
._sifields
._sigfault
._addr
= env
->eip
;
412 info
.si_code
= TARGET_SI_KERNEL
;
413 info
._sifields
._sigfault
._addr
= 0;
415 queue_signal(env
, info
.si_signo
, &info
);
420 #ifndef TARGET_X86_64
421 if (env
->eflags
& VM_MASK
) {
422 handle_vm86_trap(env
, trapnr
);
426 info
.si_signo
= TARGET_SIGSEGV
;
428 info
.si_code
= TARGET_SI_KERNEL
;
429 info
._sifields
._sigfault
._addr
= 0;
430 queue_signal(env
, info
.si_signo
, &info
);
434 info
.si_signo
= TARGET_SIGILL
;
436 info
.si_code
= TARGET_ILL_ILLOPN
;
437 info
._sifields
._sigfault
._addr
= env
->eip
;
438 queue_signal(env
, info
.si_signo
, &info
);
441 /* just indicate that signals should be handled asap */
447 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
452 info
.si_code
= TARGET_TRAP_BRKPT
;
453 queue_signal(env
, info
.si_signo
, &info
);
458 pc
= env
->segs
[R_CS
].base
+ env
->eip
;
459 fprintf(stderr
, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
463 process_pending_signals(env
);
/* Fetch a 32-bit instruction word from guest memory, byte-swapping it
   when doswap is set; evaluates to the get_user_u32 status. */
#define get_user_code_u32(x, gaddr, doswap)             \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && (doswap)) {                         \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })
/* Fetch a 16-bit (Thumb) instruction from guest memory, byte-swapping
   it when doswap is set; evaluates to the get_user_u16 status. */
#define get_user_code_u16(x, gaddr, doswap)             \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && (doswap)) {                         \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })
487 /* Commpage handling -- there is no commpage for AArch64 */
490 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
492 * r0 = pointer to oldval
493 * r1 = pointer to newval
494 * r2 = pointer to target value
497 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
498 * C set if *ptr was changed, clear if no exchange happened
500 * Note segv's in kernel helpers are a bit tricky, we can set the
501 * data address sensibly but the PC address is just the entry point.
503 static void arm_kernel_cmpxchg64_helper(CPUARMState
*env
)
505 uint64_t oldval
, newval
, val
;
507 target_siginfo_t info
;
509 /* Based on the 32 bit code in do_kernel_trap */
511 /* XXX: This only works between threads, not between processes.
512 It's probably possible to implement this with native host
513 operations. However things like ldrex/strex are much harder so
514 there's not much point trying. */
516 cpsr
= cpsr_read(env
);
519 if (get_user_u64(oldval
, env
->regs
[0])) {
520 env
->exception
.vaddress
= env
->regs
[0];
524 if (get_user_u64(newval
, env
->regs
[1])) {
525 env
->exception
.vaddress
= env
->regs
[1];
529 if (get_user_u64(val
, addr
)) {
530 env
->exception
.vaddress
= addr
;
537 if (put_user_u64(val
, addr
)) {
538 env
->exception
.vaddress
= addr
;
548 cpsr_write(env
, cpsr
, CPSR_C
);
554 /* We get the PC of the entry address - which is as good as anything,
555 on a real kernel what you get depends on which mode it uses. */
556 info
.si_signo
= TARGET_SIGSEGV
;
558 /* XXX: check env->error_code */
559 info
.si_code
= TARGET_SEGV_MAPERR
;
560 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
561 queue_signal(env
, info
.si_signo
, &info
);
564 /* Handle a jump to the kernel code page. */
566 do_kernel_trap(CPUARMState
*env
)
572 switch (env
->regs
[15]) {
573 case 0xffff0fa0: /* __kernel_memory_barrier */
574 /* ??? No-op. Will need to do better for SMP. */
576 case 0xffff0fc0: /* __kernel_cmpxchg */
577 /* XXX: This only works between threads, not between processes.
578 It's probably possible to implement this with native host
579 operations. However things like ldrex/strex are much harder so
580 there's not much point trying. */
582 cpsr
= cpsr_read(env
);
584 /* FIXME: This should SEGV if the access fails. */
585 if (get_user_u32(val
, addr
))
587 if (val
== env
->regs
[0]) {
589 /* FIXME: Check for segfaults. */
590 put_user_u32(val
, addr
);
597 cpsr_write(env
, cpsr
, CPSR_C
);
600 case 0xffff0fe0: /* __kernel_get_tls */
601 env
->regs
[0] = cpu_get_tls(env
);
603 case 0xffff0f60: /* __kernel_cmpxchg64 */
604 arm_kernel_cmpxchg64_helper(env
);
610 /* Jump back to the caller. */
611 addr
= env
->regs
[14];
616 env
->regs
[15] = addr
;
621 /* Store exclusive handling for AArch32 */
622 static int do_strex(CPUARMState
*env
)
630 if (env
->exclusive_addr
!= env
->exclusive_test
) {
633 /* We know we're always AArch32 so the address is in uint32_t range
634 * unless it was the -1 exclusive-monitor-lost value (which won't
635 * match exclusive_test above).
637 assert(extract64(env
->exclusive_addr
, 32, 32) == 0);
638 addr
= env
->exclusive_addr
;
639 size
= env
->exclusive_info
& 0xf;
642 segv
= get_user_u8(val
, addr
);
645 segv
= get_user_u16(val
, addr
);
649 segv
= get_user_u32(val
, addr
);
655 env
->exception
.vaddress
= addr
;
660 segv
= get_user_u32(valhi
, addr
+ 4);
662 env
->exception
.vaddress
= addr
+ 4;
665 val
= deposit64(val
, 32, 32, valhi
);
667 if (val
!= env
->exclusive_val
) {
671 val
= env
->regs
[(env
->exclusive_info
>> 8) & 0xf];
674 segv
= put_user_u8(val
, addr
);
677 segv
= put_user_u16(val
, addr
);
681 segv
= put_user_u32(val
, addr
);
685 env
->exception
.vaddress
= addr
;
689 val
= env
->regs
[(env
->exclusive_info
>> 12) & 0xf];
690 segv
= put_user_u32(val
, addr
+ 4);
692 env
->exception
.vaddress
= addr
+ 4;
699 env
->regs
[(env
->exclusive_info
>> 4) & 0xf] = rc
;
705 void cpu_loop(CPUARMState
*env
)
707 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
709 unsigned int n
, insn
;
710 target_siginfo_t info
;
715 trapnr
= cpu_arm_exec(cs
);
720 TaskState
*ts
= cs
->opaque
;
724 /* we handle the FPU emulation here, as Linux */
725 /* we get the opcode */
726 /* FIXME - what to do if get_user() fails? */
727 get_user_code_u32(opcode
, env
->regs
[15], env
->bswap_code
);
729 rc
= EmulateAll(opcode
, &ts
->fpa
, env
);
730 if (rc
== 0) { /* illegal instruction */
731 info
.si_signo
= TARGET_SIGILL
;
733 info
.si_code
= TARGET_ILL_ILLOPN
;
734 info
._sifields
._sigfault
._addr
= env
->regs
[15];
735 queue_signal(env
, info
.si_signo
, &info
);
736 } else if (rc
< 0) { /* FP exception */
739 /* translate softfloat flags to FPSR flags */
740 if (-rc
& float_flag_invalid
)
742 if (-rc
& float_flag_divbyzero
)
744 if (-rc
& float_flag_overflow
)
746 if (-rc
& float_flag_underflow
)
748 if (-rc
& float_flag_inexact
)
751 FPSR fpsr
= ts
->fpa
.fpsr
;
752 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
754 if (fpsr
& (arm_fpe
<< 16)) { /* exception enabled? */
755 info
.si_signo
= TARGET_SIGFPE
;
758 /* ordered by priority, least first */
759 if (arm_fpe
& BIT_IXC
) info
.si_code
= TARGET_FPE_FLTRES
;
760 if (arm_fpe
& BIT_UFC
) info
.si_code
= TARGET_FPE_FLTUND
;
761 if (arm_fpe
& BIT_OFC
) info
.si_code
= TARGET_FPE_FLTOVF
;
762 if (arm_fpe
& BIT_DZC
) info
.si_code
= TARGET_FPE_FLTDIV
;
763 if (arm_fpe
& BIT_IOC
) info
.si_code
= TARGET_FPE_FLTINV
;
765 info
._sifields
._sigfault
._addr
= env
->regs
[15];
766 queue_signal(env
, info
.si_signo
, &info
);
771 /* accumulate unenabled exceptions */
772 if ((!(fpsr
& BIT_IXE
)) && (arm_fpe
& BIT_IXC
))
774 if ((!(fpsr
& BIT_UFE
)) && (arm_fpe
& BIT_UFC
))
776 if ((!(fpsr
& BIT_OFE
)) && (arm_fpe
& BIT_OFC
))
778 if ((!(fpsr
& BIT_DZE
)) && (arm_fpe
& BIT_DZC
))
780 if ((!(fpsr
& BIT_IOE
)) && (arm_fpe
& BIT_IOC
))
783 } else { /* everything OK */
794 if (trapnr
== EXCP_BKPT
) {
796 /* FIXME - what to do if get_user() fails? */
797 get_user_code_u16(insn
, env
->regs
[15], env
->bswap_code
);
801 /* FIXME - what to do if get_user() fails? */
802 get_user_code_u32(insn
, env
->regs
[15], env
->bswap_code
);
803 n
= (insn
& 0xf) | ((insn
>> 4) & 0xff0);
808 /* FIXME - what to do if get_user() fails? */
809 get_user_code_u16(insn
, env
->regs
[15] - 2,
813 /* FIXME - what to do if get_user() fails? */
814 get_user_code_u32(insn
, env
->regs
[15] - 4,
820 if (n
== ARM_NR_cacheflush
) {
822 } else if (n
== ARM_NR_semihosting
823 || n
== ARM_NR_thumb_semihosting
) {
824 env
->regs
[0] = do_arm_semihosting (env
);
825 } else if (n
== 0 || n
>= ARM_SYSCALL_BASE
|| env
->thumb
) {
827 if (env
->thumb
|| n
== 0) {
830 n
-= ARM_SYSCALL_BASE
;
833 if ( n
> ARM_NR_BASE
) {
835 case ARM_NR_cacheflush
:
839 cpu_set_tls(env
, env
->regs
[0]);
842 case ARM_NR_breakpoint
:
843 env
->regs
[15] -= env
->thumb
? 2 : 4;
846 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
848 env
->regs
[0] = -TARGET_ENOSYS
;
852 env
->regs
[0] = do_syscall(env
,
868 /* just indicate that signals should be handled asap */
871 if (!do_strex(env
)) {
874 /* fall through for segv */
875 case EXCP_PREFETCH_ABORT
:
876 case EXCP_DATA_ABORT
:
877 addr
= env
->exception
.vaddress
;
879 info
.si_signo
= TARGET_SIGSEGV
;
881 /* XXX: check env->error_code */
882 info
.si_code
= TARGET_SEGV_MAPERR
;
883 info
._sifields
._sigfault
._addr
= addr
;
884 queue_signal(env
, info
.si_signo
, &info
);
892 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
897 info
.si_code
= TARGET_TRAP_BRKPT
;
898 queue_signal(env
, info
.si_signo
, &info
);
902 case EXCP_KERNEL_TRAP
:
903 if (do_kernel_trap(env
))
908 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
910 cpu_dump_state(cs
, stderr
, fprintf
, 0);
913 process_pending_signals(env
);
920 * Handle AArch64 store-release exclusive
922 * rs = gets the status result of store exclusive
923 * rt = is the register that is stored
924 * rt2 = is the second register store (in STP)
927 static int do_strex_a64(CPUARMState
*env
)
938 /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
939 size
= extract32(env
->exclusive_info
, 0, 2);
940 is_pair
= extract32(env
->exclusive_info
, 2, 1);
941 rs
= extract32(env
->exclusive_info
, 4, 5);
942 rt
= extract32(env
->exclusive_info
, 9, 5);
943 rt2
= extract32(env
->exclusive_info
, 14, 5);
945 addr
= env
->exclusive_addr
;
947 if (addr
!= env
->exclusive_test
) {
953 segv
= get_user_u8(val
, addr
);
956 segv
= get_user_u16(val
, addr
);
959 segv
= get_user_u32(val
, addr
);
962 segv
= get_user_u64(val
, addr
);
968 env
->exception
.vaddress
= addr
;
971 if (val
!= env
->exclusive_val
) {
976 segv
= get_user_u32(val
, addr
+ 4);
978 segv
= get_user_u64(val
, addr
+ 8);
981 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
984 if (val
!= env
->exclusive_high
) {
988 /* handle the zero register */
989 val
= rt
== 31 ? 0 : env
->xregs
[rt
];
992 segv
= put_user_u8(val
, addr
);
995 segv
= put_user_u16(val
, addr
);
998 segv
= put_user_u32(val
, addr
);
1001 segv
= put_user_u64(val
, addr
);
1008 /* handle the zero register */
1009 val
= rt2
== 31 ? 0 : env
->xregs
[rt2
];
1011 segv
= put_user_u32(val
, addr
+ 4);
1013 segv
= put_user_u64(val
, addr
+ 8);
1016 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
1023 /* rs == 31 encodes a write to the ZR, thus throwing away
1024 * the status return. This is rather silly but valid.
1027 env
->xregs
[rs
] = rc
;
1030 /* instruction faulted, PC does not advance */
1031 /* either way a strex releases any exclusive lock we have */
1032 env
->exclusive_addr
= -1;
1037 /* AArch64 main loop */
1038 void cpu_loop(CPUARMState
*env
)
1040 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
1042 target_siginfo_t info
;
1046 trapnr
= cpu_arm_exec(cs
);
1051 env
->xregs
[0] = do_syscall(env
,
1061 case EXCP_INTERRUPT
:
1062 /* just indicate that signals should be handled asap */
1065 info
.si_signo
= TARGET_SIGILL
;
1067 info
.si_code
= TARGET_ILL_ILLOPN
;
1068 info
._sifields
._sigfault
._addr
= env
->pc
;
1069 queue_signal(env
, info
.si_signo
, &info
);
1072 if (!do_strex_a64(env
)) {
1075 /* fall through for segv */
1076 case EXCP_PREFETCH_ABORT
:
1077 case EXCP_DATA_ABORT
:
1078 info
.si_signo
= TARGET_SIGSEGV
;
1080 /* XXX: check env->error_code */
1081 info
.si_code
= TARGET_SEGV_MAPERR
;
1082 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
1083 queue_signal(env
, info
.si_signo
, &info
);
1087 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1089 info
.si_signo
= sig
;
1091 info
.si_code
= TARGET_TRAP_BRKPT
;
1092 queue_signal(env
, info
.si_signo
, &info
);
1096 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
1098 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1101 process_pending_signals(env
);
1102 /* Exception return on AArch64 always clears the exclusive monitor,
1103 * so any return to running guest code implies this.
1104 * A strex (successful or otherwise) also clears the monitor, so
1105 * we don't need to specialcase EXCP_STREX.
1107 env
->exclusive_addr
= -1;
1110 #endif /* ndef TARGET_ABI32 */
1114 #ifdef TARGET_UNICORE32
1116 void cpu_loop(CPUUniCore32State
*env
)
1118 CPUState
*cs
= CPU(uc32_env_get_cpu(env
));
1120 unsigned int n
, insn
;
1121 target_siginfo_t info
;
1125 trapnr
= uc32_cpu_exec(cs
);
1128 case UC32_EXCP_PRIV
:
1131 get_user_u32(insn
, env
->regs
[31] - 4);
1132 n
= insn
& 0xffffff;
1134 if (n
>= UC32_SYSCALL_BASE
) {
1136 n
-= UC32_SYSCALL_BASE
;
1137 if (n
== UC32_SYSCALL_NR_set_tls
) {
1138 cpu_set_tls(env
, env
->regs
[0]);
1141 env
->regs
[0] = do_syscall(env
,
1156 case UC32_EXCP_DTRAP
:
1157 case UC32_EXCP_ITRAP
:
1158 info
.si_signo
= TARGET_SIGSEGV
;
1160 /* XXX: check env->error_code */
1161 info
.si_code
= TARGET_SEGV_MAPERR
;
1162 info
._sifields
._sigfault
._addr
= env
->cp0
.c4_faultaddr
;
1163 queue_signal(env
, info
.si_signo
, &info
);
1165 case EXCP_INTERRUPT
:
1166 /* just indicate that signals should be handled asap */
1172 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1174 info
.si_signo
= sig
;
1176 info
.si_code
= TARGET_TRAP_BRKPT
;
1177 queue_signal(env
, info
.si_signo
, &info
);
1184 process_pending_signals(env
);
1188 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1189 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1195 #define SPARC64_STACK_BIAS 2047
1199 /* WARNING: dealing with register windows _is_ complicated. More info
1200 can be found at http://www.sics.se/~psm/sparcstack.html */
1201 static inline int get_reg_index(CPUSPARCState
*env
, int cwp
, int index
)
1203 index
= (index
+ cwp
* 16) % (16 * env
->nwindows
);
1204 /* wrap handling : if cwp is on the last window, then we use the
1205 registers 'after' the end */
1206 if (index
< 8 && env
->cwp
== env
->nwindows
- 1)
1207 index
+= 16 * env
->nwindows
;
1211 /* save the register window 'cwp1' */
1212 static inline void save_window_offset(CPUSPARCState
*env
, int cwp1
)
1217 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1218 #ifdef TARGET_SPARC64
1220 sp_ptr
+= SPARC64_STACK_BIAS
;
1222 #if defined(DEBUG_WIN)
1223 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" save_cwp=%d\n",
1226 for(i
= 0; i
< 16; i
++) {
1227 /* FIXME - what to do if put_user() fails? */
1228 put_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1229 sp_ptr
+= sizeof(abi_ulong
);
1233 static void save_window(CPUSPARCState
*env
)
1235 #ifndef TARGET_SPARC64
1236 unsigned int new_wim
;
1237 new_wim
= ((env
->wim
>> 1) | (env
->wim
<< (env
->nwindows
- 1))) &
1238 ((1LL << env
->nwindows
) - 1);
1239 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1242 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1248 static void restore_window(CPUSPARCState
*env
)
1250 #ifndef TARGET_SPARC64
1251 unsigned int new_wim
;
1253 unsigned int i
, cwp1
;
1256 #ifndef TARGET_SPARC64
1257 new_wim
= ((env
->wim
<< 1) | (env
->wim
>> (env
->nwindows
- 1))) &
1258 ((1LL << env
->nwindows
) - 1);
1261 /* restore the invalid window */
1262 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1263 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1264 #ifdef TARGET_SPARC64
1266 sp_ptr
+= SPARC64_STACK_BIAS
;
1268 #if defined(DEBUG_WIN)
1269 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" load_cwp=%d\n",
1272 for(i
= 0; i
< 16; i
++) {
1273 /* FIXME - what to do if get_user() fails? */
1274 get_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1275 sp_ptr
+= sizeof(abi_ulong
);
1277 #ifdef TARGET_SPARC64
1279 if (env
->cleanwin
< env
->nwindows
- 1)
1287 static void flush_windows(CPUSPARCState
*env
)
1293 /* if restore would invoke restore_window(), then we can stop */
1294 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ offset
);
1295 #ifndef TARGET_SPARC64
1296 if (env
->wim
& (1 << cwp1
))
1299 if (env
->canrestore
== 0)
1304 save_window_offset(env
, cwp1
);
1307 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1308 #ifndef TARGET_SPARC64
1309 /* set wim so that restore will reload the registers */
1310 env
->wim
= 1 << cwp1
;
1312 #if defined(DEBUG_WIN)
1313 printf("flush_windows: nb=%d\n", offset
- 1);
1317 void cpu_loop (CPUSPARCState
*env
)
1319 CPUState
*cs
= CPU(sparc_env_get_cpu(env
));
1322 target_siginfo_t info
;
1326 trapnr
= cpu_sparc_exec(cs
);
1329 /* Compute PSR before exposing state. */
1330 if (env
->cc_op
!= CC_OP_FLAGS
) {
1335 #ifndef TARGET_SPARC64
1342 ret
= do_syscall (env
, env
->gregs
[1],
1343 env
->regwptr
[0], env
->regwptr
[1],
1344 env
->regwptr
[2], env
->regwptr
[3],
1345 env
->regwptr
[4], env
->regwptr
[5],
1347 if ((abi_ulong
)ret
>= (abi_ulong
)(-515)) {
1348 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1349 env
->xcc
|= PSR_CARRY
;
1351 env
->psr
|= PSR_CARRY
;
1355 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1356 env
->xcc
&= ~PSR_CARRY
;
1358 env
->psr
&= ~PSR_CARRY
;
1361 env
->regwptr
[0] = ret
;
1362 /* next instruction */
1364 env
->npc
= env
->npc
+ 4;
1366 case 0x83: /* flush windows */
1371 /* next instruction */
1373 env
->npc
= env
->npc
+ 4;
1375 #ifndef TARGET_SPARC64
1376 case TT_WIN_OVF
: /* window overflow */
1379 case TT_WIN_UNF
: /* window underflow */
1380 restore_window(env
);
1385 info
.si_signo
= TARGET_SIGSEGV
;
1387 /* XXX: check env->error_code */
1388 info
.si_code
= TARGET_SEGV_MAPERR
;
1389 info
._sifields
._sigfault
._addr
= env
->mmuregs
[4];
1390 queue_signal(env
, info
.si_signo
, &info
);
1394 case TT_SPILL
: /* window overflow */
1397 case TT_FILL
: /* window underflow */
1398 restore_window(env
);
1403 info
.si_signo
= TARGET_SIGSEGV
;
1405 /* XXX: check env->error_code */
1406 info
.si_code
= TARGET_SEGV_MAPERR
;
1407 if (trapnr
== TT_DFAULT
)
1408 info
._sifields
._sigfault
._addr
= env
->dmmuregs
[4];
1410 info
._sifields
._sigfault
._addr
= cpu_tsptr(env
)->tpc
;
1411 queue_signal(env
, info
.si_signo
, &info
);
1414 #ifndef TARGET_ABI32
1417 sparc64_get_context(env
);
1421 sparc64_set_context(env
);
1425 case EXCP_INTERRUPT
:
1426 /* just indicate that signals should be handled asap */
1430 info
.si_signo
= TARGET_SIGILL
;
1432 info
.si_code
= TARGET_ILL_ILLOPC
;
1433 info
._sifields
._sigfault
._addr
= env
->pc
;
1434 queue_signal(env
, info
.si_signo
, &info
);
1441 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1444 info
.si_signo
= sig
;
1446 info
.si_code
= TARGET_TRAP_BRKPT
;
1447 queue_signal(env
, info
.si_signo
, &info
);
1452 printf ("Unhandled trap: 0x%x\n", trapnr
);
1453 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1456 process_pending_signals (env
);
1463 static inline uint64_t cpu_ppc_get_tb(CPUPPCState
*env
)
1465 return cpu_get_real_ticks();
1468 uint64_t cpu_ppc_load_tbl(CPUPPCState
*env
)
1470 return cpu_ppc_get_tb(env
);
1473 uint32_t cpu_ppc_load_tbu(CPUPPCState
*env
)
1475 return cpu_ppc_get_tb(env
) >> 32;
1478 uint64_t cpu_ppc_load_atbl(CPUPPCState
*env
)
1480 return cpu_ppc_get_tb(env
);
1483 uint32_t cpu_ppc_load_atbu(CPUPPCState
*env
)
1485 return cpu_ppc_get_tb(env
) >> 32;
1488 uint32_t cpu_ppc601_load_rtcu(CPUPPCState
*env
)
1489 __attribute__ (( alias ("cpu_ppc_load_tbu") ));
1491 uint32_t cpu_ppc601_load_rtcl(CPUPPCState
*env
)
1493 return cpu_ppc_load_tbl(env
) & 0x3FFFFF80;
1496 /* XXX: to be fixed */
1497 int ppc_dcr_read (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t *valp
)
1502 int ppc_dcr_write (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t val
)
/* Dump an exception message plus full CPU state to stderr and, when
   enabled, to the qemu log. */
#define EXCP_DUMP(env, fmt, ...)                                        \
do {                                                                    \
    CPUState *cs = ENV_GET_CPU(env);                                    \
    fprintf(stderr, fmt , ## __VA_ARGS__);                              \
    cpu_dump_state(cs, stderr, fprintf, 0);                             \
    qemu_log(fmt, ## __VA_ARGS__);                                      \
    if (qemu_log_enabled()) {                                           \
        log_cpu_state(cs, 0);                                           \
    }                                                                   \
} while (0)
1518 static int do_store_exclusive(CPUPPCState
*env
)
1521 target_ulong page_addr
;
1522 target_ulong val
, val2
__attribute__((unused
)) = 0;
1526 addr
= env
->reserve_ea
;
1527 page_addr
= addr
& TARGET_PAGE_MASK
;
1530 flags
= page_get_flags(page_addr
);
1531 if ((flags
& PAGE_READ
) == 0) {
1534 int reg
= env
->reserve_info
& 0x1f;
1535 int size
= env
->reserve_info
>> 5;
1538 if (addr
== env
->reserve_addr
) {
1540 case 1: segv
= get_user_u8(val
, addr
); break;
1541 case 2: segv
= get_user_u16(val
, addr
); break;
1542 case 4: segv
= get_user_u32(val
, addr
); break;
1543 #if defined(TARGET_PPC64)
1544 case 8: segv
= get_user_u64(val
, addr
); break;
1546 segv
= get_user_u64(val
, addr
);
1548 segv
= get_user_u64(val2
, addr
+ 8);
1555 if (!segv
&& val
== env
->reserve_val
) {
1556 val
= env
->gpr
[reg
];
1558 case 1: segv
= put_user_u8(val
, addr
); break;
1559 case 2: segv
= put_user_u16(val
, addr
); break;
1560 case 4: segv
= put_user_u32(val
, addr
); break;
1561 #if defined(TARGET_PPC64)
1562 case 8: segv
= put_user_u64(val
, addr
); break;
1564 if (val2
== env
->reserve_val2
) {
1567 val
= env
->gpr
[reg
+1];
1569 val2
= env
->gpr
[reg
+1];
1571 segv
= put_user_u64(val
, addr
);
1573 segv
= put_user_u64(val2
, addr
+ 8);
1586 env
->crf
[0] = (stored
<< 1) | xer_so
;
1587 env
->reserve_addr
= (target_ulong
)-1;
1597 void cpu_loop(CPUPPCState
*env
)
1599 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
1600 target_siginfo_t info
;
1606 trapnr
= cpu_ppc_exec(cs
);
1609 case POWERPC_EXCP_NONE
:
1612 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1613 cpu_abort(cs
, "Critical interrupt while in user mode. "
1616 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1617 cpu_abort(cs
, "Machine check exception while in user mode. "
1620 case POWERPC_EXCP_DSI
: /* Data storage exception */
1621 EXCP_DUMP(env
, "Invalid data memory access: 0x" TARGET_FMT_lx
"\n",
1623 /* XXX: check this. Seems bugged */
1624 switch (env
->error_code
& 0xFF000000) {
1626 info
.si_signo
= TARGET_SIGSEGV
;
1628 info
.si_code
= TARGET_SEGV_MAPERR
;
1631 info
.si_signo
= TARGET_SIGILL
;
1633 info
.si_code
= TARGET_ILL_ILLADR
;
1636 info
.si_signo
= TARGET_SIGSEGV
;
1638 info
.si_code
= TARGET_SEGV_ACCERR
;
1641 /* Let's send a regular segfault... */
1642 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1644 info
.si_signo
= TARGET_SIGSEGV
;
1646 info
.si_code
= TARGET_SEGV_MAPERR
;
1649 info
._sifields
._sigfault
._addr
= env
->nip
;
1650 queue_signal(env
, info
.si_signo
, &info
);
1652 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1653 EXCP_DUMP(env
, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1654 "\n", env
->spr
[SPR_SRR0
]);
1655 /* XXX: check this */
1656 switch (env
->error_code
& 0xFF000000) {
1658 info
.si_signo
= TARGET_SIGSEGV
;
1660 info
.si_code
= TARGET_SEGV_MAPERR
;
1664 info
.si_signo
= TARGET_SIGSEGV
;
1666 info
.si_code
= TARGET_SEGV_ACCERR
;
1669 /* Let's send a regular segfault... */
1670 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1672 info
.si_signo
= TARGET_SIGSEGV
;
1674 info
.si_code
= TARGET_SEGV_MAPERR
;
1677 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1678 queue_signal(env
, info
.si_signo
, &info
);
1680 case POWERPC_EXCP_EXTERNAL
: /* External input */
1681 cpu_abort(cs
, "External interrupt while in user mode. "
1684 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1685 EXCP_DUMP(env
, "Unaligned memory access\n");
1686 /* XXX: check this */
1687 info
.si_signo
= TARGET_SIGBUS
;
1689 info
.si_code
= TARGET_BUS_ADRALN
;
1690 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1691 queue_signal(env
, info
.si_signo
, &info
);
1693 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1694 /* XXX: check this */
1695 switch (env
->error_code
& ~0xF) {
1696 case POWERPC_EXCP_FP
:
1697 EXCP_DUMP(env
, "Floating point program exception\n");
1698 info
.si_signo
= TARGET_SIGFPE
;
1700 switch (env
->error_code
& 0xF) {
1701 case POWERPC_EXCP_FP_OX
:
1702 info
.si_code
= TARGET_FPE_FLTOVF
;
1704 case POWERPC_EXCP_FP_UX
:
1705 info
.si_code
= TARGET_FPE_FLTUND
;
1707 case POWERPC_EXCP_FP_ZX
:
1708 case POWERPC_EXCP_FP_VXZDZ
:
1709 info
.si_code
= TARGET_FPE_FLTDIV
;
1711 case POWERPC_EXCP_FP_XX
:
1712 info
.si_code
= TARGET_FPE_FLTRES
;
1714 case POWERPC_EXCP_FP_VXSOFT
:
1715 info
.si_code
= TARGET_FPE_FLTINV
;
1717 case POWERPC_EXCP_FP_VXSNAN
:
1718 case POWERPC_EXCP_FP_VXISI
:
1719 case POWERPC_EXCP_FP_VXIDI
:
1720 case POWERPC_EXCP_FP_VXIMZ
:
1721 case POWERPC_EXCP_FP_VXVC
:
1722 case POWERPC_EXCP_FP_VXSQRT
:
1723 case POWERPC_EXCP_FP_VXCVI
:
1724 info
.si_code
= TARGET_FPE_FLTSUB
;
1727 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1732 case POWERPC_EXCP_INVAL
:
1733 EXCP_DUMP(env
, "Invalid instruction\n");
1734 info
.si_signo
= TARGET_SIGILL
;
1736 switch (env
->error_code
& 0xF) {
1737 case POWERPC_EXCP_INVAL_INVAL
:
1738 info
.si_code
= TARGET_ILL_ILLOPC
;
1740 case POWERPC_EXCP_INVAL_LSWX
:
1741 info
.si_code
= TARGET_ILL_ILLOPN
;
1743 case POWERPC_EXCP_INVAL_SPR
:
1744 info
.si_code
= TARGET_ILL_PRVREG
;
1746 case POWERPC_EXCP_INVAL_FP
:
1747 info
.si_code
= TARGET_ILL_COPROC
;
1750 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1751 env
->error_code
& 0xF);
1752 info
.si_code
= TARGET_ILL_ILLADR
;
1756 case POWERPC_EXCP_PRIV
:
1757 EXCP_DUMP(env
, "Privilege violation\n");
1758 info
.si_signo
= TARGET_SIGILL
;
1760 switch (env
->error_code
& 0xF) {
1761 case POWERPC_EXCP_PRIV_OPC
:
1762 info
.si_code
= TARGET_ILL_PRVOPC
;
1764 case POWERPC_EXCP_PRIV_REG
:
1765 info
.si_code
= TARGET_ILL_PRVREG
;
1768 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1769 env
->error_code
& 0xF);
1770 info
.si_code
= TARGET_ILL_PRVOPC
;
1774 case POWERPC_EXCP_TRAP
:
1775 cpu_abort(cs
, "Tried to call a TRAP\n");
1778 /* Should not happen ! */
1779 cpu_abort(cs
, "Unknown program exception (%02x)\n",
1783 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1784 queue_signal(env
, info
.si_signo
, &info
);
1786 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1787 EXCP_DUMP(env
, "No floating point allowed\n");
1788 info
.si_signo
= TARGET_SIGILL
;
1790 info
.si_code
= TARGET_ILL_COPROC
;
1791 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1792 queue_signal(env
, info
.si_signo
, &info
);
1794 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1795 cpu_abort(cs
, "Syscall exception while in user mode. "
1798 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1799 EXCP_DUMP(env
, "No APU instruction allowed\n");
1800 info
.si_signo
= TARGET_SIGILL
;
1802 info
.si_code
= TARGET_ILL_COPROC
;
1803 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1804 queue_signal(env
, info
.si_signo
, &info
);
1806 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1807 cpu_abort(cs
, "Decrementer interrupt while in user mode. "
1810 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1811 cpu_abort(cs
, "Fix interval timer interrupt while in user mode. "
1814 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1815 cpu_abort(cs
, "Watchdog timer interrupt while in user mode. "
1818 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1819 cpu_abort(cs
, "Data TLB exception while in user mode. "
1822 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1823 cpu_abort(cs
, "Instruction TLB exception while in user mode. "
1826 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1827 EXCP_DUMP(env
, "No SPE/floating-point instruction allowed\n");
1828 info
.si_signo
= TARGET_SIGILL
;
1830 info
.si_code
= TARGET_ILL_COPROC
;
1831 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1832 queue_signal(env
, info
.si_signo
, &info
);
1834 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1835 cpu_abort(cs
, "Embedded floating-point data IRQ not handled\n");
1837 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1838 cpu_abort(cs
, "Embedded floating-point round IRQ not handled\n");
1840 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1841 cpu_abort(cs
, "Performance monitor exception not handled\n");
1843 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1844 cpu_abort(cs
, "Doorbell interrupt while in user mode. "
1847 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1848 cpu_abort(cs
, "Doorbell critical interrupt while in user mode. "
1851 case POWERPC_EXCP_RESET
: /* System reset exception */
1852 cpu_abort(cs
, "Reset interrupt while in user mode. "
1855 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1856 cpu_abort(cs
, "Data segment exception while in user mode. "
1859 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1860 cpu_abort(cs
, "Instruction segment exception "
1861 "while in user mode. Aborting\n");
1863 /* PowerPC 64 with hypervisor mode support */
1864 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1865 cpu_abort(cs
, "Hypervisor decrementer interrupt "
1866 "while in user mode. Aborting\n");
1868 case POWERPC_EXCP_TRACE
: /* Trace exception */
1870 * we use this exception to emulate step-by-step execution mode.
1873 /* PowerPC 64 with hypervisor mode support */
1874 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1875 cpu_abort(cs
, "Hypervisor data storage exception "
1876 "while in user mode. Aborting\n");
1878 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1879 cpu_abort(cs
, "Hypervisor instruction storage exception "
1880 "while in user mode. Aborting\n");
1882 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1883 cpu_abort(cs
, "Hypervisor data segment exception "
1884 "while in user mode. Aborting\n");
1886 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1887 cpu_abort(cs
, "Hypervisor instruction segment exception "
1888 "while in user mode. Aborting\n");
1890 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1891 EXCP_DUMP(env
, "No Altivec instructions allowed\n");
1892 info
.si_signo
= TARGET_SIGILL
;
1894 info
.si_code
= TARGET_ILL_COPROC
;
1895 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1896 queue_signal(env
, info
.si_signo
, &info
);
1898 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1899 cpu_abort(cs
, "Programmable interval timer interrupt "
1900 "while in user mode. Aborting\n");
1902 case POWERPC_EXCP_IO
: /* IO error exception */
1903 cpu_abort(cs
, "IO error exception while in user mode. "
1906 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1907 cpu_abort(cs
, "Run mode exception while in user mode. "
1910 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1911 cpu_abort(cs
, "Emulation trap exception not handled\n");
1913 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1914 cpu_abort(cs
, "Instruction fetch TLB exception "
1915 "while in user-mode. Aborting");
1917 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1918 cpu_abort(cs
, "Data load TLB exception while in user-mode. "
1921 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
1922 cpu_abort(cs
, "Data store TLB exception while in user-mode. "
1925 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
1926 cpu_abort(cs
, "Floating-point assist exception not handled\n");
1928 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1929 cpu_abort(cs
, "Instruction address breakpoint exception "
1932 case POWERPC_EXCP_SMI
: /* System management interrupt */
1933 cpu_abort(cs
, "System management interrupt while in user mode. "
1936 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1937 cpu_abort(cs
, "Thermal interrupt interrupt while in user mode. "
1940 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
1941 cpu_abort(cs
, "Performance monitor exception not handled\n");
1943 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1944 cpu_abort(cs
, "Vector assist exception not handled\n");
1946 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
1947 cpu_abort(cs
, "Soft patch exception not handled\n");
1949 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1950 cpu_abort(cs
, "Maintenance exception while in user mode. "
1953 case POWERPC_EXCP_STOP
: /* stop translation */
1954 /* We did invalidate the instruction cache. Go on */
1956 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
1957 /* We just stopped because of a branch. Go on */
1959 case POWERPC_EXCP_SYSCALL_USER
:
1960 /* system call in user-mode emulation */
1962 * PPC ABI uses overflow flag in cr0 to signal an error
1965 env
->crf
[0] &= ~0x1;
1966 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
1967 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
1969 if (ret
== (target_ulong
)(-TARGET_QEMU_ESIGRETURN
)) {
1970 /* Returning from a successful sigreturn syscall.
1971 Avoid corrupting register state. */
1974 if (ret
> (target_ulong
)(-515)) {
1980 case POWERPC_EXCP_STCX
:
1981 if (do_store_exclusive(env
)) {
1982 info
.si_signo
= TARGET_SIGSEGV
;
1984 info
.si_code
= TARGET_SEGV_MAPERR
;
1985 info
._sifields
._sigfault
._addr
= env
->nip
;
1986 queue_signal(env
, info
.si_signo
, &info
);
1993 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1995 info
.si_signo
= sig
;
1997 info
.si_code
= TARGET_TRAP_BRKPT
;
1998 queue_signal(env
, info
.si_signo
, &info
);
2002 case EXCP_INTERRUPT
:
2003 /* just indicate that signals should be handled asap */
2006 cpu_abort(cs
, "Unknown exception 0x%d. Aborting\n", trapnr
);
2009 process_pending_signals(env
);
2016 # ifdef TARGET_ABI_MIPSO32
2017 # define MIPS_SYS(name, args) args,
2018 static const uint8_t mips_syscall_args
[] = {
2019 MIPS_SYS(sys_syscall
, 8) /* 4000 */
2020 MIPS_SYS(sys_exit
, 1)
2021 MIPS_SYS(sys_fork
, 0)
2022 MIPS_SYS(sys_read
, 3)
2023 MIPS_SYS(sys_write
, 3)
2024 MIPS_SYS(sys_open
, 3) /* 4005 */
2025 MIPS_SYS(sys_close
, 1)
2026 MIPS_SYS(sys_waitpid
, 3)
2027 MIPS_SYS(sys_creat
, 2)
2028 MIPS_SYS(sys_link
, 2)
2029 MIPS_SYS(sys_unlink
, 1) /* 4010 */
2030 MIPS_SYS(sys_execve
, 0)
2031 MIPS_SYS(sys_chdir
, 1)
2032 MIPS_SYS(sys_time
, 1)
2033 MIPS_SYS(sys_mknod
, 3)
2034 MIPS_SYS(sys_chmod
, 2) /* 4015 */
2035 MIPS_SYS(sys_lchown
, 3)
2036 MIPS_SYS(sys_ni_syscall
, 0)
2037 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_stat */
2038 MIPS_SYS(sys_lseek
, 3)
2039 MIPS_SYS(sys_getpid
, 0) /* 4020 */
2040 MIPS_SYS(sys_mount
, 5)
2041 MIPS_SYS(sys_umount
, 1)
2042 MIPS_SYS(sys_setuid
, 1)
2043 MIPS_SYS(sys_getuid
, 0)
2044 MIPS_SYS(sys_stime
, 1) /* 4025 */
2045 MIPS_SYS(sys_ptrace
, 4)
2046 MIPS_SYS(sys_alarm
, 1)
2047 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_fstat */
2048 MIPS_SYS(sys_pause
, 0)
2049 MIPS_SYS(sys_utime
, 2) /* 4030 */
2050 MIPS_SYS(sys_ni_syscall
, 0)
2051 MIPS_SYS(sys_ni_syscall
, 0)
2052 MIPS_SYS(sys_access
, 2)
2053 MIPS_SYS(sys_nice
, 1)
2054 MIPS_SYS(sys_ni_syscall
, 0) /* 4035 */
2055 MIPS_SYS(sys_sync
, 0)
2056 MIPS_SYS(sys_kill
, 2)
2057 MIPS_SYS(sys_rename
, 2)
2058 MIPS_SYS(sys_mkdir
, 2)
2059 MIPS_SYS(sys_rmdir
, 1) /* 4040 */
2060 MIPS_SYS(sys_dup
, 1)
2061 MIPS_SYS(sys_pipe
, 0)
2062 MIPS_SYS(sys_times
, 1)
2063 MIPS_SYS(sys_ni_syscall
, 0)
2064 MIPS_SYS(sys_brk
, 1) /* 4045 */
2065 MIPS_SYS(sys_setgid
, 1)
2066 MIPS_SYS(sys_getgid
, 0)
2067 MIPS_SYS(sys_ni_syscall
, 0) /* was signal(2) */
2068 MIPS_SYS(sys_geteuid
, 0)
2069 MIPS_SYS(sys_getegid
, 0) /* 4050 */
2070 MIPS_SYS(sys_acct
, 0)
2071 MIPS_SYS(sys_umount2
, 2)
2072 MIPS_SYS(sys_ni_syscall
, 0)
2073 MIPS_SYS(sys_ioctl
, 3)
2074 MIPS_SYS(sys_fcntl
, 3) /* 4055 */
2075 MIPS_SYS(sys_ni_syscall
, 2)
2076 MIPS_SYS(sys_setpgid
, 2)
2077 MIPS_SYS(sys_ni_syscall
, 0)
2078 MIPS_SYS(sys_olduname
, 1)
2079 MIPS_SYS(sys_umask
, 1) /* 4060 */
2080 MIPS_SYS(sys_chroot
, 1)
2081 MIPS_SYS(sys_ustat
, 2)
2082 MIPS_SYS(sys_dup2
, 2)
2083 MIPS_SYS(sys_getppid
, 0)
2084 MIPS_SYS(sys_getpgrp
, 0) /* 4065 */
2085 MIPS_SYS(sys_setsid
, 0)
2086 MIPS_SYS(sys_sigaction
, 3)
2087 MIPS_SYS(sys_sgetmask
, 0)
2088 MIPS_SYS(sys_ssetmask
, 1)
2089 MIPS_SYS(sys_setreuid
, 2) /* 4070 */
2090 MIPS_SYS(sys_setregid
, 2)
2091 MIPS_SYS(sys_sigsuspend
, 0)
2092 MIPS_SYS(sys_sigpending
, 1)
2093 MIPS_SYS(sys_sethostname
, 2)
2094 MIPS_SYS(sys_setrlimit
, 2) /* 4075 */
2095 MIPS_SYS(sys_getrlimit
, 2)
2096 MIPS_SYS(sys_getrusage
, 2)
2097 MIPS_SYS(sys_gettimeofday
, 2)
2098 MIPS_SYS(sys_settimeofday
, 2)
2099 MIPS_SYS(sys_getgroups
, 2) /* 4080 */
2100 MIPS_SYS(sys_setgroups
, 2)
2101 MIPS_SYS(sys_ni_syscall
, 0) /* old_select */
2102 MIPS_SYS(sys_symlink
, 2)
2103 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_lstat */
2104 MIPS_SYS(sys_readlink
, 3) /* 4085 */
2105 MIPS_SYS(sys_uselib
, 1)
2106 MIPS_SYS(sys_swapon
, 2)
2107 MIPS_SYS(sys_reboot
, 3)
2108 MIPS_SYS(old_readdir
, 3)
2109 MIPS_SYS(old_mmap
, 6) /* 4090 */
2110 MIPS_SYS(sys_munmap
, 2)
2111 MIPS_SYS(sys_truncate
, 2)
2112 MIPS_SYS(sys_ftruncate
, 2)
2113 MIPS_SYS(sys_fchmod
, 2)
2114 MIPS_SYS(sys_fchown
, 3) /* 4095 */
2115 MIPS_SYS(sys_getpriority
, 2)
2116 MIPS_SYS(sys_setpriority
, 3)
2117 MIPS_SYS(sys_ni_syscall
, 0)
2118 MIPS_SYS(sys_statfs
, 2)
2119 MIPS_SYS(sys_fstatfs
, 2) /* 4100 */
2120 MIPS_SYS(sys_ni_syscall
, 0) /* was ioperm(2) */
2121 MIPS_SYS(sys_socketcall
, 2)
2122 MIPS_SYS(sys_syslog
, 3)
2123 MIPS_SYS(sys_setitimer
, 3)
2124 MIPS_SYS(sys_getitimer
, 2) /* 4105 */
2125 MIPS_SYS(sys_newstat
, 2)
2126 MIPS_SYS(sys_newlstat
, 2)
2127 MIPS_SYS(sys_newfstat
, 2)
2128 MIPS_SYS(sys_uname
, 1)
2129 MIPS_SYS(sys_ni_syscall
, 0) /* 4110 was iopl(2) */
2130 MIPS_SYS(sys_vhangup
, 0)
2131 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_idle() */
2132 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_vm86 */
2133 MIPS_SYS(sys_wait4
, 4)
2134 MIPS_SYS(sys_swapoff
, 1) /* 4115 */
2135 MIPS_SYS(sys_sysinfo
, 1)
2136 MIPS_SYS(sys_ipc
, 6)
2137 MIPS_SYS(sys_fsync
, 1)
2138 MIPS_SYS(sys_sigreturn
, 0)
2139 MIPS_SYS(sys_clone
, 6) /* 4120 */
2140 MIPS_SYS(sys_setdomainname
, 2)
2141 MIPS_SYS(sys_newuname
, 1)
2142 MIPS_SYS(sys_ni_syscall
, 0) /* sys_modify_ldt */
2143 MIPS_SYS(sys_adjtimex
, 1)
2144 MIPS_SYS(sys_mprotect
, 3) /* 4125 */
2145 MIPS_SYS(sys_sigprocmask
, 3)
2146 MIPS_SYS(sys_ni_syscall
, 0) /* was create_module */
2147 MIPS_SYS(sys_init_module
, 5)
2148 MIPS_SYS(sys_delete_module
, 1)
2149 MIPS_SYS(sys_ni_syscall
, 0) /* 4130 was get_kernel_syms */
2150 MIPS_SYS(sys_quotactl
, 0)
2151 MIPS_SYS(sys_getpgid
, 1)
2152 MIPS_SYS(sys_fchdir
, 1)
2153 MIPS_SYS(sys_bdflush
, 2)
2154 MIPS_SYS(sys_sysfs
, 3) /* 4135 */
2155 MIPS_SYS(sys_personality
, 1)
2156 MIPS_SYS(sys_ni_syscall
, 0) /* for afs_syscall */
2157 MIPS_SYS(sys_setfsuid
, 1)
2158 MIPS_SYS(sys_setfsgid
, 1)
2159 MIPS_SYS(sys_llseek
, 5) /* 4140 */
2160 MIPS_SYS(sys_getdents
, 3)
2161 MIPS_SYS(sys_select
, 5)
2162 MIPS_SYS(sys_flock
, 2)
2163 MIPS_SYS(sys_msync
, 3)
2164 MIPS_SYS(sys_readv
, 3) /* 4145 */
2165 MIPS_SYS(sys_writev
, 3)
2166 MIPS_SYS(sys_cacheflush
, 3)
2167 MIPS_SYS(sys_cachectl
, 3)
2168 MIPS_SYS(sys_sysmips
, 4)
2169 MIPS_SYS(sys_ni_syscall
, 0) /* 4150 */
2170 MIPS_SYS(sys_getsid
, 1)
2171 MIPS_SYS(sys_fdatasync
, 0)
2172 MIPS_SYS(sys_sysctl
, 1)
2173 MIPS_SYS(sys_mlock
, 2)
2174 MIPS_SYS(sys_munlock
, 2) /* 4155 */
2175 MIPS_SYS(sys_mlockall
, 1)
2176 MIPS_SYS(sys_munlockall
, 0)
2177 MIPS_SYS(sys_sched_setparam
, 2)
2178 MIPS_SYS(sys_sched_getparam
, 2)
2179 MIPS_SYS(sys_sched_setscheduler
, 3) /* 4160 */
2180 MIPS_SYS(sys_sched_getscheduler
, 1)
2181 MIPS_SYS(sys_sched_yield
, 0)
2182 MIPS_SYS(sys_sched_get_priority_max
, 1)
2183 MIPS_SYS(sys_sched_get_priority_min
, 1)
2184 MIPS_SYS(sys_sched_rr_get_interval
, 2) /* 4165 */
2185 MIPS_SYS(sys_nanosleep
, 2)
2186 MIPS_SYS(sys_mremap
, 5)
2187 MIPS_SYS(sys_accept
, 3)
2188 MIPS_SYS(sys_bind
, 3)
2189 MIPS_SYS(sys_connect
, 3) /* 4170 */
2190 MIPS_SYS(sys_getpeername
, 3)
2191 MIPS_SYS(sys_getsockname
, 3)
2192 MIPS_SYS(sys_getsockopt
, 5)
2193 MIPS_SYS(sys_listen
, 2)
2194 MIPS_SYS(sys_recv
, 4) /* 4175 */
2195 MIPS_SYS(sys_recvfrom
, 6)
2196 MIPS_SYS(sys_recvmsg
, 3)
2197 MIPS_SYS(sys_send
, 4)
2198 MIPS_SYS(sys_sendmsg
, 3)
2199 MIPS_SYS(sys_sendto
, 6) /* 4180 */
2200 MIPS_SYS(sys_setsockopt
, 5)
2201 MIPS_SYS(sys_shutdown
, 2)
2202 MIPS_SYS(sys_socket
, 3)
2203 MIPS_SYS(sys_socketpair
, 4)
2204 MIPS_SYS(sys_setresuid
, 3) /* 4185 */
2205 MIPS_SYS(sys_getresuid
, 3)
2206 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_query_module */
2207 MIPS_SYS(sys_poll
, 3)
2208 MIPS_SYS(sys_nfsservctl
, 3)
2209 MIPS_SYS(sys_setresgid
, 3) /* 4190 */
2210 MIPS_SYS(sys_getresgid
, 3)
2211 MIPS_SYS(sys_prctl
, 5)
2212 MIPS_SYS(sys_rt_sigreturn
, 0)
2213 MIPS_SYS(sys_rt_sigaction
, 4)
2214 MIPS_SYS(sys_rt_sigprocmask
, 4) /* 4195 */
2215 MIPS_SYS(sys_rt_sigpending
, 2)
2216 MIPS_SYS(sys_rt_sigtimedwait
, 4)
2217 MIPS_SYS(sys_rt_sigqueueinfo
, 3)
2218 MIPS_SYS(sys_rt_sigsuspend
, 0)
2219 MIPS_SYS(sys_pread64
, 6) /* 4200 */
2220 MIPS_SYS(sys_pwrite64
, 6)
2221 MIPS_SYS(sys_chown
, 3)
2222 MIPS_SYS(sys_getcwd
, 2)
2223 MIPS_SYS(sys_capget
, 2)
2224 MIPS_SYS(sys_capset
, 2) /* 4205 */
2225 MIPS_SYS(sys_sigaltstack
, 2)
2226 MIPS_SYS(sys_sendfile
, 4)
2227 MIPS_SYS(sys_ni_syscall
, 0)
2228 MIPS_SYS(sys_ni_syscall
, 0)
2229 MIPS_SYS(sys_mmap2
, 6) /* 4210 */
2230 MIPS_SYS(sys_truncate64
, 4)
2231 MIPS_SYS(sys_ftruncate64
, 4)
2232 MIPS_SYS(sys_stat64
, 2)
2233 MIPS_SYS(sys_lstat64
, 2)
2234 MIPS_SYS(sys_fstat64
, 2) /* 4215 */
2235 MIPS_SYS(sys_pivot_root
, 2)
2236 MIPS_SYS(sys_mincore
, 3)
2237 MIPS_SYS(sys_madvise
, 3)
2238 MIPS_SYS(sys_getdents64
, 3)
2239 MIPS_SYS(sys_fcntl64
, 3) /* 4220 */
2240 MIPS_SYS(sys_ni_syscall
, 0)
2241 MIPS_SYS(sys_gettid
, 0)
2242 MIPS_SYS(sys_readahead
, 5)
2243 MIPS_SYS(sys_setxattr
, 5)
2244 MIPS_SYS(sys_lsetxattr
, 5) /* 4225 */
2245 MIPS_SYS(sys_fsetxattr
, 5)
2246 MIPS_SYS(sys_getxattr
, 4)
2247 MIPS_SYS(sys_lgetxattr
, 4)
2248 MIPS_SYS(sys_fgetxattr
, 4)
2249 MIPS_SYS(sys_listxattr
, 3) /* 4230 */
2250 MIPS_SYS(sys_llistxattr
, 3)
2251 MIPS_SYS(sys_flistxattr
, 3)
2252 MIPS_SYS(sys_removexattr
, 2)
2253 MIPS_SYS(sys_lremovexattr
, 2)
2254 MIPS_SYS(sys_fremovexattr
, 2) /* 4235 */
2255 MIPS_SYS(sys_tkill
, 2)
2256 MIPS_SYS(sys_sendfile64
, 5)
2257 MIPS_SYS(sys_futex
, 6)
2258 MIPS_SYS(sys_sched_setaffinity
, 3)
2259 MIPS_SYS(sys_sched_getaffinity
, 3) /* 4240 */
2260 MIPS_SYS(sys_io_setup
, 2)
2261 MIPS_SYS(sys_io_destroy
, 1)
2262 MIPS_SYS(sys_io_getevents
, 5)
2263 MIPS_SYS(sys_io_submit
, 3)
2264 MIPS_SYS(sys_io_cancel
, 3) /* 4245 */
2265 MIPS_SYS(sys_exit_group
, 1)
2266 MIPS_SYS(sys_lookup_dcookie
, 3)
2267 MIPS_SYS(sys_epoll_create
, 1)
2268 MIPS_SYS(sys_epoll_ctl
, 4)
2269 MIPS_SYS(sys_epoll_wait
, 3) /* 4250 */
2270 MIPS_SYS(sys_remap_file_pages
, 5)
2271 MIPS_SYS(sys_set_tid_address
, 1)
2272 MIPS_SYS(sys_restart_syscall
, 0)
2273 MIPS_SYS(sys_fadvise64_64
, 7)
2274 MIPS_SYS(sys_statfs64
, 3) /* 4255 */
2275 MIPS_SYS(sys_fstatfs64
, 2)
2276 MIPS_SYS(sys_timer_create
, 3)
2277 MIPS_SYS(sys_timer_settime
, 4)
2278 MIPS_SYS(sys_timer_gettime
, 2)
2279 MIPS_SYS(sys_timer_getoverrun
, 1) /* 4260 */
2280 MIPS_SYS(sys_timer_delete
, 1)
2281 MIPS_SYS(sys_clock_settime
, 2)
2282 MIPS_SYS(sys_clock_gettime
, 2)
2283 MIPS_SYS(sys_clock_getres
, 2)
2284 MIPS_SYS(sys_clock_nanosleep
, 4) /* 4265 */
2285 MIPS_SYS(sys_tgkill
, 3)
2286 MIPS_SYS(sys_utimes
, 2)
2287 MIPS_SYS(sys_mbind
, 4)
2288 MIPS_SYS(sys_ni_syscall
, 0) /* sys_get_mempolicy */
2289 MIPS_SYS(sys_ni_syscall
, 0) /* 4270 sys_set_mempolicy */
2290 MIPS_SYS(sys_mq_open
, 4)
2291 MIPS_SYS(sys_mq_unlink
, 1)
2292 MIPS_SYS(sys_mq_timedsend
, 5)
2293 MIPS_SYS(sys_mq_timedreceive
, 5)
2294 MIPS_SYS(sys_mq_notify
, 2) /* 4275 */
2295 MIPS_SYS(sys_mq_getsetattr
, 3)
2296 MIPS_SYS(sys_ni_syscall
, 0) /* sys_vserver */
2297 MIPS_SYS(sys_waitid
, 4)
2298 MIPS_SYS(sys_ni_syscall
, 0) /* available, was setaltroot */
2299 MIPS_SYS(sys_add_key
, 5)
2300 MIPS_SYS(sys_request_key
, 4)
2301 MIPS_SYS(sys_keyctl
, 5)
2302 MIPS_SYS(sys_set_thread_area
, 1)
2303 MIPS_SYS(sys_inotify_init
, 0)
2304 MIPS_SYS(sys_inotify_add_watch
, 3) /* 4285 */
2305 MIPS_SYS(sys_inotify_rm_watch
, 2)
2306 MIPS_SYS(sys_migrate_pages
, 4)
2307 MIPS_SYS(sys_openat
, 4)
2308 MIPS_SYS(sys_mkdirat
, 3)
2309 MIPS_SYS(sys_mknodat
, 4) /* 4290 */
2310 MIPS_SYS(sys_fchownat
, 5)
2311 MIPS_SYS(sys_futimesat
, 3)
2312 MIPS_SYS(sys_fstatat64
, 4)
2313 MIPS_SYS(sys_unlinkat
, 3)
2314 MIPS_SYS(sys_renameat
, 4) /* 4295 */
2315 MIPS_SYS(sys_linkat
, 5)
2316 MIPS_SYS(sys_symlinkat
, 3)
2317 MIPS_SYS(sys_readlinkat
, 4)
2318 MIPS_SYS(sys_fchmodat
, 3)
2319 MIPS_SYS(sys_faccessat
, 3) /* 4300 */
2320 MIPS_SYS(sys_pselect6
, 6)
2321 MIPS_SYS(sys_ppoll
, 5)
2322 MIPS_SYS(sys_unshare
, 1)
2323 MIPS_SYS(sys_splice
, 6)
2324 MIPS_SYS(sys_sync_file_range
, 7) /* 4305 */
2325 MIPS_SYS(sys_tee
, 4)
2326 MIPS_SYS(sys_vmsplice
, 4)
2327 MIPS_SYS(sys_move_pages
, 6)
2328 MIPS_SYS(sys_set_robust_list
, 2)
2329 MIPS_SYS(sys_get_robust_list
, 3) /* 4310 */
2330 MIPS_SYS(sys_kexec_load
, 4)
2331 MIPS_SYS(sys_getcpu
, 3)
2332 MIPS_SYS(sys_epoll_pwait
, 6)
2333 MIPS_SYS(sys_ioprio_set
, 3)
2334 MIPS_SYS(sys_ioprio_get
, 2)
2335 MIPS_SYS(sys_utimensat
, 4)
2336 MIPS_SYS(sys_signalfd
, 3)
2337 MIPS_SYS(sys_ni_syscall
, 0) /* was timerfd */
2338 MIPS_SYS(sys_eventfd
, 1)
2339 MIPS_SYS(sys_fallocate
, 6) /* 4320 */
2340 MIPS_SYS(sys_timerfd_create
, 2)
2341 MIPS_SYS(sys_timerfd_gettime
, 2)
2342 MIPS_SYS(sys_timerfd_settime
, 4)
2343 MIPS_SYS(sys_signalfd4
, 4)
2344 MIPS_SYS(sys_eventfd2
, 2) /* 4325 */
2345 MIPS_SYS(sys_epoll_create1
, 1)
2346 MIPS_SYS(sys_dup3
, 3)
2347 MIPS_SYS(sys_pipe2
, 2)
2348 MIPS_SYS(sys_inotify_init1
, 1)
2349 MIPS_SYS(sys_preadv
, 6) /* 4330 */
2350 MIPS_SYS(sys_pwritev
, 6)
2351 MIPS_SYS(sys_rt_tgsigqueueinfo
, 4)
2352 MIPS_SYS(sys_perf_event_open
, 5)
2353 MIPS_SYS(sys_accept4
, 4)
2354 MIPS_SYS(sys_recvmmsg
, 5) /* 4335 */
2355 MIPS_SYS(sys_fanotify_init
, 2)
2356 MIPS_SYS(sys_fanotify_mark
, 6)
2357 MIPS_SYS(sys_prlimit64
, 4)
2358 MIPS_SYS(sys_name_to_handle_at
, 5)
2359 MIPS_SYS(sys_open_by_handle_at
, 3) /* 4340 */
2360 MIPS_SYS(sys_clock_adjtime
, 2)
2361 MIPS_SYS(sys_syncfs
, 1)
2366 static int do_store_exclusive(CPUMIPSState
*env
)
2369 target_ulong page_addr
;
2377 page_addr
= addr
& TARGET_PAGE_MASK
;
2380 flags
= page_get_flags(page_addr
);
2381 if ((flags
& PAGE_READ
) == 0) {
2384 reg
= env
->llreg
& 0x1f;
2385 d
= (env
->llreg
& 0x20) != 0;
2387 segv
= get_user_s64(val
, addr
);
2389 segv
= get_user_s32(val
, addr
);
2392 if (val
!= env
->llval
) {
2393 env
->active_tc
.gpr
[reg
] = 0;
2396 segv
= put_user_u64(env
->llnewval
, addr
);
2398 segv
= put_user_u32(env
->llnewval
, addr
);
2401 env
->active_tc
.gpr
[reg
] = 1;
2408 env
->active_tc
.PC
+= 4;
2421 static int do_break(CPUMIPSState
*env
, target_siginfo_t
*info
,
2429 info
->si_signo
= TARGET_SIGFPE
;
2431 info
->si_code
= (code
== BRK_OVERFLOW
) ? FPE_INTOVF
: FPE_INTDIV
;
2432 queue_signal(env
, info
->si_signo
, &*info
);
2436 info
->si_signo
= TARGET_SIGTRAP
;
2438 queue_signal(env
, info
->si_signo
, &*info
);
2446 void cpu_loop(CPUMIPSState
*env
)
2448 CPUState
*cs
= CPU(mips_env_get_cpu(env
));
2449 target_siginfo_t info
;
2452 # ifdef TARGET_ABI_MIPSO32
2453 unsigned int syscall_num
;
2458 trapnr
= cpu_mips_exec(cs
);
2462 env
->active_tc
.PC
+= 4;
2463 # ifdef TARGET_ABI_MIPSO32
2464 syscall_num
= env
->active_tc
.gpr
[2] - 4000;
2465 if (syscall_num
>= sizeof(mips_syscall_args
)) {
2466 ret
= -TARGET_ENOSYS
;
2470 abi_ulong arg5
= 0, arg6
= 0, arg7
= 0, arg8
= 0;
2472 nb_args
= mips_syscall_args
[syscall_num
];
2473 sp_reg
= env
->active_tc
.gpr
[29];
2475 /* these arguments are taken from the stack */
2477 if ((ret
= get_user_ual(arg8
, sp_reg
+ 28)) != 0) {
2481 if ((ret
= get_user_ual(arg7
, sp_reg
+ 24)) != 0) {
2485 if ((ret
= get_user_ual(arg6
, sp_reg
+ 20)) != 0) {
2489 if ((ret
= get_user_ual(arg5
, sp_reg
+ 16)) != 0) {
2495 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2496 env
->active_tc
.gpr
[4],
2497 env
->active_tc
.gpr
[5],
2498 env
->active_tc
.gpr
[6],
2499 env
->active_tc
.gpr
[7],
2500 arg5
, arg6
, arg7
, arg8
);
2504 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2505 env
->active_tc
.gpr
[4], env
->active_tc
.gpr
[5],
2506 env
->active_tc
.gpr
[6], env
->active_tc
.gpr
[7],
2507 env
->active_tc
.gpr
[8], env
->active_tc
.gpr
[9],
2508 env
->active_tc
.gpr
[10], env
->active_tc
.gpr
[11]);
2510 if (ret
== -TARGET_QEMU_ESIGRETURN
) {
2511 /* Returning from a successful sigreturn syscall.
2512 Avoid clobbering register state. */
2515 if ((abi_ulong
)ret
>= (abi_ulong
)-1133) {
2516 env
->active_tc
.gpr
[7] = 1; /* error flag */
2519 env
->active_tc
.gpr
[7] = 0; /* error flag */
2521 env
->active_tc
.gpr
[2] = ret
;
2527 info
.si_signo
= TARGET_SIGSEGV
;
2529 /* XXX: check env->error_code */
2530 info
.si_code
= TARGET_SEGV_MAPERR
;
2531 info
._sifields
._sigfault
._addr
= env
->CP0_BadVAddr
;
2532 queue_signal(env
, info
.si_signo
, &info
);
2536 info
.si_signo
= TARGET_SIGILL
;
2539 queue_signal(env
, info
.si_signo
, &info
);
2541 case EXCP_INTERRUPT
:
2542 /* just indicate that signals should be handled asap */
2548 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2551 info
.si_signo
= sig
;
2553 info
.si_code
= TARGET_TRAP_BRKPT
;
2554 queue_signal(env
, info
.si_signo
, &info
);
2559 if (do_store_exclusive(env
)) {
2560 info
.si_signo
= TARGET_SIGSEGV
;
2562 info
.si_code
= TARGET_SEGV_MAPERR
;
2563 info
._sifields
._sigfault
._addr
= env
->active_tc
.PC
;
2564 queue_signal(env
, info
.si_signo
, &info
);
2568 info
.si_signo
= TARGET_SIGILL
;
2570 info
.si_code
= TARGET_ILL_ILLOPC
;
2571 queue_signal(env
, info
.si_signo
, &info
);
2573 /* The code below was inspired by the MIPS Linux kernel trap
2574 * handling code in arch/mips/kernel/traps.c.
2578 abi_ulong trap_instr
;
2581 if (env
->hflags
& MIPS_HFLAG_M16
) {
2582 if (env
->insn_flags
& ASE_MICROMIPS
) {
2583 /* microMIPS mode */
2584 ret
= get_user_u16(trap_instr
, env
->active_tc
.PC
);
2589 if ((trap_instr
>> 10) == 0x11) {
2590 /* 16-bit instruction */
2591 code
= trap_instr
& 0xf;
2593 /* 32-bit instruction */
2596 ret
= get_user_u16(instr_lo
,
2597 env
->active_tc
.PC
+ 2);
2601 trap_instr
= (trap_instr
<< 16) | instr_lo
;
2602 code
= ((trap_instr
>> 6) & ((1 << 20) - 1));
2603 /* Unfortunately, microMIPS also suffers from
2604 the old assembler bug... */
2605 if (code
>= (1 << 10)) {
2611 ret
= get_user_u16(trap_instr
, env
->active_tc
.PC
);
2615 code
= (trap_instr
>> 6) & 0x3f;
2618 ret
= get_user_u32(trap_instr
, env
->active_tc
.PC
);
2623 /* As described in the original Linux kernel code, the
2624 * below checks on 'code' are to work around an old
2627 code
= ((trap_instr
>> 6) & ((1 << 20) - 1));
2628 if (code
>= (1 << 10)) {
2633 if (do_break(env
, &info
, code
) != 0) {
2640 abi_ulong trap_instr
;
2641 unsigned int code
= 0;
2643 if (env
->hflags
& MIPS_HFLAG_M16
) {
2644 /* microMIPS mode */
2647 ret
= get_user_u16(instr
[0], env
->active_tc
.PC
) ||
2648 get_user_u16(instr
[1], env
->active_tc
.PC
+ 2);
2650 trap_instr
= (instr
[0] << 16) | instr
[1];
2652 ret
= get_user_u32(trap_instr
, env
->active_tc
.PC
);
2659 /* The immediate versions don't provide a code. */
2660 if (!(trap_instr
& 0xFC000000)) {
2661 if (env
->hflags
& MIPS_HFLAG_M16
) {
2662 /* microMIPS mode */
2663 code
= ((trap_instr
>> 12) & ((1 << 4) - 1));
2665 code
= ((trap_instr
>> 6) & ((1 << 10) - 1));
2669 if (do_break(env
, &info
, code
) != 0) {
2676 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
2678 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2681 process_pending_signals(env
);
2686 #ifdef TARGET_OPENRISC
2688 void cpu_loop(CPUOpenRISCState
*env
)
2690 CPUState
*cs
= CPU(openrisc_env_get_cpu(env
));
2695 trapnr
= cpu_openrisc_exec(cs
);
2701 qemu_log("\nReset request, exit, pc is %#x\n", env
->pc
);
2705 qemu_log("\nBus error, exit, pc is %#x\n", env
->pc
);
2706 gdbsig
= TARGET_SIGBUS
;
2710 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2711 gdbsig
= TARGET_SIGSEGV
;
2714 qemu_log("\nTick time interrupt pc is %#x\n", env
->pc
);
2717 qemu_log("\nAlignment pc is %#x\n", env
->pc
);
2718 gdbsig
= TARGET_SIGBUS
;
2721 qemu_log("\nIllegal instructionpc is %#x\n", env
->pc
);
2722 gdbsig
= TARGET_SIGILL
;
2725 qemu_log("\nExternal interruptpc is %#x\n", env
->pc
);
2729 qemu_log("\nTLB miss\n");
2732 qemu_log("\nRange\n");
2733 gdbsig
= TARGET_SIGSEGV
;
2736 env
->pc
+= 4; /* 0xc00; */
2737 env
->gpr
[11] = do_syscall(env
,
2738 env
->gpr
[11], /* return value */
2739 env
->gpr
[3], /* r3 - r7 are params */
2747 qemu_log("\nFloating point error\n");
2750 qemu_log("\nTrap\n");
2751 gdbsig
= TARGET_SIGTRAP
;
2757 qemu_log("\nqemu: unhandled CPU exception %#x - aborting\n",
2759 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2760 gdbsig
= TARGET_SIGILL
;
2764 gdb_handlesig(cs
, gdbsig
);
2765 if (gdbsig
!= TARGET_SIGTRAP
) {
2770 process_pending_signals(env
);
2774 #endif /* TARGET_OPENRISC */
2777 void cpu_loop(CPUSH4State
*env
)
2779 CPUState
*cs
= CPU(sh_env_get_cpu(env
));
2781 target_siginfo_t info
;
2785 trapnr
= cpu_sh4_exec(cs
);
2791 ret
= do_syscall(env
,
2800 env
->gregs
[0] = ret
;
2802 case EXCP_INTERRUPT
:
2803 /* just indicate that signals should be handled asap */
2809 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2812 info
.si_signo
= sig
;
2814 info
.si_code
= TARGET_TRAP_BRKPT
;
2815 queue_signal(env
, info
.si_signo
, &info
);
2821 info
.si_signo
= TARGET_SIGSEGV
;
2823 info
.si_code
= TARGET_SEGV_MAPERR
;
2824 info
._sifields
._sigfault
._addr
= env
->tea
;
2825 queue_signal(env
, info
.si_signo
, &info
);
2829 printf ("Unhandled trap: 0x%x\n", trapnr
);
2830 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2833 process_pending_signals (env
);
2839 void cpu_loop(CPUCRISState
*env
)
2841 CPUState
*cs
= CPU(cris_env_get_cpu(env
));
2843 target_siginfo_t info
;
2847 trapnr
= cpu_cris_exec(cs
);
2852 info
.si_signo
= TARGET_SIGSEGV
;
2854 /* XXX: check env->error_code */
2855 info
.si_code
= TARGET_SEGV_MAPERR
;
2856 info
._sifields
._sigfault
._addr
= env
->pregs
[PR_EDA
];
2857 queue_signal(env
, info
.si_signo
, &info
);
2860 case EXCP_INTERRUPT
:
2861 /* just indicate that signals should be handled asap */
2864 ret
= do_syscall(env
,
2873 env
->regs
[10] = ret
;
2879 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2882 info
.si_signo
= sig
;
2884 info
.si_code
= TARGET_TRAP_BRKPT
;
2885 queue_signal(env
, info
.si_signo
, &info
);
2890 printf ("Unhandled trap: 0x%x\n", trapnr
);
2891 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2894 process_pending_signals (env
);
2899 #ifdef TARGET_MICROBLAZE
2900 void cpu_loop(CPUMBState
*env
)
2902 CPUState
*cs
= CPU(mb_env_get_cpu(env
));
2904 target_siginfo_t info
;
2908 trapnr
= cpu_mb_exec(cs
);
2913 info
.si_signo
= TARGET_SIGSEGV
;
2915 /* XXX: check env->error_code */
2916 info
.si_code
= TARGET_SEGV_MAPERR
;
2917 info
._sifields
._sigfault
._addr
= 0;
2918 queue_signal(env
, info
.si_signo
, &info
);
2921 case EXCP_INTERRUPT
:
2922 /* just indicate that signals should be handled asap */
2925 /* Return address is 4 bytes after the call. */
2927 env
->sregs
[SR_PC
] = env
->regs
[14];
2928 ret
= do_syscall(env
,
2940 env
->regs
[17] = env
->sregs
[SR_PC
] + 4;
2941 if (env
->iflags
& D_FLAG
) {
2942 env
->sregs
[SR_ESR
] |= 1 << 12;
2943 env
->sregs
[SR_PC
] -= 4;
2944 /* FIXME: if branch was immed, replay the imm as well. */
2947 env
->iflags
&= ~(IMM_FLAG
| D_FLAG
);
2949 switch (env
->sregs
[SR_ESR
] & 31) {
2950 case ESR_EC_DIVZERO
:
2951 info
.si_signo
= TARGET_SIGFPE
;
2953 info
.si_code
= TARGET_FPE_FLTDIV
;
2954 info
._sifields
._sigfault
._addr
= 0;
2955 queue_signal(env
, info
.si_signo
, &info
);
2958 info
.si_signo
= TARGET_SIGFPE
;
2960 if (env
->sregs
[SR_FSR
] & FSR_IO
) {
2961 info
.si_code
= TARGET_FPE_FLTINV
;
2963 if (env
->sregs
[SR_FSR
] & FSR_DZ
) {
2964 info
.si_code
= TARGET_FPE_FLTDIV
;
2966 info
._sifields
._sigfault
._addr
= 0;
2967 queue_signal(env
, info
.si_signo
, &info
);
2970 printf ("Unhandled hw-exception: 0x%x\n",
2971 env
->sregs
[SR_ESR
] & ESR_EC_MASK
);
2972 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2981 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2984 info
.si_signo
= sig
;
2986 info
.si_code
= TARGET_TRAP_BRKPT
;
2987 queue_signal(env
, info
.si_signo
, &info
);
2992 printf ("Unhandled trap: 0x%x\n", trapnr
);
2993 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2996 process_pending_signals (env
);
3003 void cpu_loop(CPUM68KState
*env
)
3005 CPUState
*cs
= CPU(m68k_env_get_cpu(env
));
3008 target_siginfo_t info
;
3009 TaskState
*ts
= cs
->opaque
;
3013 trapnr
= cpu_m68k_exec(cs
);
3018 if (ts
->sim_syscalls
) {
3020 get_user_u16(nr
, env
->pc
+ 2);
3022 do_m68k_simcall(env
, nr
);
3028 case EXCP_HALT_INSN
:
3029 /* Semihosing syscall. */
3031 do_m68k_semihosting(env
, env
->dregs
[0]);
3035 case EXCP_UNSUPPORTED
:
3037 info
.si_signo
= TARGET_SIGILL
;
3039 info
.si_code
= TARGET_ILL_ILLOPN
;
3040 info
._sifields
._sigfault
._addr
= env
->pc
;
3041 queue_signal(env
, info
.si_signo
, &info
);
3045 ts
->sim_syscalls
= 0;
3048 env
->dregs
[0] = do_syscall(env
,
3059 case EXCP_INTERRUPT
:
3060 /* just indicate that signals should be handled asap */
3064 info
.si_signo
= TARGET_SIGSEGV
;
3066 /* XXX: check env->error_code */
3067 info
.si_code
= TARGET_SEGV_MAPERR
;
3068 info
._sifields
._sigfault
._addr
= env
->mmu
.ar
;
3069 queue_signal(env
, info
.si_signo
, &info
);
3076 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3079 info
.si_signo
= sig
;
3081 info
.si_code
= TARGET_TRAP_BRKPT
;
3082 queue_signal(env
, info
.si_signo
, &info
);
3087 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
3089 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3092 process_pending_signals(env
);
3095 #endif /* TARGET_M68K */
3098 static void do_store_exclusive(CPUAlphaState
*env
, int reg
, int quad
)
3100 target_ulong addr
, val
, tmp
;
3101 target_siginfo_t info
;
3104 addr
= env
->lock_addr
;
3105 tmp
= env
->lock_st_addr
;
3106 env
->lock_addr
= -1;
3107 env
->lock_st_addr
= 0;
3113 if (quad
? get_user_s64(val
, addr
) : get_user_s32(val
, addr
)) {
3117 if (val
== env
->lock_value
) {
3119 if (quad
? put_user_u64(tmp
, addr
) : put_user_u32(tmp
, addr
)) {
3136 info
.si_signo
= TARGET_SIGSEGV
;
3138 info
.si_code
= TARGET_SEGV_MAPERR
;
3139 info
._sifields
._sigfault
._addr
= addr
;
3140 queue_signal(env
, TARGET_SIGSEGV
, &info
);
3143 void cpu_loop(CPUAlphaState
*env
)
3145 CPUState
*cs
= CPU(alpha_env_get_cpu(env
));
3147 target_siginfo_t info
;
3152 trapnr
= cpu_alpha_exec(cs
);
3155 /* All of the traps imply a transition through PALcode, which
3156 implies an REI instruction has been executed. Which means
3157 that the intr_flag should be cleared. */
3162 fprintf(stderr
, "Reset requested. Exit\n");
3166 fprintf(stderr
, "Machine check exception. Exit\n");
3169 case EXCP_SMP_INTERRUPT
:
3170 case EXCP_CLK_INTERRUPT
:
3171 case EXCP_DEV_INTERRUPT
:
3172 fprintf(stderr
, "External interrupt. Exit\n");
3176 env
->lock_addr
= -1;
3177 info
.si_signo
= TARGET_SIGSEGV
;
3179 info
.si_code
= (page_get_flags(env
->trap_arg0
) & PAGE_VALID
3180 ? TARGET_SEGV_ACCERR
: TARGET_SEGV_MAPERR
);
3181 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
3182 queue_signal(env
, info
.si_signo
, &info
);
3185 env
->lock_addr
= -1;
3186 info
.si_signo
= TARGET_SIGBUS
;
3188 info
.si_code
= TARGET_BUS_ADRALN
;
3189 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
3190 queue_signal(env
, info
.si_signo
, &info
);
3194 env
->lock_addr
= -1;
3195 info
.si_signo
= TARGET_SIGILL
;
3197 info
.si_code
= TARGET_ILL_ILLOPC
;
3198 info
._sifields
._sigfault
._addr
= env
->pc
;
3199 queue_signal(env
, info
.si_signo
, &info
);
3202 env
->lock_addr
= -1;
3203 info
.si_signo
= TARGET_SIGFPE
;
3205 info
.si_code
= TARGET_FPE_FLTINV
;
3206 info
._sifields
._sigfault
._addr
= env
->pc
;
3207 queue_signal(env
, info
.si_signo
, &info
);
3210 /* No-op. Linux simply re-enables the FPU. */
3213 env
->lock_addr
= -1;
3214 switch (env
->error_code
) {
3217 info
.si_signo
= TARGET_SIGTRAP
;
3219 info
.si_code
= TARGET_TRAP_BRKPT
;
3220 info
._sifields
._sigfault
._addr
= env
->pc
;
3221 queue_signal(env
, info
.si_signo
, &info
);
3225 info
.si_signo
= TARGET_SIGTRAP
;
3228 info
._sifields
._sigfault
._addr
= env
->pc
;
3229 queue_signal(env
, info
.si_signo
, &info
);
3233 trapnr
= env
->ir
[IR_V0
];
3234 sysret
= do_syscall(env
, trapnr
,
3235 env
->ir
[IR_A0
], env
->ir
[IR_A1
],
3236 env
->ir
[IR_A2
], env
->ir
[IR_A3
],
3237 env
->ir
[IR_A4
], env
->ir
[IR_A5
],
3239 if (trapnr
== TARGET_NR_sigreturn
3240 || trapnr
== TARGET_NR_rt_sigreturn
) {
3243 /* Syscall writes 0 to V0 to bypass error check, similar
3244 to how this is handled internal to Linux kernel.
3245 (Ab)use trapnr temporarily as boolean indicating error. */
3246 trapnr
= (env
->ir
[IR_V0
] != 0 && sysret
< 0);
3247 env
->ir
[IR_V0
] = (trapnr
? -sysret
: sysret
);
3248 env
->ir
[IR_A3
] = trapnr
;
3252 /* ??? We can probably elide the code using page_unprotect
3253 that is checking for self-modifying code. Instead we
3254 could simply call tb_flush here. Until we work out the
3255 changes required to turn off the extra write protection,
3256 this can be a no-op. */
3260 /* Handled in the translator for usermode. */
3264 /* Handled in the translator for usermode. */
3268 info
.si_signo
= TARGET_SIGFPE
;
3269 switch (env
->ir
[IR_A0
]) {
3270 case TARGET_GEN_INTOVF
:
3271 info
.si_code
= TARGET_FPE_INTOVF
;
3273 case TARGET_GEN_INTDIV
:
3274 info
.si_code
= TARGET_FPE_INTDIV
;
3276 case TARGET_GEN_FLTOVF
:
3277 info
.si_code
= TARGET_FPE_FLTOVF
;
3279 case TARGET_GEN_FLTUND
:
3280 info
.si_code
= TARGET_FPE_FLTUND
;
3282 case TARGET_GEN_FLTINV
:
3283 info
.si_code
= TARGET_FPE_FLTINV
;
3285 case TARGET_GEN_FLTINE
:
3286 info
.si_code
= TARGET_FPE_FLTRES
;
3288 case TARGET_GEN_ROPRAND
:
3292 info
.si_signo
= TARGET_SIGTRAP
;
3297 info
._sifields
._sigfault
._addr
= env
->pc
;
3298 queue_signal(env
, info
.si_signo
, &info
);
3305 info
.si_signo
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3306 if (info
.si_signo
) {
3307 env
->lock_addr
= -1;
3309 info
.si_code
= TARGET_TRAP_BRKPT
;
3310 queue_signal(env
, info
.si_signo
, &info
);
3315 do_store_exclusive(env
, env
->error_code
, trapnr
- EXCP_STL_C
);
3317 case EXCP_INTERRUPT
:
3318 /* Just indicate that signals should be handled asap. */
3321 printf ("Unhandled trap: 0x%x\n", trapnr
);
3322 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3325 process_pending_signals (env
);
3328 #endif /* TARGET_ALPHA */
3331 void cpu_loop(CPUS390XState
*env
)
3333 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
3335 target_siginfo_t info
;
3340 trapnr
= cpu_s390x_exec(cs
);
3343 case EXCP_INTERRUPT
:
3344 /* Just indicate that signals should be handled asap. */
3348 n
= env
->int_svc_code
;
3350 /* syscalls > 255 */
3353 env
->psw
.addr
+= env
->int_svc_ilen
;
3354 env
->regs
[2] = do_syscall(env
, n
, env
->regs
[2], env
->regs
[3],
3355 env
->regs
[4], env
->regs
[5],
3356 env
->regs
[6], env
->regs
[7], 0, 0);
3360 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3362 n
= TARGET_TRAP_BRKPT
;
3367 n
= env
->int_pgm_code
;
3370 case PGM_PRIVILEGED
:
3371 sig
= TARGET_SIGILL
;
3372 n
= TARGET_ILL_ILLOPC
;
3374 case PGM_PROTECTION
:
3375 case PGM_ADDRESSING
:
3376 sig
= TARGET_SIGSEGV
;
3377 /* XXX: check env->error_code */
3378 n
= TARGET_SEGV_MAPERR
;
3379 addr
= env
->__excp_addr
;
3382 case PGM_SPECIFICATION
:
3383 case PGM_SPECIAL_OP
:
3386 sig
= TARGET_SIGILL
;
3387 n
= TARGET_ILL_ILLOPN
;
3390 case PGM_FIXPT_OVERFLOW
:
3391 sig
= TARGET_SIGFPE
;
3392 n
= TARGET_FPE_INTOVF
;
3394 case PGM_FIXPT_DIVIDE
:
3395 sig
= TARGET_SIGFPE
;
3396 n
= TARGET_FPE_INTDIV
;
3400 n
= (env
->fpc
>> 8) & 0xff;
3402 /* compare-and-trap */
3405 /* An IEEE exception, simulated or otherwise. */
3407 n
= TARGET_FPE_FLTINV
;
3408 } else if (n
& 0x40) {
3409 n
= TARGET_FPE_FLTDIV
;
3410 } else if (n
& 0x20) {
3411 n
= TARGET_FPE_FLTOVF
;
3412 } else if (n
& 0x10) {
3413 n
= TARGET_FPE_FLTUND
;
3414 } else if (n
& 0x08) {
3415 n
= TARGET_FPE_FLTRES
;
3417 /* ??? Quantum exception; BFP, DFP error. */
3420 sig
= TARGET_SIGFPE
;
3425 fprintf(stderr
, "Unhandled program exception: %#x\n", n
);
3426 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3432 addr
= env
->psw
.addr
;
3434 info
.si_signo
= sig
;
3437 info
._sifields
._sigfault
._addr
= addr
;
3438 queue_signal(env
, info
.si_signo
, &info
);
3442 fprintf(stderr
, "Unhandled trap: 0x%x\n", trapnr
);
3443 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3446 process_pending_signals (env
);
3450 #endif /* TARGET_S390X */
3452 THREAD CPUState
*thread_cpu
;
3454 void task_settid(TaskState
*ts
)
3456 if (ts
->ts_tid
== 0) {
3457 ts
->ts_tid
= (pid_t
)syscall(SYS_gettid
);
3461 void stop_all_tasks(void)
3464 * We trust that when using NPTL, start_exclusive()
3465 * handles thread stopping correctly.
3470 /* Assumes contents are already zeroed. */
3471 void init_task_state(TaskState
*ts
)
3476 ts
->first_free
= ts
->sigqueue_table
;
3477 for (i
= 0; i
< MAX_SIGQUEUE_SIZE
- 1; i
++) {
3478 ts
->sigqueue_table
[i
].next
= &ts
->sigqueue_table
[i
+ 1];
3480 ts
->sigqueue_table
[i
].next
= NULL
;
3483 CPUArchState
*cpu_copy(CPUArchState
*env
)
3485 CPUState
*cpu
= ENV_GET_CPU(env
);
3486 CPUState
*new_cpu
= cpu_init(cpu_model
);
3487 CPUArchState
*new_env
= new_cpu
->env_ptr
;
3491 /* Reset non arch specific state */
3494 memcpy(new_env
, env
, sizeof(CPUArchState
));
3496 /* Clone all break/watchpoints.
3497 Note: Once we support ptrace with hw-debug register access, make sure
3498 BP_CPU break/watchpoints are handled correctly on clone. */
3499 QTAILQ_INIT(&new_cpu
->breakpoints
);
3500 QTAILQ_INIT(&new_cpu
->watchpoints
);
3501 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
3502 cpu_breakpoint_insert(new_cpu
, bp
->pc
, bp
->flags
, NULL
);
3504 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
3505 cpu_watchpoint_insert(new_cpu
, wp
->vaddr
, wp
->len
, wp
->flags
, NULL
);
3511 static void handle_arg_help(const char *arg
)
3516 static void handle_arg_log(const char *arg
)
3520 mask
= qemu_str_to_log_mask(arg
);
3522 qemu_print_log_usage(stdout
);
3528 static void handle_arg_log_filename(const char *arg
)
3530 qemu_set_log_filename(arg
);
3533 static void handle_arg_set_env(const char *arg
)
3535 char *r
, *p
, *token
;
3536 r
= p
= strdup(arg
);
3537 while ((token
= strsep(&p
, ",")) != NULL
) {
3538 if (envlist_setenv(envlist
, token
) != 0) {
3545 static void handle_arg_unset_env(const char *arg
)
3547 char *r
, *p
, *token
;
3548 r
= p
= strdup(arg
);
3549 while ((token
= strsep(&p
, ",")) != NULL
) {
3550 if (envlist_unsetenv(envlist
, token
) != 0) {
3557 static void handle_arg_argv0(const char *arg
)
3559 argv0
= strdup(arg
);
3562 static void handle_arg_stack_size(const char *arg
)
3565 guest_stack_size
= strtoul(arg
, &p
, 0);
3566 if (guest_stack_size
== 0) {
3571 guest_stack_size
*= 1024 * 1024;
3572 } else if (*p
== 'k' || *p
== 'K') {
3573 guest_stack_size
*= 1024;
3577 static void handle_arg_ld_prefix(const char *arg
)
3579 interp_prefix
= strdup(arg
);
3582 static void handle_arg_pagesize(const char *arg
)
3584 qemu_host_page_size
= atoi(arg
);
3585 if (qemu_host_page_size
== 0 ||
3586 (qemu_host_page_size
& (qemu_host_page_size
- 1)) != 0) {
3587 fprintf(stderr
, "page size must be a power of two\n");
3592 static void handle_arg_randseed(const char *arg
)
3594 unsigned long long seed
;
3596 if (parse_uint_full(arg
, &seed
, 0) != 0 || seed
> UINT_MAX
) {
3597 fprintf(stderr
, "Invalid seed number: %s\n", arg
);
3603 static void handle_arg_gdb(const char *arg
)
3605 gdbstub_port
= atoi(arg
);
3608 static void handle_arg_uname(const char *arg
)
3610 qemu_uname_release
= strdup(arg
);
3613 static void handle_arg_cpu(const char *arg
)
3615 cpu_model
= strdup(arg
);
3616 if (cpu_model
== NULL
|| is_help_option(cpu_model
)) {
3617 /* XXX: implement xxx_cpu_list for targets that still miss it */
3618 #if defined(cpu_list_id)
3619 cpu_list_id(stdout
, &fprintf
, "");
3620 #elif defined(cpu_list)
3621 cpu_list(stdout
, &fprintf
); /* deprecated */
3623 /* TODO: add cpu selection for alpha, microblaze, unicore32, s390x. */
3624 printf("Target ignores cpu selection\n");
3630 static void handle_arg_guest_base(const char *arg
)
3632 guest_base
= strtol(arg
, NULL
, 0);
3633 have_guest_base
= 1;
3636 static void handle_arg_reserved_va(const char *arg
)
3640 reserved_va
= strtoul(arg
, &p
, 0);
3654 unsigned long unshifted
= reserved_va
;
3656 reserved_va
<<= shift
;
3657 if (((reserved_va
>> shift
) != unshifted
)
3658 #if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
3659 || (reserved_va
> (1ul << TARGET_VIRT_ADDR_SPACE_BITS
))
3662 fprintf(stderr
, "Reserved virtual address too big\n");
3667 fprintf(stderr
, "Unrecognised -R size suffix '%s'\n", p
);
3672 static void handle_arg_singlestep(const char *arg
)
3677 static void handle_arg_strace(const char *arg
)
3682 static void handle_arg_version(const char *arg
)
3684 printf("qemu-" TARGET_NAME
" version " QEMU_VERSION QEMU_PKGVERSION
3685 ", Copyright (c) 2003-2008 Fabrice Bellard\n");
3689 struct qemu_argument
{
3693 void (*handle_opt
)(const char *arg
);
3694 const char *example
;
3698 static const struct qemu_argument arg_table
[] = {
3699 {"h", "", false, handle_arg_help
,
3700 "", "print this help"},
3701 {"g", "QEMU_GDB", true, handle_arg_gdb
,
3702 "port", "wait gdb connection to 'port'"},
3703 {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix
,
3704 "path", "set the elf interpreter prefix to 'path'"},
3705 {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size
,
3706 "size", "set the stack size to 'size' bytes"},
3707 {"cpu", "QEMU_CPU", true, handle_arg_cpu
,
3708 "model", "select CPU (-cpu help for list)"},
3709 {"E", "QEMU_SET_ENV", true, handle_arg_set_env
,
3710 "var=value", "sets targets environment variable (see below)"},
3711 {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env
,
3712 "var", "unsets targets environment variable (see below)"},
3713 {"0", "QEMU_ARGV0", true, handle_arg_argv0
,
3714 "argv0", "forces target process argv[0] to be 'argv0'"},
3715 {"r", "QEMU_UNAME", true, handle_arg_uname
,
3716 "uname", "set qemu uname release string to 'uname'"},
3717 {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base
,
3718 "address", "set guest_base address to 'address'"},
3719 {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va
,
3720 "size", "reserve 'size' bytes for guest virtual address space"},
3721 {"d", "QEMU_LOG", true, handle_arg_log
,
3722 "item[,...]", "enable logging of specified items "
3723 "(use '-d help' for a list of items)"},
3724 {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename
,
3725 "logfile", "write logs to 'logfile' (default stderr)"},
3726 {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize
,
3727 "pagesize", "set the host page size to 'pagesize'"},
3728 {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep
,
3729 "", "run in singlestep mode"},
3730 {"strace", "QEMU_STRACE", false, handle_arg_strace
,
3731 "", "log system calls"},
3732 {"seed", "QEMU_RAND_SEED", true, handle_arg_randseed
,
3733 "", "Seed for pseudo-random number generator"},
3734 {"version", "QEMU_VERSION", false, handle_arg_version
,
3735 "", "display version information and exit"},
3736 {NULL
, NULL
, false, NULL
, NULL
, NULL
}
3739 static void QEMU_NORETURN
usage(void)
3741 const struct qemu_argument
*arginfo
;
3745 printf("usage: qemu-" TARGET_NAME
" [options] program [arguments...]\n"
3746 "Linux CPU emulator (compiled for " TARGET_NAME
" emulation)\n"
3748 "Options and associated environment variables:\n"
3751 /* Calculate column widths. We must always have at least enough space
3752 * for the column header.
3754 maxarglen
= strlen("Argument");
3755 maxenvlen
= strlen("Env-variable");
3757 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
3758 int arglen
= strlen(arginfo
->argv
);
3759 if (arginfo
->has_arg
) {
3760 arglen
+= strlen(arginfo
->example
) + 1;
3762 if (strlen(arginfo
->env
) > maxenvlen
) {
3763 maxenvlen
= strlen(arginfo
->env
);
3765 if (arglen
> maxarglen
) {
3770 printf("%-*s %-*s Description\n", maxarglen
+1, "Argument",
3771 maxenvlen
, "Env-variable");
3773 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
3774 if (arginfo
->has_arg
) {
3775 printf("-%s %-*s %-*s %s\n", arginfo
->argv
,
3776 (int)(maxarglen
- strlen(arginfo
->argv
) - 1),
3777 arginfo
->example
, maxenvlen
, arginfo
->env
, arginfo
->help
);
3779 printf("-%-*s %-*s %s\n", maxarglen
, arginfo
->argv
,
3780 maxenvlen
, arginfo
->env
,
3787 "QEMU_LD_PREFIX = %s\n"
3788 "QEMU_STACK_SIZE = %ld byte\n",
3793 "You can use -E and -U options or the QEMU_SET_ENV and\n"
3794 "QEMU_UNSET_ENV environment variables to set and unset\n"
3795 "environment variables for the target process.\n"
3796 "It is possible to provide several variables by separating them\n"
3797 "by commas in getsubopt(3) style. Additionally it is possible to\n"
3798 "provide the -E and -U options multiple times.\n"
3799 "The following lines are equivalent:\n"
3800 " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
3801 " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
3802 " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
3803 "Note that if you provide several changes to a single variable\n"
3804 "the last change will stay in effect.\n");
3809 static int parse_args(int argc
, char **argv
)
3813 const struct qemu_argument
*arginfo
;
3815 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
3816 if (arginfo
->env
== NULL
) {
3820 r
= getenv(arginfo
->env
);
3822 arginfo
->handle_opt(r
);
3828 if (optind
>= argc
) {
3837 if (!strcmp(r
, "-")) {
3841 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
3842 if (!strcmp(r
, arginfo
->argv
)) {
3843 if (arginfo
->has_arg
) {
3844 if (optind
>= argc
) {
3847 arginfo
->handle_opt(argv
[optind
]);
3850 arginfo
->handle_opt(NULL
);
3856 /* no option matched the current argv */
3857 if (arginfo
->handle_opt
== NULL
) {
3862 if (optind
>= argc
) {
3866 filename
= argv
[optind
];
3867 exec_path
= argv
[optind
];
3872 int main(int argc
, char **argv
)
3874 struct target_pt_regs regs1
, *regs
= ®s1
;
3875 struct image_info info1
, *info
= &info1
;
3876 struct linux_binprm bprm
;
3881 char **target_environ
, **wrk
;
3888 module_call_init(MODULE_INIT_QOM
);
3890 if ((envlist
= envlist_create()) == NULL
) {
3891 (void) fprintf(stderr
, "Unable to allocate envlist\n");
3895 /* add current environment into the list */
3896 for (wrk
= environ
; *wrk
!= NULL
; wrk
++) {
3897 (void) envlist_setenv(envlist
, *wrk
);
3900 /* Read the stack limit from the kernel. If it's "unlimited",
3901 then we can do little else besides use the default. */
3904 if (getrlimit(RLIMIT_STACK
, &lim
) == 0
3905 && lim
.rlim_cur
!= RLIM_INFINITY
3906 && lim
.rlim_cur
== (target_long
)lim
.rlim_cur
) {
3907 guest_stack_size
= lim
.rlim_cur
;
3912 #if defined(cpudef_setup)
3913 cpudef_setup(); /* parse cpu definitions in target config file (TBD) */
3918 optind
= parse_args(argc
, argv
);
3921 memset(regs
, 0, sizeof(struct target_pt_regs
));
3923 /* Zero out image_info */
3924 memset(info
, 0, sizeof(struct image_info
));
3926 memset(&bprm
, 0, sizeof (bprm
));
3928 /* Scan interp_prefix dir for replacement files. */
3929 init_paths(interp_prefix
);
3931 init_qemu_uname_release();
3933 if (cpu_model
== NULL
) {
3934 #if defined(TARGET_I386)
3935 #ifdef TARGET_X86_64
3936 cpu_model
= "qemu64";
3938 cpu_model
= "qemu32";
3940 #elif defined(TARGET_ARM)
3942 #elif defined(TARGET_UNICORE32)
3944 #elif defined(TARGET_M68K)
3946 #elif defined(TARGET_SPARC)
3947 #ifdef TARGET_SPARC64
3948 cpu_model
= "TI UltraSparc II";
3950 cpu_model
= "Fujitsu MB86904";
3952 #elif defined(TARGET_MIPS)
3953 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
3958 #elif defined TARGET_OPENRISC
3959 cpu_model
= "or1200";
3960 #elif defined(TARGET_PPC)
3961 # ifdef TARGET_PPC64
3962 cpu_model
= "POWER7";
3966 #elif defined TARGET_SH4
3967 cpu_model
= TYPE_SH7785_CPU
;
3973 /* NOTE: we need to init the CPU at this stage to get
3974 qemu_host_page_size */
3975 cpu
= cpu_init(cpu_model
);
3977 fprintf(stderr
, "Unable to find CPU definition\n");
3985 if (getenv("QEMU_STRACE")) {
3989 if (getenv("QEMU_RAND_SEED")) {
3990 handle_arg_randseed(getenv("QEMU_RAND_SEED"));
3993 target_environ
= envlist_to_environ(envlist
, NULL
);
3994 envlist_free(envlist
);
3997 * Now that page sizes are configured in cpu_init() we can do
3998 * proper page alignment for guest_base.
4000 guest_base
= HOST_PAGE_ALIGN(guest_base
);
4002 if (reserved_va
|| have_guest_base
) {
4003 guest_base
= init_guest_space(guest_base
, reserved_va
, 0,
4005 if (guest_base
== (unsigned long)-1) {
4006 fprintf(stderr
, "Unable to reserve 0x%lx bytes of virtual address "
4007 "space for use as guest address space (check your virtual "
4008 "memory ulimit setting or reserve less using -R option)\n",
4014 mmap_next_start
= reserved_va
;
4019 * Read in mmap_min_addr kernel parameter. This value is used
4020 * When loading the ELF image to determine whether guest_base
4021 * is needed. It is also used in mmap_find_vma.
4026 if ((fp
= fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL
) {
4028 if (fscanf(fp
, "%lu", &tmp
) == 1) {
4029 mmap_min_addr
= tmp
;
4030 qemu_log("host mmap_min_addr=0x%lx\n", mmap_min_addr
);
4037 * Prepare copy of argv vector for target.
4039 target_argc
= argc
- optind
;
4040 target_argv
= calloc(target_argc
+ 1, sizeof (char *));
4041 if (target_argv
== NULL
) {
4042 (void) fprintf(stderr
, "Unable to allocate memory for target_argv\n");
4047 * If argv0 is specified (using '-0' switch) we replace
4048 * argv[0] pointer with the given one.
4051 if (argv0
!= NULL
) {
4052 target_argv
[i
++] = strdup(argv0
);
4054 for (; i
< target_argc
; i
++) {
4055 target_argv
[i
] = strdup(argv
[optind
+ i
]);
4057 target_argv
[target_argc
] = NULL
;
4059 ts
= g_malloc0 (sizeof(TaskState
));
4060 init_task_state(ts
);
4061 /* build Task State */
4067 execfd
= qemu_getauxval(AT_EXECFD
);
4069 execfd
= open(filename
, O_RDONLY
);
4071 printf("Error while loading %s: %s\n", filename
, strerror(errno
));
4076 ret
= loader_exec(execfd
, filename
, target_argv
, target_environ
, regs
,
4079 printf("Error while loading %s: %s\n", filename
, strerror(-ret
));
4083 for (wrk
= target_environ
; *wrk
; wrk
++) {
4087 free(target_environ
);
4089 if (qemu_log_enabled()) {
4090 qemu_log("guest_base 0x%" PRIxPTR
"\n", guest_base
);
4093 qemu_log("start_brk 0x" TARGET_ABI_FMT_lx
"\n", info
->start_brk
);
4094 qemu_log("end_code 0x" TARGET_ABI_FMT_lx
"\n", info
->end_code
);
4095 qemu_log("start_code 0x" TARGET_ABI_FMT_lx
"\n",
4097 qemu_log("start_data 0x" TARGET_ABI_FMT_lx
"\n",
4099 qemu_log("end_data 0x" TARGET_ABI_FMT_lx
"\n", info
->end_data
);
4100 qemu_log("start_stack 0x" TARGET_ABI_FMT_lx
"\n",
4102 qemu_log("brk 0x" TARGET_ABI_FMT_lx
"\n", info
->brk
);
4103 qemu_log("entry 0x" TARGET_ABI_FMT_lx
"\n", info
->entry
);
4106 target_set_brk(info
->brk
);
4110 /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
4111 generating the prologue until now so that the prologue can take
4112 the real value of GUEST_BASE into account. */
4113 tcg_prologue_init(&tcg_ctx
);
4115 #if defined(TARGET_I386)
4116 env
->cr
[0] = CR0_PG_MASK
| CR0_WP_MASK
| CR0_PE_MASK
;
4117 env
->hflags
|= HF_PE_MASK
| HF_CPL_MASK
;
4118 if (env
->features
[FEAT_1_EDX
] & CPUID_SSE
) {
4119 env
->cr
[4] |= CR4_OSFXSR_MASK
;
4120 env
->hflags
|= HF_OSFXSR_MASK
;
4122 #ifndef TARGET_ABI32
4123 /* enable 64 bit mode if possible */
4124 if (!(env
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_LM
)) {
4125 fprintf(stderr
, "The selected x86 CPU does not support 64 bit mode\n");
4128 env
->cr
[4] |= CR4_PAE_MASK
;
4129 env
->efer
|= MSR_EFER_LMA
| MSR_EFER_LME
;
4130 env
->hflags
|= HF_LMA_MASK
;
4133 /* flags setup : we activate the IRQs by default as in user mode */
4134 env
->eflags
|= IF_MASK
;
4136 /* linux register setup */
4137 #ifndef TARGET_ABI32
4138 env
->regs
[R_EAX
] = regs
->rax
;
4139 env
->regs
[R_EBX
] = regs
->rbx
;
4140 env
->regs
[R_ECX
] = regs
->rcx
;
4141 env
->regs
[R_EDX
] = regs
->rdx
;
4142 env
->regs
[R_ESI
] = regs
->rsi
;
4143 env
->regs
[R_EDI
] = regs
->rdi
;
4144 env
->regs
[R_EBP
] = regs
->rbp
;
4145 env
->regs
[R_ESP
] = regs
->rsp
;
4146 env
->eip
= regs
->rip
;
4148 env
->regs
[R_EAX
] = regs
->eax
;
4149 env
->regs
[R_EBX
] = regs
->ebx
;
4150 env
->regs
[R_ECX
] = regs
->ecx
;
4151 env
->regs
[R_EDX
] = regs
->edx
;
4152 env
->regs
[R_ESI
] = regs
->esi
;
4153 env
->regs
[R_EDI
] = regs
->edi
;
4154 env
->regs
[R_EBP
] = regs
->ebp
;
4155 env
->regs
[R_ESP
] = regs
->esp
;
4156 env
->eip
= regs
->eip
;
4159 /* linux interrupt setup */
4160 #ifndef TARGET_ABI32
4161 env
->idt
.limit
= 511;
4163 env
->idt
.limit
= 255;
4165 env
->idt
.base
= target_mmap(0, sizeof(uint64_t) * (env
->idt
.limit
+ 1),
4166 PROT_READ
|PROT_WRITE
,
4167 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4168 idt_table
= g2h(env
->idt
.base
);
4191 /* linux segment setup */
4193 uint64_t *gdt_table
;
4194 env
->gdt
.base
= target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES
,
4195 PROT_READ
|PROT_WRITE
,
4196 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4197 env
->gdt
.limit
= sizeof(uint64_t) * TARGET_GDT_ENTRIES
- 1;
4198 gdt_table
= g2h(env
->gdt
.base
);
4200 write_dt(&gdt_table
[__USER_CS
>> 3], 0, 0xfffff,
4201 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4202 (3 << DESC_DPL_SHIFT
) | (0xa << DESC_TYPE_SHIFT
));
4204 /* 64 bit code segment */
4205 write_dt(&gdt_table
[__USER_CS
>> 3], 0, 0xfffff,
4206 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4208 (3 << DESC_DPL_SHIFT
) | (0xa << DESC_TYPE_SHIFT
));
4210 write_dt(&gdt_table
[__USER_DS
>> 3], 0, 0xfffff,
4211 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4212 (3 << DESC_DPL_SHIFT
) | (0x2 << DESC_TYPE_SHIFT
));
4214 cpu_x86_load_seg(env
, R_CS
, __USER_CS
);
4215 cpu_x86_load_seg(env
, R_SS
, __USER_DS
);
4217 cpu_x86_load_seg(env
, R_DS
, __USER_DS
);
4218 cpu_x86_load_seg(env
, R_ES
, __USER_DS
);
4219 cpu_x86_load_seg(env
, R_FS
, __USER_DS
);
4220 cpu_x86_load_seg(env
, R_GS
, __USER_DS
);
4221 /* This hack makes Wine work... */
4222 env
->segs
[R_FS
].selector
= 0;
4224 cpu_x86_load_seg(env
, R_DS
, 0);
4225 cpu_x86_load_seg(env
, R_ES
, 0);
4226 cpu_x86_load_seg(env
, R_FS
, 0);
4227 cpu_x86_load_seg(env
, R_GS
, 0);
4229 #elif defined(TARGET_AARCH64)
4233 if (!(arm_feature(env
, ARM_FEATURE_AARCH64
))) {
4235 "The selected ARM CPU does not support 64 bit mode\n");
4239 for (i
= 0; i
< 31; i
++) {
4240 env
->xregs
[i
] = regs
->regs
[i
];
4243 env
->xregs
[31] = regs
->sp
;
4245 #elif defined(TARGET_ARM)
4248 cpsr_write(env
, regs
->uregs
[16], 0xffffffff);
4249 for(i
= 0; i
< 16; i
++) {
4250 env
->regs
[i
] = regs
->uregs
[i
];
4252 #ifdef TARGET_WORDS_BIGENDIAN
4254 if (EF_ARM_EABI_VERSION(info
->elf_flags
) >= EF_ARM_EABI_VER4
4255 && (info
->elf_flags
& EF_ARM_BE8
)) {
4256 env
->bswap_code
= 1;
4260 #elif defined(TARGET_UNICORE32)
4263 cpu_asr_write(env
, regs
->uregs
[32], 0xffffffff);
4264 for (i
= 0; i
< 32; i
++) {
4265 env
->regs
[i
] = regs
->uregs
[i
];
4268 #elif defined(TARGET_SPARC)
4272 env
->npc
= regs
->npc
;
4274 for(i
= 0; i
< 8; i
++)
4275 env
->gregs
[i
] = regs
->u_regs
[i
];
4276 for(i
= 0; i
< 8; i
++)
4277 env
->regwptr
[i
] = regs
->u_regs
[i
+ 8];
4279 #elif defined(TARGET_PPC)
4283 #if defined(TARGET_PPC64)
4284 #if defined(TARGET_ABI32)
4285 env
->msr
&= ~((target_ulong
)1 << MSR_SF
);
4287 env
->msr
|= (target_ulong
)1 << MSR_SF
;
4290 env
->nip
= regs
->nip
;
4291 for(i
= 0; i
< 32; i
++) {
4292 env
->gpr
[i
] = regs
->gpr
[i
];
4295 #elif defined(TARGET_M68K)
4298 env
->dregs
[0] = regs
->d0
;
4299 env
->dregs
[1] = regs
->d1
;
4300 env
->dregs
[2] = regs
->d2
;
4301 env
->dregs
[3] = regs
->d3
;
4302 env
->dregs
[4] = regs
->d4
;
4303 env
->dregs
[5] = regs
->d5
;
4304 env
->dregs
[6] = regs
->d6
;
4305 env
->dregs
[7] = regs
->d7
;
4306 env
->aregs
[0] = regs
->a0
;
4307 env
->aregs
[1] = regs
->a1
;
4308 env
->aregs
[2] = regs
->a2
;
4309 env
->aregs
[3] = regs
->a3
;
4310 env
->aregs
[4] = regs
->a4
;
4311 env
->aregs
[5] = regs
->a5
;
4312 env
->aregs
[6] = regs
->a6
;
4313 env
->aregs
[7] = regs
->usp
;
4315 ts
->sim_syscalls
= 1;
4317 #elif defined(TARGET_MICROBLAZE)
4319 env
->regs
[0] = regs
->r0
;
4320 env
->regs
[1] = regs
->r1
;
4321 env
->regs
[2] = regs
->r2
;
4322 env
->regs
[3] = regs
->r3
;
4323 env
->regs
[4] = regs
->r4
;
4324 env
->regs
[5] = regs
->r5
;
4325 env
->regs
[6] = regs
->r6
;
4326 env
->regs
[7] = regs
->r7
;
4327 env
->regs
[8] = regs
->r8
;
4328 env
->regs
[9] = regs
->r9
;
4329 env
->regs
[10] = regs
->r10
;
4330 env
->regs
[11] = regs
->r11
;
4331 env
->regs
[12] = regs
->r12
;
4332 env
->regs
[13] = regs
->r13
;
4333 env
->regs
[14] = regs
->r14
;
4334 env
->regs
[15] = regs
->r15
;
4335 env
->regs
[16] = regs
->r16
;
4336 env
->regs
[17] = regs
->r17
;
4337 env
->regs
[18] = regs
->r18
;
4338 env
->regs
[19] = regs
->r19
;
4339 env
->regs
[20] = regs
->r20
;
4340 env
->regs
[21] = regs
->r21
;
4341 env
->regs
[22] = regs
->r22
;
4342 env
->regs
[23] = regs
->r23
;
4343 env
->regs
[24] = regs
->r24
;
4344 env
->regs
[25] = regs
->r25
;
4345 env
->regs
[26] = regs
->r26
;
4346 env
->regs
[27] = regs
->r27
;
4347 env
->regs
[28] = regs
->r28
;
4348 env
->regs
[29] = regs
->r29
;
4349 env
->regs
[30] = regs
->r30
;
4350 env
->regs
[31] = regs
->r31
;
4351 env
->sregs
[SR_PC
] = regs
->pc
;
4353 #elif defined(TARGET_MIPS)
4357 for(i
= 0; i
< 32; i
++) {
4358 env
->active_tc
.gpr
[i
] = regs
->regs
[i
];
4360 env
->active_tc
.PC
= regs
->cp0_epc
& ~(target_ulong
)1;
4361 if (regs
->cp0_epc
& 1) {
4362 env
->hflags
|= MIPS_HFLAG_M16
;
4365 #elif defined(TARGET_OPENRISC)
4369 for (i
= 0; i
< 32; i
++) {
4370 env
->gpr
[i
] = regs
->gpr
[i
];
4376 #elif defined(TARGET_SH4)
4380 for(i
= 0; i
< 16; i
++) {
4381 env
->gregs
[i
] = regs
->regs
[i
];
4385 #elif defined(TARGET_ALPHA)
4389 for(i
= 0; i
< 28; i
++) {
4390 env
->ir
[i
] = ((abi_ulong
*)regs
)[i
];
4392 env
->ir
[IR_SP
] = regs
->usp
;
4395 #elif defined(TARGET_CRIS)
4397 env
->regs
[0] = regs
->r0
;
4398 env
->regs
[1] = regs
->r1
;
4399 env
->regs
[2] = regs
->r2
;
4400 env
->regs
[3] = regs
->r3
;
4401 env
->regs
[4] = regs
->r4
;
4402 env
->regs
[5] = regs
->r5
;
4403 env
->regs
[6] = regs
->r6
;
4404 env
->regs
[7] = regs
->r7
;
4405 env
->regs
[8] = regs
->r8
;
4406 env
->regs
[9] = regs
->r9
;
4407 env
->regs
[10] = regs
->r10
;
4408 env
->regs
[11] = regs
->r11
;
4409 env
->regs
[12] = regs
->r12
;
4410 env
->regs
[13] = regs
->r13
;
4411 env
->regs
[14] = info
->start_stack
;
4412 env
->regs
[15] = regs
->acr
;
4413 env
->pc
= regs
->erp
;
4415 #elif defined(TARGET_S390X)
4418 for (i
= 0; i
< 16; i
++) {
4419 env
->regs
[i
] = regs
->gprs
[i
];
4421 env
->psw
.mask
= regs
->psw
.mask
;
4422 env
->psw
.addr
= regs
->psw
.addr
;
4425 #error unsupported target CPU
4428 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4429 ts
->stack_base
= info
->start_stack
;
4430 ts
->heap_base
= info
->brk
;
4431 /* This will be filled in on the first SYS_HEAPINFO call. */
4436 if (gdbserver_start(gdbstub_port
) < 0) {
4437 fprintf(stderr
, "qemu: could not open gdbserver on port %d\n",
4441 gdb_handlesig(cpu
, 0);