/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/syscall.h>
#include <sys/resource.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
static const char *filename;
static const char *argv0;
static int gdbstub_port;
static envlist_t *envlist;
static const char *cpu_model;
unsigned long mmap_min_addr;
unsigned long guest_base;
#define EXCP_DUMP(env, fmt, ...)                                        \
do {                                                                    \
    CPUState *cs = ENV_GET_CPU(env);                                    \
    fprintf(stderr, fmt , ## __VA_ARGS__);                              \
    cpu_dump_state(cs, stderr, fprintf, 0);                             \
    if (qemu_log_separate()) {                                          \
        qemu_log(fmt, ## __VA_ARGS__);                                  \
        log_cpu_state(cs, 0);                                           \
    }                                                                   \
} while (0)
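/*
 * Illustrative usage note (added for exposition, not part of the original
 * file): EXCP_DUMP() takes a printf-style format plus arguments, prints
 * them to stderr together with a CPU register dump, and mirrors the message
 * to the QEMU log when a separate log file is configured.  A typical caller
 * from the per-target cpu_loop() functions below looks like:
 *
 *     EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n",
 *               trapnr);
 *     abort();
 */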
#if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
/*
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
 *
 * This way we will never overlap with our own libraries or binaries or stack
 * or anything else that QEMU maps.
 */
# ifdef TARGET_MIPS
/* MIPS only supports 31 bits of virtual address space for user space */
unsigned long reserved_va = 0x77000000;
# else
unsigned long reserved_va = 0xf7000000;
# endif
#else
unsigned long reserved_va;
#endif
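/*
 * Illustrative sketch (an assumption for exposition, not code from the
 * original file): when reserved_va is non-zero, every valid guest address
 * lies inside [0, reserved_va) and maps to host memory at a fixed offset of
 * guest_base.  A hypothetical helper making that relationship explicit:
 */
#if 0
static inline void *example_guest_to_host(unsigned long guest_addr)
{
    /* Caller must ensure guest_addr < reserved_va when a reservation is in
     * use; otherwise the address is outside the reserved guest chunk. */
    return (void *)(guest_base + guest_addr);
}
#endif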
static void usage(int exitcode);

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;
/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
   we allocate a bigger stack.  Need a better solution, for example
   by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;
void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}
#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)
{
    return -1;
}
#endif
/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work.  */
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
static int pending_cpus;
/* Make sure everything is in a consistent state for calling fork().  */
void fork_start(void)
{
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    pthread_mutex_lock(&exclusive_lock);
    mmap_fork_start();
}
void fork_end(int child)
{
    mmap_fork_end(child);
    if (child) {
        CPUState *cpu, *next_cpu;
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads.  */
        CPU_FOREACH_SAFE(cpu, next_cpu) {
            if (cpu != thread_cpu) {
                QTAILQ_REMOVE(&cpus, cpu, node);
            }
        }
        pthread_mutex_init(&exclusive_lock, NULL);
        pthread_mutex_init(&cpu_list_mutex, NULL);
        pthread_cond_init(&exclusive_cond, NULL);
        pthread_cond_init(&exclusive_resume, NULL);
        qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
        gdbserver_fork(thread_cpu);
    } else {
        pthread_mutex_unlock(&exclusive_lock);
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
    }
}
/* Wait for pending exclusive operations to complete.  The exclusive lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
    }
}
/* Start an exclusive operation.
   Must only be called from outside cpu_arm_exec.  */
static inline void start_exclusive(void)
{
    CPUState *other_cpu;

    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();

    pending_cpus = 1;
    /* Make all other cpus stop executing.  */
    CPU_FOREACH(other_cpu) {
        if (other_cpu->running) {
            pending_cpus++;
            cpu_exit(other_cpu);
        }
    }
    if (pending_cpus > 1) {
        pthread_cond_wait(&exclusive_cond, &exclusive_lock);
    }
}
/* Finish an exclusive operation.  */
static inline void __attribute__((unused)) end_exclusive(void)
{
    pending_cpus = 0;
    pthread_cond_broadcast(&exclusive_resume);
    pthread_mutex_unlock(&exclusive_lock);
}
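/*
 * Illustrative usage sketch (added for exposition, not from the original
 * file): the primitives above implement the usual "stop the world" pattern.
 * A caller that needs to emulate an atomic guest operation brackets it with
 * start_exclusive()/end_exclusive(), while every emulation thread brackets
 * guest execution with cpu_exec_start()/cpu_exec_end():
 *
 *     start_exclusive();
 *     ... perform the read-modify-write on guest memory ...
 *     end_exclusive();
 *
 * pending_cpus counts the requester plus any CPUs still running guest code;
 * exclusive_cond wakes the requester once it is the only one left, and
 * exclusive_resume wakes everyone blocked in exclusive_idle() afterwards.
 */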
/* Wait for exclusive ops to finish, and begin cpu execution.  */
static inline void cpu_exec_start(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();
    cpu->running = true;
    pthread_mutex_unlock(&exclusive_lock);
}
/* Mark cpu as not executing, and release pending exclusive ops.  */
static inline void cpu_exec_end(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    cpu->running = false;
    if (pending_cpus > 1) {
        pending_cpus--;
        if (pending_cpus == 1) {
            pthread_cond_signal(&exclusive_cond);
        }
    }
    exclusive_idle();
    pthread_mutex_unlock(&exclusive_lock);
}
void cpu_list_lock(void)
{
    pthread_mutex_lock(&cpu_list_mutex);
}

void cpu_list_unlock(void)
{
    pthread_mutex_unlock(&cpu_list_mutex);
}
/***********************************************************/
/* CPUX86 core interface */

uint64_t cpu_get_tsc(CPUX86State *env)
{
    return cpu_get_host_ticks();
}
static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
                     int flags)
{
    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
static uint64_t *idt_table;
static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
{
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p[2] = tswap32(addr >> 32);
/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
}
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
{
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate(idt_table + n, 0, dpl, 0, 0);
}
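/*
 * Worked example (added for exposition, not in the original file): for the
 * IDT entries built above, addr and sel are 0 and only dpl/type matter, so
 * set_gate() reduces to
 *
 *     e1 = 0;
 *     e2 = 0x8000 | (dpl << 13) | (type << 8);
 *
 * i.e. bit 15 is the "present" bit, bits 14-13 hold the descriptor
 * privilege level and bits 11-8 the gate type.  With dpl == 3 and
 * type == 0 this gives e2 == 0xe000: a present, user-callable gate.
 */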
void cpu_loop(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_x86_exec(cs);
            /* linux syscall from int $0x80 */
            env->regs[R_EAX] = do_syscall(env,

            /* linux syscall from syscall instruction */
            env->regs[R_EAX] = do_syscall(env,
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            /* XXX: potential problem if ABI32 */
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_fault(env);
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGSEGV;
            if (!(env->error_code & 1))
                info.si_code = TARGET_SEGV_MAPERR;
            else
                info.si_code = TARGET_SEGV_ACCERR;
            info._sifields._sigfault._addr = env->cr[2];
            queue_signal(env, info.si_signo, &info);
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            /* division by zero */
            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_INTDIV;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, &info);
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            info.si_signo = TARGET_SIGTRAP;
            if (trapnr == EXCP01_DB) {
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->eip;
            } else {
                info.si_code = TARGET_SI_KERNEL;
                info._sifields._sigfault._addr = 0;
            }
            queue_signal(env, info.si_signo, &info);
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, &info);
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, &info);
            pc = env->segs[R_CS].base + env->eip;
            EXCP_DUMP(env, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
                      (long)pc, trapnr);
        process_pending_signals(env);
#define get_user_code_u32(x, gaddr, doswap)                          \
    ({ abi_long __r = get_user_u32((x), (gaddr));                    \
       if (!__r && (doswap)) { (x) = bswap32(x); } __r; })

#define get_user_code_u16(x, gaddr, doswap)                          \
    ({ abi_long __r = get_user_u16((x), (gaddr));                    \
       if (!__r && (doswap)) { (x) = bswap16(x); } __r; })
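/*
 * Usage note (added for exposition): these statement-expression macros
 * return the get_user status and byte-swap the fetched instruction in place
 * when 'doswap' is set, so the ARM loop below can fetch guest code words
 * independently of the configured code endianness, e.g.:
 *
 *     get_user_code_u32(opcode, env->regs[15], env->bswap_code);
 */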
/* Commpage handling -- there is no commpage for AArch64 */

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 *
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
    uint64_t oldval, newval, val;
    target_siginfo_t info;

    /* Based on the 32 bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations. However things like ldrex/strex are much harder so
       there's not much point trying.  */
    cpsr = cpsr_read(env);

    if (get_user_u64(oldval, env->regs[0])) {
        env->exception.vaddress = env->regs[0];
        goto segv;
    }

    if (get_user_u64(newval, env->regs[1])) {
        env->exception.vaddress = env->regs[1];
        goto segv;
    }

    if (get_user_u64(val, addr)) {
        env->exception.vaddress = addr;
        goto segv;
    }

    if (put_user_u64(val, addr)) {
        env->exception.vaddress = addr;
        goto segv;
    }

    cpsr_write(env, cpsr, CPSR_C);
segv:
    /* We get the PC of the entry address - which is as good as anything,
       on a real kernel what you get depends on which mode it uses. */
    info.si_signo = TARGET_SIGSEGV;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->exception.vaddress;
    queue_signal(env, info.si_signo, &info);
/* Handle a jump to the kernel code page.  */
static int do_kernel_trap(CPUARMState *env)
{
    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        /* ??? No-op. Will need to do better for SMP. */
        break;
    case 0xffff0fc0: /* __kernel_cmpxchg */
        /* XXX: This only works between threads, not between processes.
           It's probably possible to implement this with native host
           operations. However things like ldrex/strex are much harder so
           there's not much point trying.  */
        cpsr = cpsr_read(env);
        /* FIXME: This should SEGV if the access fails.  */
        if (get_user_u32(val, addr))
            val = ~env->regs[0];
        if (val == env->regs[0]) {
            val = env->regs[1];
            /* FIXME: Check for segfaults.  */
            put_user_u32(val, addr);
        }
        cpsr_write(env, cpsr, CPSR_C);
        break;
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = cpu_get_tls(env);
        break;
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);
        break;
    }
    /* Jump back to the caller.  */
    addr = env->regs[14];
    env->regs[15] = addr;
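/*
 * Illustrative guest-side sketch (an assumption for exposition, not part of
 * the original file): guest programs reach these helpers by calling fixed
 * addresses in the commpage, per the kernel_user_helpers.txt contract quoted
 * above.  For __kernel_cmpxchg64 at 0xffff0f60 that amounts to something
 * like:
 */
#if 0
typedef int (*kernel_cmpxchg64_fn)(const long long *oldval,
                                   const long long *newval,
                                   volatile long long *ptr);
#define __kernel_cmpxchg64 ((kernel_cmpxchg64_fn)0xffff0f60)
/* returns 0 (and sets C) when *ptr was changed from *oldval to *newval */
#endif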
/* Store exclusive handling for AArch32 */
static int do_strex(CPUARMState *env)
{
    if (env->exclusive_addr != env->exclusive_test) {
    /* We know we're always AArch32 so the address is in uint32_t range
     * unless it was the -1 exclusive-monitor-lost value (which won't
     * match exclusive_test above).
     */
    assert(extract64(env->exclusive_addr, 32, 32) == 0);
    addr = env->exclusive_addr;
    size = env->exclusive_info & 0xf;
    segv = get_user_u8(val, addr);
    segv = get_user_u16(val, addr);
    segv = get_user_u32(val, addr);
    env->exception.vaddress = addr;
    segv = get_user_u32(valhi, addr + 4);
    env->exception.vaddress = addr + 4;
    val = deposit64(val, 32, 32, valhi);
    if (val != env->exclusive_val) {
    val = env->regs[(env->exclusive_info >> 8) & 0xf];
    segv = put_user_u8(val, addr);
    segv = put_user_u16(val, addr);
    segv = put_user_u32(val, addr);
    env->exception.vaddress = addr;
    val = env->regs[(env->exclusive_info >> 12) & 0xf];
    segv = put_user_u32(val, addr + 4);
    env->exception.vaddress = addr + 4;
    env->regs[(env->exclusive_info >> 4) & 0xf] = rc;
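/*
 * Note added for exposition (derived from the code above, not a comment in
 * the original file): for AArch32 the translator packs the store-exclusive
 * parameters into env->exclusive_info as
 *
 *     bits [3:0]   access size
 *     bits [7:4]   register receiving the success/fail result (rc)
 *     bits [11:8]  register holding the value to store
 *     bits [15:12] register holding the high word for strexd
 *
 * which is exactly what the shifts by 4, 8 and 12 above decode.
 */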
void cpu_loop(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    unsigned int n, insn;
    target_siginfo_t info;

        trapnr = cpu_arm_exec(cs);

                TaskState *ts = cs->opaque;

                /* we handle the FPU emulation here, as Linux */
                /* we get the opcode */
                /* FIXME - what to do if get_user() fails? */
                get_user_code_u32(opcode, env->regs[15], env->bswap_code);

                rc = EmulateAll(opcode, &ts->fpa, env);
                if (rc == 0) { /* illegal instruction */
                    info.si_signo = TARGET_SIGILL;
                    info.si_code = TARGET_ILL_ILLOPN;
                    info._sifields._sigfault._addr = env->regs[15];
                    queue_signal(env, info.si_signo, &info);
                } else if (rc < 0) { /* FP exception */
                    /* translate softfloat flags to FPSR flags */
                    if (-rc & float_flag_invalid)
                        arm_fpe |= BIT_IOC;
                    if (-rc & float_flag_divbyzero)
                        arm_fpe |= BIT_DZC;
                    if (-rc & float_flag_overflow)
                        arm_fpe |= BIT_OFC;
                    if (-rc & float_flag_underflow)
                        arm_fpe |= BIT_UFC;
                    if (-rc & float_flag_inexact)
                        arm_fpe |= BIT_IXC;

                    FPSR fpsr = ts->fpa.fpsr;
                    //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);

                    if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
                        info.si_signo = TARGET_SIGFPE;
                        /* ordered by priority, least first */
                        if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES;
                        if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND;
                        if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF;
                        if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV;
                        if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV;
                        info._sifields._sigfault._addr = env->regs[15];
                        queue_signal(env, info.si_signo, &info);
                    /* accumulate unenabled exceptions */
                    if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC))
                        fpsr |= BIT_IXC;
                    if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC))
                        fpsr |= BIT_UFC;
                    if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC))
                        fpsr |= BIT_OFC;
                    if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC))
                        fpsr |= BIT_DZC;
                    if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC))
                        fpsr |= BIT_IOC;
                } else { /* everything OK */

            if (trapnr == EXCP_BKPT) {
                    /* FIXME - what to do if get_user() fails? */
                    get_user_code_u16(insn, env->regs[15], env->bswap_code);
                    /* FIXME - what to do if get_user() fails? */
                    get_user_code_u32(insn, env->regs[15], env->bswap_code);
                    n = (insn & 0xf) | ((insn >> 4) & 0xff0);
                    /* FIXME - what to do if get_user() fails? */
                    get_user_code_u16(insn, env->regs[15] - 2,
                                      env->bswap_code);
                    /* FIXME - what to do if get_user() fails? */
                    get_user_code_u32(insn, env->regs[15] - 4,
                                      env->bswap_code);

            if (n == ARM_NR_cacheflush) {
                /* nop */
            } else if (n == ARM_NR_semihosting
                       || n == ARM_NR_thumb_semihosting) {
                env->regs[0] = do_arm_semihosting (env);
            } else if (n == 0 || n >= ARM_SYSCALL_BASE || env->thumb) {
                /* linux syscall */
                if (env->thumb || n == 0) {
                    n = env->regs[7];
                } else {
                    n -= ARM_SYSCALL_BASE;
                }
                if ( n > ARM_NR_BASE) {
                    switch (n) {
                    case ARM_NR_cacheflush:
                        break;
                    case ARM_NR_set_tls:
                        cpu_set_tls(env, env->regs[0]);
                        break;
                    case ARM_NR_breakpoint:
                        env->regs[15] -= env->thumb ? 2 : 4;
                        break;
                    default:
                        gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
                                 n);
                        env->regs[0] = -TARGET_ENOSYS;
                        break;
                    }
                } else {
                    env->regs[0] = do_syscall(env,
            /* just indicate that signals should be handled asap */
            if (!do_strex(env)) {
            /* fall through for segv */
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            addr = env->exception.vaddress;
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = addr;
            queue_signal(env, info.si_signo, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, &info);
        case EXCP_KERNEL_TRAP:
            if (do_kernel_trap(env))
        EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
        process_pending_signals(env);
/* Handle AArch64 store-release exclusive
 *
 * rs = gets the status result of store exclusive
 * rt = is the register that is stored
 * rt2 = is the second register store (in STP)
 */
static int do_strex_a64(CPUARMState *env)
{
    /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
    size = extract32(env->exclusive_info, 0, 2);
    is_pair = extract32(env->exclusive_info, 2, 1);
    rs = extract32(env->exclusive_info, 4, 5);
    rt = extract32(env->exclusive_info, 9, 5);
    rt2 = extract32(env->exclusive_info, 14, 5);

    addr = env->exclusive_addr;

    if (addr != env->exclusive_test) {
    segv = get_user_u8(val, addr);
    segv = get_user_u16(val, addr);
    segv = get_user_u32(val, addr);
    segv = get_user_u64(val, addr);
        env->exception.vaddress = addr;
    if (val != env->exclusive_val) {
        segv = get_user_u32(val, addr + 4);
        segv = get_user_u64(val, addr + 8);
            env->exception.vaddress = addr + (size == 2 ? 4 : 8);
        if (val != env->exclusive_high) {
    /* handle the zero register */
    val = rt == 31 ? 0 : env->xregs[rt];
    segv = put_user_u8(val, addr);
    segv = put_user_u16(val, addr);
    segv = put_user_u32(val, addr);
    segv = put_user_u64(val, addr);
        /* handle the zero register */
        val = rt2 == 31 ? 0 : env->xregs[rt2];
        segv = put_user_u32(val, addr + 4);
        segv = put_user_u64(val, addr + 8);
            env->exception.vaddress = addr + (size == 2 ? 4 : 8);
    /* rs == 31 encodes a write to the ZR, thus throwing away
     * the status return. This is rather silly but valid.
     */
    /* instruction faulted, PC does not advance */
    /* either way a strex releases any exclusive lock we have */
    env->exclusive_addr = -1;
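/*
 * Note added for exposition: the AArch64 variant packs its parameters
 * differently, as the comment inside do_strex_a64() records:
 *
 *     size    = extract32(exclusive_info, 0, 2)    log2 of the access size
 *     is_pair = extract32(exclusive_info, 2, 1)    set for the pair forms
 *     rs      = extract32(exclusive_info, 4, 5)    status register
 *     rt      = extract32(exclusive_info, 9, 5)    data register
 *     rt2     = extract32(exclusive_info, 14, 5)   second data register
 *
 * Register number 31 encodes the zero register, which is why the stores
 * above special-case rt == 31 and rt2 == 31.
 */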
/* AArch64 main loop */
void cpu_loop(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_arm_exec(cs);

            env->xregs[0] = do_syscall(env,
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            if (!do_strex_a64(env)) {
            /* fall through for segv */
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->exception.vaddress;
            queue_signal(env, info.si_signo, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            env->xregs[0] = do_arm_semihosting(env);
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
        process_pending_signals(env);
        /* Exception return on AArch64 always clears the exclusive monitor,
         * so any return to running guest code implies this.
         * A strex (successful or otherwise) also clears the monitor, so
         * we don't need to specialcase EXCP_STREX.
         */
        env->exclusive_addr = -1;
#endif /* ndef TARGET_ABI32 */
#ifdef TARGET_UNICORE32

void cpu_loop(CPUUniCore32State *env)
{
    CPUState *cs = CPU(uc32_env_get_cpu(env));
    unsigned int n, insn;
    target_siginfo_t info;

        trapnr = uc32_cpu_exec(cs);

        case UC32_EXCP_PRIV:
            get_user_u32(insn, env->regs[31] - 4);
            n = insn & 0xffffff;
            if (n >= UC32_SYSCALL_BASE) {
                /* linux syscall */
                n -= UC32_SYSCALL_BASE;
                if (n == UC32_SYSCALL_NR_set_tls) {
                    cpu_set_tls(env, env->regs[0]);
                } else {
                    env->regs[0] = do_syscall(env,
        case UC32_EXCP_DTRAP:
        case UC32_EXCP_ITRAP:
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->cp0.c4_faultaddr;
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
        process_pending_signals(env);

    EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
#define SPARC64_STACK_BIAS 2047

/* WARNING: dealing with register windows _is_ complicated. More info
   can be found at http://www.sics.se/~psm/sparcstack.html */
static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
{
    index = (index + cwp * 16) % (16 * env->nwindows);
    /* wrap handling : if cwp is on the last window, then we use the
       registers 'after' the end */
    if (index < 8 && env->cwp == env->nwindows - 1)
        index += 16 * env->nwindows;
    return index;
}
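/*
 * Worked example (added for exposition, not in the original file): with
 * env->nwindows == 8 the register file holds 16 * 8 == 128 window
 * registers.  Asking for register 11 of window cwp == 5 yields
 * (11 + 5 * 16) % 128 == 91.  The wrap case applies when the CPU's current
 * window is the last one (env->cwp == env->nwindows - 1) and index < 8;
 * those registers are then addressed past the end of the array, as the
 * comment above explains.
 */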
/* save the register window 'cwp1' */
static inline void save_window_offset(CPUSPARCState *env, int cwp1)
{
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
    sp_ptr += SPARC64_STACK_BIAS;
#endif
#if defined(DEBUG_WIN)
    printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",
           sp_ptr, cwp1);
#endif
    for (i = 0; i < 16; i++) {
        /* FIXME - what to do if put_user() fails? */
        put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);
    }
}
static void save_window(CPUSPARCState *env)
{
#ifndef TARGET_SPARC64
    unsigned int new_wim;
    new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
    env->wim = new_wim;
#else
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
#endif
}
static void restore_window(CPUSPARCState *env)
{
#ifndef TARGET_SPARC64
    unsigned int new_wim;
#endif
    unsigned int i, cwp1;

#ifndef TARGET_SPARC64
    new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);
#endif

    /* restore the invalid window */
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
    sp_ptr += SPARC64_STACK_BIAS;
#endif
#if defined(DEBUG_WIN)
    printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n",
           sp_ptr, cwp1);
#endif
    for (i = 0; i < 16; i++) {
        /* FIXME - what to do if get_user() fails? */
        get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);
    }
#ifdef TARGET_SPARC64
    if (env->cleanwin < env->nwindows - 1)
        env->cleanwin++;
static void flush_windows(CPUSPARCState *env)
{
        /* if restore would invoke restore_window(), then we can stop */
        cwp1 = cpu_cwp_inc(env, env->cwp + offset);
#ifndef TARGET_SPARC64
        if (env->wim & (1 << cwp1))
            break;
#else
        if (env->canrestore == 0)
            break;
#endif
        save_window_offset(env, cwp1);
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
#ifndef TARGET_SPARC64
    /* set wim so that restore will reload the registers */
    env->wim = 1 << cwp1;
#endif
#if defined(DEBUG_WIN)
    printf("flush_windows: nb=%d\n", offset - 1);
#endif
void cpu_loop (CPUSPARCState *env)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_sparc_exec(cs);

        /* Compute PSR before exposing state.  */
        if (env->cc_op != CC_OP_FLAGS) {
#ifndef TARGET_SPARC64
            ret = do_syscall (env, env->gregs[1],
                              env->regwptr[0], env->regwptr[1],
                              env->regwptr[2], env->regwptr[3],
                              env->regwptr[4], env->regwptr[5],
            if ((abi_ulong)ret >= (abi_ulong)(-515)) {
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
                env->xcc |= PSR_CARRY;
#else
                env->psr |= PSR_CARRY;
#endif
            } else {
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
                env->xcc &= ~PSR_CARRY;
#else
                env->psr &= ~PSR_CARRY;
#endif
            }
            env->regwptr[0] = ret;
            /* next instruction */
            env->npc = env->npc + 4;
        case 0x83: /* flush windows */
            /* next instruction */
            env->npc = env->npc + 4;
#ifndef TARGET_SPARC64
        case TT_WIN_OVF: /* window overflow */
            save_window(env);
            break;
        case TT_WIN_UNF: /* window underflow */
            restore_window(env);
            break;
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->mmuregs[4];
            queue_signal(env, info.si_signo, &info);
        case TT_SPILL: /* window overflow */
            save_window(env);
            break;
        case TT_FILL: /* window underflow */
            restore_window(env);
            break;
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            if (trapnr == TT_DFAULT)
                info._sifields._sigfault._addr = env->dmmuregs[4];
            else
                info._sifields._sigfault._addr = cpu_tsptr(env)->tpc;
            queue_signal(env, info.si_signo, &info);
#ifndef TARGET_ABI32
            sparc64_get_context(env);
            sparc64_set_context(env);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            printf ("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals (env);
static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env)
{
    return cpu_get_host_ticks();
}

uint64_t cpu_ppc_load_tbl(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env);
}

uint32_t cpu_ppc_load_tbu(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env) >> 32;
}

uint64_t cpu_ppc_load_atbl(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env);
}

uint32_t cpu_ppc_load_atbu(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env) >> 32;
}

uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env)
__attribute__ (( alias ("cpu_ppc_load_tbu") ));

uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env)
{
    return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
}
/* XXX: to be fixed */
int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
{
    return -1;
}

int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
{
    return -1;
}
static int do_store_exclusive(CPUPPCState *env)
{
    target_ulong page_addr;
    target_ulong val, val2 __attribute__((unused)) = 0;

    addr = env->reserve_ea;
    page_addr = addr & TARGET_PAGE_MASK;

    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {
        int reg = env->reserve_info & 0x1f;
        int size = env->reserve_info >> 5;

        if (addr == env->reserve_addr) {
            case 1: segv = get_user_u8(val, addr); break;
            case 2: segv = get_user_u16(val, addr); break;
            case 4: segv = get_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
            case 8: segv = get_user_u64(val, addr); break;
                segv = get_user_u64(val, addr);
                segv = get_user_u64(val2, addr + 8);
            if (!segv && val == env->reserve_val) {
                val = env->gpr[reg];
                case 1: segv = put_user_u8(val, addr); break;
                case 2: segv = put_user_u16(val, addr); break;
                case 4: segv = put_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
                case 8: segv = put_user_u64(val, addr); break;
                    if (val2 == env->reserve_val2) {
                        val = env->gpr[reg+1];
                        val2 = env->gpr[reg+1];
                        segv = put_user_u64(val, addr);
                        segv = put_user_u64(val2, addr + 8);
    env->crf[0] = (stored << 1) | xer_so;
    env->reserve_addr = (target_ulong)-1;
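/*
 * Note added for exposition (derived from the code above, not a comment in
 * the original file): env->reserve_info is packed by the lwarx/stwcx.
 * translation so that
 *
 *     reg  = reserve_info & 0x1f;   source/target GPR number
 *     size = reserve_info >> 5;     access size in bytes
 *
 * A store-conditional succeeds only when the reservation address still
 * matches and the memory word still holds env->reserve_val; cr0 is then
 * updated with the "stored" bit and XER[SO], and the reservation is cleared
 * by setting reserve_addr to -1.
 */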
void cpu_loop(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_ppc_exec(cs);
        case POWERPC_EXCP_NONE:
        case POWERPC_EXCP_CRITICAL: /* Critical input */
            cpu_abort(cs, "Critical interrupt while in user mode. "
                      "Aborting\n");
        case POWERPC_EXCP_MCHECK:   /* Machine check exception */
            cpu_abort(cs, "Machine check exception while in user mode. "
                      "Aborting\n");
        case POWERPC_EXCP_DSI:      /* Data storage exception */
            EXCP_DUMP(env, "Invalid data memory access: 0x" TARGET_FMT_lx "\n",
            /* XXX: check this. Seems bugged */
            switch (env->error_code & 0xFF000000) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info.si_signo = TARGET_SIGILL;
                info.si_code = TARGET_ILL_ILLADR;
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_ACCERR;
                /* Let's send a regular segfault... */
                EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
                          env->error_code);
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
            }
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_ISI:      /* Instruction storage exception */
            EXCP_DUMP(env, "Invalid instruction fetch: 0x" TARGET_FMT_lx
                      "\n", env->spr[SPR_SRR0]);
            /* XXX: check this */
            switch (env->error_code & 0xFF000000) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_ACCERR;
                /* Let's send a regular segfault... */
                EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
                          env->error_code);
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
            }
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_EXTERNAL: /* External input */
            cpu_abort(cs, "External interrupt while in user mode. "
                      "Aborting\n");
        case POWERPC_EXCP_ALIGN:    /* Alignment exception */
            EXCP_DUMP(env, "Unaligned memory access\n");
            /* XXX: check this */
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_PROGRAM:  /* Program exception */
            /* XXX: check this */
            switch (env->error_code & ~0xF) {
            case POWERPC_EXCP_FP:
                EXCP_DUMP(env, "Floating point program exception\n");
                info.si_signo = TARGET_SIGFPE;
                switch (env->error_code & 0xF) {
                case POWERPC_EXCP_FP_OX:
                    info.si_code = TARGET_FPE_FLTOVF;
                    break;
                case POWERPC_EXCP_FP_UX:
                    info.si_code = TARGET_FPE_FLTUND;
                    break;
                case POWERPC_EXCP_FP_ZX:
                case POWERPC_EXCP_FP_VXZDZ:
                    info.si_code = TARGET_FPE_FLTDIV;
                    break;
                case POWERPC_EXCP_FP_XX:
                    info.si_code = TARGET_FPE_FLTRES;
                    break;
                case POWERPC_EXCP_FP_VXSOFT:
                    info.si_code = TARGET_FPE_FLTINV;
                    break;
                case POWERPC_EXCP_FP_VXSNAN:
                case POWERPC_EXCP_FP_VXISI:
                case POWERPC_EXCP_FP_VXIDI:
                case POWERPC_EXCP_FP_VXIMZ:
                case POWERPC_EXCP_FP_VXVC:
                case POWERPC_EXCP_FP_VXSQRT:
                case POWERPC_EXCP_FP_VXCVI:
                    info.si_code = TARGET_FPE_FLTSUB;
                    break;
                default:
                    EXCP_DUMP(env, "Unknown floating point exception (%02x)\n",
                              env->error_code & 0xF);
                    break;
                }
                break;
            case POWERPC_EXCP_INVAL:
                EXCP_DUMP(env, "Invalid instruction\n");
                info.si_signo = TARGET_SIGILL;
                switch (env->error_code & 0xF) {
                case POWERPC_EXCP_INVAL_INVAL:
                    info.si_code = TARGET_ILL_ILLOPC;
                    break;
                case POWERPC_EXCP_INVAL_LSWX:
                    info.si_code = TARGET_ILL_ILLOPN;
                    break;
                case POWERPC_EXCP_INVAL_SPR:
                    info.si_code = TARGET_ILL_PRVREG;
                    break;
                case POWERPC_EXCP_INVAL_FP:
                    info.si_code = TARGET_ILL_COPROC;
                    break;
                default:
                    EXCP_DUMP(env, "Unknown invalid operation (%02x)\n",
                              env->error_code & 0xF);
                    info.si_code = TARGET_ILL_ILLADR;
                    break;
                }
                break;
            case POWERPC_EXCP_PRIV:
                EXCP_DUMP(env, "Privilege violation\n");
                info.si_signo = TARGET_SIGILL;
                switch (env->error_code & 0xF) {
                case POWERPC_EXCP_PRIV_OPC:
                    info.si_code = TARGET_ILL_PRVOPC;
                    break;
                case POWERPC_EXCP_PRIV_REG:
                    info.si_code = TARGET_ILL_PRVREG;
                    break;
                default:
                    EXCP_DUMP(env, "Unknown privilege violation (%02x)\n",
                              env->error_code & 0xF);
                    info.si_code = TARGET_ILL_PRVOPC;
                    break;
                }
                break;
            case POWERPC_EXCP_TRAP:
                cpu_abort(cs, "Tried to call a TRAP\n");
                break;
            default:
                /* Should not happen ! */
                cpu_abort(cs, "Unknown program exception (%02x)\n",
                          env->error_code);
                break;
            }
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_FPU:      /* Floating-point unavailable exception */
            EXCP_DUMP(env, "No floating point allowed\n");
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_SYSCALL:  /* System call exception */
            cpu_abort(cs, "Syscall exception while in user mode. "
                      "Aborting\n");
        case POWERPC_EXCP_APU:      /* Auxiliary processor unavailable */
            EXCP_DUMP(env, "No APU instruction allowed\n");
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_DECR:     /* Decrementer exception */
            cpu_abort(cs, "Decrementer interrupt while in user mode. "
                      "Aborting\n");
        case POWERPC_EXCP_FIT:      /* Fixed-interval timer interrupt */
            cpu_abort(cs, "Fixed-interval timer interrupt while in user mode. "
                      "Aborting\n");
        case POWERPC_EXCP_WDT:      /* Watchdog timer interrupt */
            cpu_abort(cs, "Watchdog timer interrupt while in user mode. "
                      "Aborting\n");
        case POWERPC_EXCP_DTLB:     /* Data TLB error */
            cpu_abort(cs, "Data TLB exception while in user mode. "
                      "Aborting\n");
        case POWERPC_EXCP_ITLB:     /* Instruction TLB error */
            cpu_abort(cs, "Instruction TLB exception while in user mode. "
                      "Aborting\n");
        case POWERPC_EXCP_SPEU:     /* SPE/embedded floating-point unavail. */
            EXCP_DUMP(env, "No SPE/floating-point instruction allowed\n");
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_EFPDI:    /* Embedded floating-point data IRQ */
            cpu_abort(cs, "Embedded floating-point data IRQ not handled\n");
        case POWERPC_EXCP_EFPRI:    /* Embedded floating-point round IRQ */
            cpu_abort(cs, "Embedded floating-point round IRQ not handled\n");
        case POWERPC_EXCP_EPERFM:   /* Embedded performance monitor IRQ */
            cpu_abort(cs, "Performance monitor exception not handled\n");
        case POWERPC_EXCP_DOORI:    /* Embedded doorbell interrupt */
            cpu_abort(cs, "Doorbell interrupt while in user mode. "
                      "Aborting\n");
        case POWERPC_EXCP_DOORCI:   /* Embedded doorbell critical interrupt */
            cpu_abort(cs, "Doorbell critical interrupt while in user mode. "
                      "Aborting\n");
        case POWERPC_EXCP_RESET:    /* System reset exception */
            cpu_abort(cs, "Reset interrupt while in user mode. "
                      "Aborting\n");
        case POWERPC_EXCP_DSEG:     /* Data segment exception */
            cpu_abort(cs, "Data segment exception while in user mode. "
                      "Aborting\n");
        case POWERPC_EXCP_ISEG:     /* Instruction segment exception */
            cpu_abort(cs, "Instruction segment exception "
                      "while in user mode. Aborting\n");
        /* PowerPC 64 with hypervisor mode support */
        case POWERPC_EXCP_HDECR:    /* Hypervisor decrementer exception */
            cpu_abort(cs, "Hypervisor decrementer interrupt "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_TRACE:    /* Trace exception */
            /* nothing to do:
             * we use this exception to emulate step-by-step execution mode.
             */
            break;
        /* PowerPC 64 with hypervisor mode support */
        case POWERPC_EXCP_HDSI:     /* Hypervisor data storage exception */
            cpu_abort(cs, "Hypervisor data storage exception "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_HISI:     /* Hypervisor instruction storage excp */
            cpu_abort(cs, "Hypervisor instruction storage exception "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_HDSEG:    /* Hypervisor data segment exception */
            cpu_abort(cs, "Hypervisor data segment exception "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_HISEG:    /* Hypervisor instruction segment excp */
            cpu_abort(cs, "Hypervisor instruction segment exception "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_VPU:      /* Vector unavailable exception */
            EXCP_DUMP(env, "No Altivec instructions allowed\n");
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_PIT:      /* Programmable interval timer IRQ */
            cpu_abort(cs, "Programmable interval timer interrupt "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_IO:       /* IO error exception */
            cpu_abort(cs, "IO error exception while in user mode. "
                      "Aborting\n");
        case POWERPC_EXCP_RUNM:     /* Run mode exception */
            cpu_abort(cs, "Run mode exception while in user mode. "
                      "Aborting\n");
        case POWERPC_EXCP_EMUL:     /* Emulation trap exception */
            cpu_abort(cs, "Emulation trap exception not handled\n");
        case POWERPC_EXCP_IFTLB:    /* Instruction fetch TLB error */
            cpu_abort(cs, "Instruction fetch TLB exception "
                      "while in user-mode. Aborting");
        case POWERPC_EXCP_DLTLB:    /* Data load TLB miss */
            cpu_abort(cs, "Data load TLB exception while in user-mode. "
                      "Aborting\n");
        case POWERPC_EXCP_DSTLB:    /* Data store TLB miss */
            cpu_abort(cs, "Data store TLB exception while in user-mode. "
                      "Aborting\n");
        case POWERPC_EXCP_FPA:      /* Floating-point assist exception */
            cpu_abort(cs, "Floating-point assist exception not handled\n");
        case POWERPC_EXCP_IABR:     /* Instruction address breakpoint */
            cpu_abort(cs, "Instruction address breakpoint exception "
                      "not handled\n");
        case POWERPC_EXCP_SMI:      /* System management interrupt */
            cpu_abort(cs, "System management interrupt while in user mode. "
                      "Aborting\n");
        case POWERPC_EXCP_THERM:    /* Thermal interrupt */
            cpu_abort(cs, "Thermal interrupt while in user mode. "
                      "Aborting\n");
        case POWERPC_EXCP_PERFM:    /* Embedded performance monitor IRQ */
            cpu_abort(cs, "Performance monitor exception not handled\n");
        case POWERPC_EXCP_VPUA:     /* Vector assist exception */
            cpu_abort(cs, "Vector assist exception not handled\n");
        case POWERPC_EXCP_SOFTP:    /* Soft patch exception */
            cpu_abort(cs, "Soft patch exception not handled\n");
        case POWERPC_EXCP_MAINT:    /* Maintenance exception */
            cpu_abort(cs, "Maintenance exception while in user mode. "
                      "Aborting\n");
        case POWERPC_EXCP_STOP:     /* stop translation */
            /* We did invalidate the instruction cache. Go on */
            break;
        case POWERPC_EXCP_BRANCH:   /* branch instruction: */
            /* We just stopped because of a branch. Go on */
            break;
        case POWERPC_EXCP_SYSCALL_USER:
            /* system call in user-mode emulation */
            /* PPC ABI uses overflow flag in cr0 to signal an error in
             * syscalls.
             */
            env->crf[0] &= ~0x1;
            ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4],
                             env->gpr[5], env->gpr[6], env->gpr[7],
            if (ret == (target_ulong)(-TARGET_QEMU_ESIGRETURN)) {
                /* Returning from a successful sigreturn syscall.
                   Avoid corrupting register state.  */
                break;
            }
            if (ret > (target_ulong)(-515)) {
        case POWERPC_EXCP_STCX:
            if (do_store_exclusive(env)) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->nip;
                queue_signal(env, info.si_signo, &info);
            }
            break;
                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            cpu_abort(cs, "Unknown exception 0x%x. Aborting\n", trapnr);
        process_pending_signals(env);
# ifdef TARGET_ABI_MIPSO32
#  define MIPS_SYS(name, args) args,
static const uint8_t mips_syscall_args[] = {
    MIPS_SYS(sys_syscall, 8) /* 4000 */
    MIPS_SYS(sys_exit, 1)
    MIPS_SYS(sys_fork, 0)
    MIPS_SYS(sys_read, 3)
    MIPS_SYS(sys_write, 3)
    MIPS_SYS(sys_open, 3) /* 4005 */
    MIPS_SYS(sys_close, 1)
    MIPS_SYS(sys_waitpid, 3)
    MIPS_SYS(sys_creat, 2)
    MIPS_SYS(sys_link, 2)
    MIPS_SYS(sys_unlink, 1) /* 4010 */
    MIPS_SYS(sys_execve, 0)
    MIPS_SYS(sys_chdir, 1)
    MIPS_SYS(sys_time, 1)
    MIPS_SYS(sys_mknod, 3)
    MIPS_SYS(sys_chmod, 2) /* 4015 */
    MIPS_SYS(sys_lchown, 3)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_ni_syscall, 0) /* was sys_stat */
    MIPS_SYS(sys_lseek, 3)
    MIPS_SYS(sys_getpid, 0) /* 4020 */
    MIPS_SYS(sys_mount, 5)
    MIPS_SYS(sys_umount, 1)
    MIPS_SYS(sys_setuid, 1)
    MIPS_SYS(sys_getuid, 0)
    MIPS_SYS(sys_stime, 1) /* 4025 */
    MIPS_SYS(sys_ptrace, 4)
    MIPS_SYS(sys_alarm, 1)
    MIPS_SYS(sys_ni_syscall, 0) /* was sys_fstat */
    MIPS_SYS(sys_pause, 0)
    MIPS_SYS(sys_utime, 2) /* 4030 */
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_access, 2)
    MIPS_SYS(sys_nice, 1)
    MIPS_SYS(sys_ni_syscall, 0) /* 4035 */
    MIPS_SYS(sys_sync, 0)
    MIPS_SYS(sys_kill, 2)
    MIPS_SYS(sys_rename, 2)
    MIPS_SYS(sys_mkdir, 2)
    MIPS_SYS(sys_rmdir, 1) /* 4040 */
    MIPS_SYS(sys_dup, 1)
    MIPS_SYS(sys_pipe, 0)
    MIPS_SYS(sys_times, 1)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_brk, 1) /* 4045 */
    MIPS_SYS(sys_setgid, 1)
    MIPS_SYS(sys_getgid, 0)
    MIPS_SYS(sys_ni_syscall, 0) /* was signal(2) */
    MIPS_SYS(sys_geteuid, 0)
    MIPS_SYS(sys_getegid, 0) /* 4050 */
    MIPS_SYS(sys_acct, 0)
    MIPS_SYS(sys_umount2, 2)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_ioctl, 3)
    MIPS_SYS(sys_fcntl, 3) /* 4055 */
    MIPS_SYS(sys_ni_syscall, 2)
    MIPS_SYS(sys_setpgid, 2)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_olduname, 1)
    MIPS_SYS(sys_umask, 1) /* 4060 */
    MIPS_SYS(sys_chroot, 1)
    MIPS_SYS(sys_ustat, 2)
    MIPS_SYS(sys_dup2, 2)
    MIPS_SYS(sys_getppid, 0)
    MIPS_SYS(sys_getpgrp, 0) /* 4065 */
    MIPS_SYS(sys_setsid, 0)
    MIPS_SYS(sys_sigaction, 3)
    MIPS_SYS(sys_sgetmask, 0)
    MIPS_SYS(sys_ssetmask, 1)
    MIPS_SYS(sys_setreuid, 2) /* 4070 */
    MIPS_SYS(sys_setregid, 2)
    MIPS_SYS(sys_sigsuspend, 0)
    MIPS_SYS(sys_sigpending, 1)
    MIPS_SYS(sys_sethostname, 2)
    MIPS_SYS(sys_setrlimit, 2) /* 4075 */
    MIPS_SYS(sys_getrlimit, 2)
    MIPS_SYS(sys_getrusage, 2)
    MIPS_SYS(sys_gettimeofday, 2)
    MIPS_SYS(sys_settimeofday, 2)
    MIPS_SYS(sys_getgroups, 2) /* 4080 */
    MIPS_SYS(sys_setgroups, 2)
    MIPS_SYS(sys_ni_syscall, 0) /* old_select */
    MIPS_SYS(sys_symlink, 2)
    MIPS_SYS(sys_ni_syscall, 0) /* was sys_lstat */
    MIPS_SYS(sys_readlink, 3) /* 4085 */
    MIPS_SYS(sys_uselib, 1)
    MIPS_SYS(sys_swapon, 2)
    MIPS_SYS(sys_reboot, 3)
    MIPS_SYS(old_readdir, 3)
    MIPS_SYS(old_mmap, 6) /* 4090 */
    MIPS_SYS(sys_munmap, 2)
    MIPS_SYS(sys_truncate, 2)
    MIPS_SYS(sys_ftruncate, 2)
    MIPS_SYS(sys_fchmod, 2)
    MIPS_SYS(sys_fchown, 3) /* 4095 */
    MIPS_SYS(sys_getpriority, 2)
    MIPS_SYS(sys_setpriority, 3)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_statfs, 2)
    MIPS_SYS(sys_fstatfs, 2) /* 4100 */
    MIPS_SYS(sys_ni_syscall, 0) /* was ioperm(2) */
    MIPS_SYS(sys_socketcall, 2)
    MIPS_SYS(sys_syslog, 3)
    MIPS_SYS(sys_setitimer, 3)
    MIPS_SYS(sys_getitimer, 2) /* 4105 */
    MIPS_SYS(sys_newstat, 2)
    MIPS_SYS(sys_newlstat, 2)
    MIPS_SYS(sys_newfstat, 2)
    MIPS_SYS(sys_uname, 1)
    MIPS_SYS(sys_ni_syscall, 0) /* 4110 was iopl(2) */
    MIPS_SYS(sys_vhangup, 0)
    MIPS_SYS(sys_ni_syscall, 0) /* was sys_idle() */
    MIPS_SYS(sys_ni_syscall, 0) /* was sys_vm86 */
    MIPS_SYS(sys_wait4, 4)
    MIPS_SYS(sys_swapoff, 1) /* 4115 */
    MIPS_SYS(sys_sysinfo, 1)
    MIPS_SYS(sys_ipc, 6)
    MIPS_SYS(sys_fsync, 1)
    MIPS_SYS(sys_sigreturn, 0)
    MIPS_SYS(sys_clone, 6) /* 4120 */
    MIPS_SYS(sys_setdomainname, 2)
    MIPS_SYS(sys_newuname, 1)
    MIPS_SYS(sys_ni_syscall, 0) /* sys_modify_ldt */
    MIPS_SYS(sys_adjtimex, 1)
    MIPS_SYS(sys_mprotect, 3) /* 4125 */
    MIPS_SYS(sys_sigprocmask, 3)
    MIPS_SYS(sys_ni_syscall, 0) /* was create_module */
    MIPS_SYS(sys_init_module, 5)
    MIPS_SYS(sys_delete_module, 1)
    MIPS_SYS(sys_ni_syscall, 0) /* 4130 was get_kernel_syms */
    MIPS_SYS(sys_quotactl, 0)
    MIPS_SYS(sys_getpgid, 1)
    MIPS_SYS(sys_fchdir, 1)
    MIPS_SYS(sys_bdflush, 2)
    MIPS_SYS(sys_sysfs, 3) /* 4135 */
    MIPS_SYS(sys_personality, 1)
    MIPS_SYS(sys_ni_syscall, 0) /* for afs_syscall */
    MIPS_SYS(sys_setfsuid, 1)
    MIPS_SYS(sys_setfsgid, 1)
    MIPS_SYS(sys_llseek, 5) /* 4140 */
    MIPS_SYS(sys_getdents, 3)
    MIPS_SYS(sys_select, 5)
    MIPS_SYS(sys_flock, 2)
    MIPS_SYS(sys_msync, 3)
    MIPS_SYS(sys_readv, 3) /* 4145 */
    MIPS_SYS(sys_writev, 3)
    MIPS_SYS(sys_cacheflush, 3)
    MIPS_SYS(sys_cachectl, 3)
    MIPS_SYS(sys_sysmips, 4)
    MIPS_SYS(sys_ni_syscall, 0) /* 4150 */
    MIPS_SYS(sys_getsid, 1)
    MIPS_SYS(sys_fdatasync, 0)
    MIPS_SYS(sys_sysctl, 1)
    MIPS_SYS(sys_mlock, 2)
    MIPS_SYS(sys_munlock, 2) /* 4155 */
    MIPS_SYS(sys_mlockall, 1)
    MIPS_SYS(sys_munlockall, 0)
    MIPS_SYS(sys_sched_setparam, 2)
    MIPS_SYS(sys_sched_getparam, 2)
    MIPS_SYS(sys_sched_setscheduler, 3) /* 4160 */
    MIPS_SYS(sys_sched_getscheduler, 1)
    MIPS_SYS(sys_sched_yield, 0)
    MIPS_SYS(sys_sched_get_priority_max, 1)
    MIPS_SYS(sys_sched_get_priority_min, 1)
    MIPS_SYS(sys_sched_rr_get_interval, 2) /* 4165 */
    MIPS_SYS(sys_nanosleep, 2)
    MIPS_SYS(sys_mremap, 5)
    MIPS_SYS(sys_accept, 3)
    MIPS_SYS(sys_bind, 3)
    MIPS_SYS(sys_connect, 3) /* 4170 */
    MIPS_SYS(sys_getpeername, 3)
    MIPS_SYS(sys_getsockname, 3)
    MIPS_SYS(sys_getsockopt, 5)
    MIPS_SYS(sys_listen, 2)
    MIPS_SYS(sys_recv, 4) /* 4175 */
    MIPS_SYS(sys_recvfrom, 6)
    MIPS_SYS(sys_recvmsg, 3)
    MIPS_SYS(sys_send, 4)
    MIPS_SYS(sys_sendmsg, 3)
    MIPS_SYS(sys_sendto, 6) /* 4180 */
    MIPS_SYS(sys_setsockopt, 5)
    MIPS_SYS(sys_shutdown, 2)
    MIPS_SYS(sys_socket, 3)
    MIPS_SYS(sys_socketpair, 4)
    MIPS_SYS(sys_setresuid, 3) /* 4185 */
    MIPS_SYS(sys_getresuid, 3)
    MIPS_SYS(sys_ni_syscall, 0) /* was sys_query_module */
    MIPS_SYS(sys_poll, 3)
    MIPS_SYS(sys_nfsservctl, 3)
    MIPS_SYS(sys_setresgid, 3) /* 4190 */
    MIPS_SYS(sys_getresgid, 3)
    MIPS_SYS(sys_prctl, 5)
    MIPS_SYS(sys_rt_sigreturn, 0)
    MIPS_SYS(sys_rt_sigaction, 4)
    MIPS_SYS(sys_rt_sigprocmask, 4) /* 4195 */
    MIPS_SYS(sys_rt_sigpending, 2)
    MIPS_SYS(sys_rt_sigtimedwait, 4)
    MIPS_SYS(sys_rt_sigqueueinfo, 3)
    MIPS_SYS(sys_rt_sigsuspend, 0)
    MIPS_SYS(sys_pread64, 6) /* 4200 */
    MIPS_SYS(sys_pwrite64, 6)
    MIPS_SYS(sys_chown, 3)
    MIPS_SYS(sys_getcwd, 2)
    MIPS_SYS(sys_capget, 2)
    MIPS_SYS(sys_capset, 2) /* 4205 */
    MIPS_SYS(sys_sigaltstack, 2)
    MIPS_SYS(sys_sendfile, 4)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_mmap2, 6) /* 4210 */
    MIPS_SYS(sys_truncate64, 4)
    MIPS_SYS(sys_ftruncate64, 4)
    MIPS_SYS(sys_stat64, 2)
    MIPS_SYS(sys_lstat64, 2)
    MIPS_SYS(sys_fstat64, 2) /* 4215 */
    MIPS_SYS(sys_pivot_root, 2)
    MIPS_SYS(sys_mincore, 3)
    MIPS_SYS(sys_madvise, 3)
    MIPS_SYS(sys_getdents64, 3)
    MIPS_SYS(sys_fcntl64, 3) /* 4220 */
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_gettid, 0)
    MIPS_SYS(sys_readahead, 5)
    MIPS_SYS(sys_setxattr, 5)
    MIPS_SYS(sys_lsetxattr, 5) /* 4225 */
    MIPS_SYS(sys_fsetxattr, 5)
    MIPS_SYS(sys_getxattr, 4)
    MIPS_SYS(sys_lgetxattr, 4)
    MIPS_SYS(sys_fgetxattr, 4)
    MIPS_SYS(sys_listxattr, 3) /* 4230 */
    MIPS_SYS(sys_llistxattr, 3)
    MIPS_SYS(sys_flistxattr, 3)
    MIPS_SYS(sys_removexattr, 2)
    MIPS_SYS(sys_lremovexattr, 2)
    MIPS_SYS(sys_fremovexattr, 2) /* 4235 */
    MIPS_SYS(sys_tkill, 2)
    MIPS_SYS(sys_sendfile64, 5)
    MIPS_SYS(sys_futex, 6)
    MIPS_SYS(sys_sched_setaffinity, 3)
    MIPS_SYS(sys_sched_getaffinity, 3) /* 4240 */
    MIPS_SYS(sys_io_setup, 2)
    MIPS_SYS(sys_io_destroy, 1)
    MIPS_SYS(sys_io_getevents, 5)
    MIPS_SYS(sys_io_submit, 3)
    MIPS_SYS(sys_io_cancel, 3) /* 4245 */
    MIPS_SYS(sys_exit_group, 1)
    MIPS_SYS(sys_lookup_dcookie, 3)
    MIPS_SYS(sys_epoll_create, 1)
    MIPS_SYS(sys_epoll_ctl, 4)
    MIPS_SYS(sys_epoll_wait, 3) /* 4250 */
    MIPS_SYS(sys_remap_file_pages, 5)
    MIPS_SYS(sys_set_tid_address, 1)
    MIPS_SYS(sys_restart_syscall, 0)
    MIPS_SYS(sys_fadvise64_64, 7)
    MIPS_SYS(sys_statfs64, 3) /* 4255 */
    MIPS_SYS(sys_fstatfs64, 2)
    MIPS_SYS(sys_timer_create, 3)
    MIPS_SYS(sys_timer_settime, 4)
    MIPS_SYS(sys_timer_gettime, 2)
    MIPS_SYS(sys_timer_getoverrun, 1) /* 4260 */
    MIPS_SYS(sys_timer_delete, 1)
    MIPS_SYS(sys_clock_settime, 2)
    MIPS_SYS(sys_clock_gettime, 2)
    MIPS_SYS(sys_clock_getres, 2)
    MIPS_SYS(sys_clock_nanosleep, 4) /* 4265 */
    MIPS_SYS(sys_tgkill, 3)
    MIPS_SYS(sys_utimes, 2)
    MIPS_SYS(sys_mbind, 4)
    MIPS_SYS(sys_ni_syscall, 0) /* sys_get_mempolicy */
    MIPS_SYS(sys_ni_syscall, 0) /* 4270 sys_set_mempolicy */
    MIPS_SYS(sys_mq_open, 4)
    MIPS_SYS(sys_mq_unlink, 1)
    MIPS_SYS(sys_mq_timedsend, 5)
    MIPS_SYS(sys_mq_timedreceive, 5)
    MIPS_SYS(sys_mq_notify, 2) /* 4275 */
    MIPS_SYS(sys_mq_getsetattr, 3)
    MIPS_SYS(sys_ni_syscall, 0) /* sys_vserver */
    MIPS_SYS(sys_waitid, 4)
    MIPS_SYS(sys_ni_syscall, 0) /* available, was setaltroot */
    MIPS_SYS(sys_add_key, 5)
    MIPS_SYS(sys_request_key, 4)
    MIPS_SYS(sys_keyctl, 5)
    MIPS_SYS(sys_set_thread_area, 1)
    MIPS_SYS(sys_inotify_init, 0)
    MIPS_SYS(sys_inotify_add_watch, 3) /* 4285 */
    MIPS_SYS(sys_inotify_rm_watch, 2)
    MIPS_SYS(sys_migrate_pages, 4)
    MIPS_SYS(sys_openat, 4)
    MIPS_SYS(sys_mkdirat, 3)
    MIPS_SYS(sys_mknodat, 4) /* 4290 */
    MIPS_SYS(sys_fchownat, 5)
    MIPS_SYS(sys_futimesat, 3)
    MIPS_SYS(sys_fstatat64, 4)
    MIPS_SYS(sys_unlinkat, 3)
    MIPS_SYS(sys_renameat, 4) /* 4295 */
    MIPS_SYS(sys_linkat, 5)
    MIPS_SYS(sys_symlinkat, 3)
    MIPS_SYS(sys_readlinkat, 4)
    MIPS_SYS(sys_fchmodat, 3)
    MIPS_SYS(sys_faccessat, 3) /* 4300 */
    MIPS_SYS(sys_pselect6, 6)
    MIPS_SYS(sys_ppoll, 5)
    MIPS_SYS(sys_unshare, 1)
    MIPS_SYS(sys_splice, 6)
    MIPS_SYS(sys_sync_file_range, 7) /* 4305 */
    MIPS_SYS(sys_tee, 4)
    MIPS_SYS(sys_vmsplice, 4)
    MIPS_SYS(sys_move_pages, 6)
    MIPS_SYS(sys_set_robust_list, 2)
    MIPS_SYS(sys_get_robust_list, 3) /* 4310 */
    MIPS_SYS(sys_kexec_load, 4)
    MIPS_SYS(sys_getcpu, 3)
    MIPS_SYS(sys_epoll_pwait, 6)
    MIPS_SYS(sys_ioprio_set, 3)
    MIPS_SYS(sys_ioprio_get, 2)
    MIPS_SYS(sys_utimensat, 4)
    MIPS_SYS(sys_signalfd, 3)
    MIPS_SYS(sys_ni_syscall, 0) /* was timerfd */
    MIPS_SYS(sys_eventfd, 1)
    MIPS_SYS(sys_fallocate, 6) /* 4320 */
    MIPS_SYS(sys_timerfd_create, 2)
    MIPS_SYS(sys_timerfd_gettime, 2)
    MIPS_SYS(sys_timerfd_settime, 4)
    MIPS_SYS(sys_signalfd4, 4)
    MIPS_SYS(sys_eventfd2, 2) /* 4325 */
    MIPS_SYS(sys_epoll_create1, 1)
    MIPS_SYS(sys_dup3, 3)
    MIPS_SYS(sys_pipe2, 2)
    MIPS_SYS(sys_inotify_init1, 1)
    MIPS_SYS(sys_preadv, 6) /* 4330 */
    MIPS_SYS(sys_pwritev, 6)
    MIPS_SYS(sys_rt_tgsigqueueinfo, 4)
    MIPS_SYS(sys_perf_event_open, 5)
    MIPS_SYS(sys_accept4, 4)
    MIPS_SYS(sys_recvmmsg, 5) /* 4335 */
    MIPS_SYS(sys_fanotify_init, 2)
    MIPS_SYS(sys_fanotify_mark, 6)
    MIPS_SYS(sys_prlimit64, 4)
    MIPS_SYS(sys_name_to_handle_at, 5)
    MIPS_SYS(sys_open_by_handle_at, 3) /* 4340 */
    MIPS_SYS(sys_clock_adjtime, 2)
    MIPS_SYS(sys_syncfs, 1)
};
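/*
 * Usage note (added for exposition): for the o32 ABI the table above is
 * indexed by the syscall number relative to the 4000 base, and each entry
 * gives the argument count so the loop below knows how many arguments to
 * pull from registers a0-a3 and, beyond four, from the user stack:
 *
 *     syscall_num = env->active_tc.gpr[2] - 4000;
 *     if (syscall_num < sizeof(mips_syscall_args)) {
 *         nb_args = mips_syscall_args[syscall_num];
 *     }
 */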
static int do_store_exclusive(CPUMIPSState *env)
{
    target_ulong page_addr;

    page_addr = addr & TARGET_PAGE_MASK;

    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {
    reg = env->llreg & 0x1f;
    d = (env->llreg & 0x20) != 0;
    if (d) {
        segv = get_user_s64(val, addr);
    } else {
        segv = get_user_s32(val, addr);
    }
    if (val != env->llval) {
        env->active_tc.gpr[reg] = 0;
    } else {
        if (d) {
            segv = put_user_u64(env->llnewval, addr);
        } else {
            segv = put_user_u32(env->llnewval, addr);
        }
        env->active_tc.gpr[reg] = 1;
    }
    env->active_tc.PC += 4;
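/*
 * Note added for exposition (derived from the code above, not a comment in
 * the original file): env->llreg packs the ll/sc state as
 *
 *     reg = llreg & 0x1f;          destination GPR for the sc result
 *     d   = (llreg & 0x20) != 0;   set for the 64-bit lld/scd variants
 *
 * On success the GPR is set to 1 and llnewval is written back; if the
 * monitored value changed the GPR is set to 0 and nothing is stored.
 * Either way the PC is advanced past the sc instruction.
 */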
static int do_break(CPUMIPSState *env, target_siginfo_t *info,
                    unsigned int code)
{
        info->si_signo = TARGET_SIGFPE;
        info->si_code = (code == BRK_OVERFLOW) ? FPE_INTOVF : FPE_INTDIV;
        queue_signal(env, info->si_signo, info);
        info->si_signo = TARGET_SIGTRAP;
        queue_signal(env, info->si_signo, info);
void cpu_loop(CPUMIPSState *env)
{
    CPUState *cs = CPU(mips_env_get_cpu(env));
    target_siginfo_t info;
# ifdef TARGET_ABI_MIPSO32
    unsigned int syscall_num;
# endif

        trapnr = cpu_mips_exec(cs);
            env->active_tc.PC += 4;
# ifdef TARGET_ABI_MIPSO32
            syscall_num = env->active_tc.gpr[2] - 4000;
            if (syscall_num >= sizeof(mips_syscall_args)) {
                ret = -TARGET_ENOSYS;
            } else {
                abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;

                nb_args = mips_syscall_args[syscall_num];
                sp_reg = env->active_tc.gpr[29];
                /* these arguments are taken from the stack */
                if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) {
                if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) {
                if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) {
                if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) {
                ret = do_syscall(env, env->active_tc.gpr[2],
                                 env->active_tc.gpr[4],
                                 env->active_tc.gpr[5],
                                 env->active_tc.gpr[6],
                                 env->active_tc.gpr[7],
                                 arg5, arg6, arg7, arg8);
            }
# else
            ret = do_syscall(env, env->active_tc.gpr[2],
                             env->active_tc.gpr[4], env->active_tc.gpr[5],
                             env->active_tc.gpr[6], env->active_tc.gpr[7],
                             env->active_tc.gpr[8], env->active_tc.gpr[9],
                             env->active_tc.gpr[10], env->active_tc.gpr[11]);
# endif
            if (ret == -TARGET_QEMU_ESIGRETURN) {
                /* Returning from a successful sigreturn syscall.
                   Avoid clobbering register state.  */
                break;
            }
            if ((abi_ulong)ret >= (abi_ulong)-1133) {
                env->active_tc.gpr[7] = 1; /* error flag */
            } else {
                env->active_tc.gpr[7] = 0; /* error flag */
            }
            env->active_tc.gpr[2] = ret;
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->CP0_BadVAddr;
            queue_signal(env, info.si_signo, &info);

            info.si_signo = TARGET_SIGILL;
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            if (do_store_exclusive(env)) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->active_tc.PC;
                queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            queue_signal(env, info.si_signo, &info);
2532 /* The code below was inspired by the MIPS Linux kernel trap
2533 * handling code in arch/mips/kernel/traps.c.
2537 abi_ulong trap_instr;
2540 if (env->hflags & MIPS_HFLAG_M16) {
2541 if (env->insn_flags & ASE_MICROMIPS) {
2542 /* microMIPS mode */
2543 ret = get_user_u16(trap_instr, env->active_tc.PC);
2548 if ((trap_instr >> 10) == 0x11) {
2549 /* 16-bit instruction */
2550 code = trap_instr & 0xf;
2552 /* 32-bit instruction */
2555 ret = get_user_u16(instr_lo,
2556 env->active_tc.PC + 2);
2560 trap_instr = (trap_instr << 16) | instr_lo;
2561 code = ((trap_instr >> 6) & ((1 << 20) - 1));
2562 /* Unfortunately, microMIPS also suffers from
2563 the old assembler bug... */
2564 if (code >= (1 << 10)) {
2570 ret = get_user_u16(trap_instr, env->active_tc.PC);
2574 code = (trap_instr >> 6) & 0x3f;
2577 ret = get_user_u32(trap_instr, env->active_tc.PC);
2582 /* As described in the original Linux kernel code, the
2583 * below checks on 'code' are to work around an old
2586 code = ((trap_instr >> 6) & ((1 << 20) - 1));
2587 if (code >= (1 << 10)) {
2592 if (do_break(env, &info, code) != 0) {
2599 abi_ulong trap_instr;
2600 unsigned int code = 0;
2602 if (env->hflags & MIPS_HFLAG_M16) {
2603 /* microMIPS mode */
2606 ret = get_user_u16(instr[0], env->active_tc.PC) ||
2607 get_user_u16(instr[1], env->active_tc.PC + 2);
2609 trap_instr = (instr[0] << 16) | instr[1];
2611 ret = get_user_u32(trap_instr, env->active_tc.PC);
2618 /* The immediate versions don't provide a code. */
2619 if (!(trap_instr & 0xFC000000)) {
2620 if (env->hflags & MIPS_HFLAG_M16) {
2621 /* microMIPS mode */
2622 code = ((trap_instr >> 12) & ((1 << 4) - 1));
2624 code = ((trap_instr >> 6) & ((1 << 10) - 1));
2628 if (do_break(env, &info, code) != 0) {
2635 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
2638 process_pending_signals(env);
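/*
 * Standalone sketch (not QEMU code) of the break-code extraction used in
 * the EXCP_BREAK handling above: for a classic 32-bit BREAK instruction the
 * code field occupies bits 6..25, and the kernel-compatible workaround keeps
 * only the upper ten bits when the value does not fit in ten bits.
 */
static inline unsigned int mips_break_code_sketch(uint32_t insn)
{
    unsigned int code = (insn >> 6) & ((1 << 20) - 1);

    if (code >= (1 << 10)) {
        code >>= 10;    /* old-assembler compatibility, as in the loop above */
    }
    return code;
}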
2643 #ifdef TARGET_OPENRISC
2645 void cpu_loop(CPUOpenRISCState *env)
2647 CPUState *cs = CPU(openrisc_env_get_cpu(env));
2652 trapnr = cpu_openrisc_exec(cs);
2658 qemu_log_mask(CPU_LOG_INT, "\nReset request, exit, pc is %#x\n", env->pc);
2662 qemu_log_mask(CPU_LOG_INT, "\nBus error, exit, pc is %#x\n", env->pc);
2663 gdbsig = TARGET_SIGBUS;
2667 cpu_dump_state(cs, stderr, fprintf, 0);
2668 gdbsig = TARGET_SIGSEGV;
2671 qemu_log_mask(CPU_LOG_INT, "\nTick time interrupt pc is %#x\n", env->pc);
2674 qemu_log_mask(CPU_LOG_INT, "\nAlignment pc is %#x\n", env->pc);
2675 gdbsig = TARGET_SIGBUS;
2678 qemu_log_mask(CPU_LOG_INT, "\nIllegal instruction pc is %#x\n", env->pc);
2679 gdbsig = TARGET_SIGILL;
2682 qemu_log_mask(CPU_LOG_INT, "\nExternal interrupt pc is %#x\n", env->pc);
2686 qemu_log_mask(CPU_LOG_INT, "\nTLB miss\n");
2689 qemu_log_mask(CPU_LOG_INT, "\nRange\n");
2690 gdbsig = TARGET_SIGSEGV;
2693 env->pc += 4; /* 0xc00; */
2694 env->gpr[11] = do_syscall(env,
2695 env->gpr[11], /* return value */
2696 env->gpr[3], /* r3 - r7 are params */
2704 qemu_log_mask(CPU_LOG_INT, "\nFloating point error\n");
2707 qemu_log_mask(CPU_LOG_INT, "\nTrap\n");
2708 gdbsig = TARGET_SIGTRAP;
2711 qemu_log_mask(CPU_LOG_INT, "\nNR\n");
2714 EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n",
2716 gdbsig = TARGET_SIGILL;
2720 gdb_handlesig(cs, gdbsig);
2721 if (gdbsig != TARGET_SIGTRAP) {
2726 process_pending_signals(env);
2730 #endif /* TARGET_OPENRISC */
2733 void cpu_loop(CPUSH4State *env)
2735 CPUState *cs = CPU(sh_env_get_cpu(env));
2737 target_siginfo_t info;
2741 trapnr = cpu_sh4_exec(cs);
2747 ret = do_syscall(env,
2756 env->gregs[0] = ret;
2758 case EXCP_INTERRUPT:
2759 /* just indicate that signals should be handled asap */
2765 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2768 info.si_signo = sig;
2770 info.si_code = TARGET_TRAP_BRKPT;
2771 queue_signal(env, info.si_signo, &info);
2777 info.si_signo = TARGET_SIGSEGV;
2779 info.si_code = TARGET_SEGV_MAPERR;
2780 info._sifields._sigfault._addr = env->tea;
2781 queue_signal(env, info.si_signo, &info);
2785 printf ("Unhandled trap: 0x%x\n", trapnr);
2786 cpu_dump_state(cs, stderr, fprintf, 0);
2789 process_pending_signals (env);
2795 void cpu_loop(CPUCRISState *env)
2797 CPUState *cs = CPU(cris_env_get_cpu(env));
2799 target_siginfo_t info;
2803 trapnr = cpu_cris_exec(cs);
2808 info.si_signo = TARGET_SIGSEGV;
2810 /* XXX: check env->error_code */
2811 info.si_code = TARGET_SEGV_MAPERR;
2812 info._sifields._sigfault._addr = env->pregs[PR_EDA];
2813 queue_signal(env, info.si_signo, &info);
2816 case EXCP_INTERRUPT:
2817 /* just indicate that signals should be handled asap */
2820 ret = do_syscall(env,
2829 env->regs[10] = ret;
2835 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2838 info.si_signo = sig;
2840 info.si_code = TARGET_TRAP_BRKPT;
2841 queue_signal(env, info.si_signo, &info);
2846 printf ("Unhandled trap: 0x%x\n", trapnr);
2847 cpu_dump_state(cs, stderr, fprintf, 0);
2850 process_pending_signals (env);
2855 #ifdef TARGET_MICROBLAZE
2856 void cpu_loop(CPUMBState *env)
2858 CPUState *cs = CPU(mb_env_get_cpu(env));
2860 target_siginfo_t info;
2864 trapnr = cpu_mb_exec(cs);
2869 info.si_signo = TARGET_SIGSEGV;
2871 /* XXX: check env->error_code */
2872 info.si_code = TARGET_SEGV_MAPERR;
2873 info._sifields._sigfault._addr = 0;
2874 queue_signal(env, info.si_signo, &info);
2877 case EXCP_INTERRUPT:
2878 /* just indicate that signals should be handled asap */
2881 /* Return address is 4 bytes after the call. */
2883 env->sregs[SR_PC] = env->regs[14];
2884 ret = do_syscall(env,
2896 env->regs[17] = env->sregs[SR_PC] + 4;
2897 if (env->iflags & D_FLAG) {
2898 env->sregs[SR_ESR] |= 1 << 12;
2899 env->sregs[SR_PC] -= 4;
2900 /* FIXME: if branch was immed, replay the imm as well. */
2903 env->iflags &= ~(IMM_FLAG | D_FLAG);
2905 switch (env->sregs[SR_ESR] & 31) {
2906 case ESR_EC_DIVZERO:
2907 info.si_signo = TARGET_SIGFPE;
2909 info.si_code = TARGET_FPE_FLTDIV;
2910 info._sifields._sigfault._addr = 0;
2911 queue_signal(env, info.si_signo, &info);
2914 info.si_signo = TARGET_SIGFPE;
2916 if (env->sregs[SR_FSR] & FSR_IO) {
2917 info.si_code = TARGET_FPE_FLTINV;
2919 if (env->sregs[SR_FSR] & FSR_DZ) {
2920 info.si_code = TARGET_FPE_FLTDIV;
2922 info._sifields._sigfault._addr = 0;
2923 queue_signal(env, info.si_signo, &info);
2926 printf ("Unhandled hw-exception: 0x%x\n",
2927 env->sregs[SR_ESR] & ESR_EC_MASK);
2928 cpu_dump_state(cs, stderr, fprintf, 0);
2937 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2940 info.si_signo = sig;
2942 info.si_code = TARGET_TRAP_BRKPT;
2943 queue_signal(env, info.si_signo, &info);
2948 printf ("Unhandled trap: 0x%x\n", trapnr);
2949 cpu_dump_state(cs, stderr, fprintf, 0);
2952 process_pending_signals (env);
2959 void cpu_loop(CPUM68KState *env)
2961 CPUState *cs = CPU(m68k_env_get_cpu(env));
2964 target_siginfo_t info;
2965 TaskState *ts = cs->opaque;
2969 trapnr = cpu_m68k_exec(cs);
2974 if (ts->sim_syscalls) {
2976 get_user_u16(nr, env->pc + 2);
2978 do_m68k_simcall(env, nr);
2984 case EXCP_HALT_INSN:
2985 /* Semihosting syscall. */
2987 do_m68k_semihosting(env, env->dregs[0]);
2991 case EXCP_UNSUPPORTED:
2993 info.si_signo = TARGET_SIGILL;
2995 info.si_code = TARGET_ILL_ILLOPN;
2996 info._sifields._sigfault._addr = env->pc;
2997 queue_signal(env, info.si_signo, &info);
3001 ts->sim_syscalls = 0;
3004 env->dregs[0] = do_syscall(env,
3015 case EXCP_INTERRUPT:
3016 /* just indicate that signals should be handled asap */
3020 info.si_signo = TARGET_SIGSEGV;
3022 /* XXX: check env->error_code */
3023 info.si_code = TARGET_SEGV_MAPERR;
3024 info._sifields._sigfault._addr = env->mmu.ar;
3025 queue_signal(env, info.si_signo, &info);
3032 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
3035 info.si_signo = sig;
3037 info.si_code = TARGET_TRAP_BRKPT;
3038 queue_signal(env, info.si_signo, &info);
3043 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
3046 process_pending_signals(env);
3049 #endif /* TARGET_M68K */
3052 static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)
3054 target_ulong addr, val, tmp;
3055 target_siginfo_t info;
3058 addr = env->lock_addr;
3059 tmp = env->lock_st_addr;
3060 env->lock_addr = -1;
3061 env->lock_st_addr = 0;
3067 if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
3071 if (val == env->lock_value) {
3073 if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {
3090 info.si_signo = TARGET_SIGSEGV;
3092 info.si_code = TARGET_SEGV_MAPERR;
3093 info._sifields._sigfault._addr = addr;
3094 queue_signal(env, TARGET_SIGSEGV, &info);
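/*
 * Target-independent sketch (not QEMU code) of the load-locked /
 * store-conditional emulation pattern used above: remember the value seen
 * by the load-locked, and at store-conditional time succeed only if memory
 * still holds that value.  Types and names here are hypothetical.
 */
typedef struct {
    uint64_t lock_addr;     /* address of the pending load-locked, or -1 */
    uint64_t lock_value;    /* value observed by the load-locked */
} llsc_state_sketch;

static int store_conditional_sketch(llsc_state_sketch *s, uint64_t *mem,
                                    uint64_t newval)
{
    int success = 0;

    if (s->lock_addr == (uint64_t)(uintptr_t)mem && *mem == s->lock_value) {
        *mem = newval;      /* reservation still valid: perform the store */
        success = 1;
    }
    s->lock_addr = (uint64_t)-1;    /* the reservation is consumed either way */
    return success;
}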
3097 void cpu_loop(CPUAlphaState *env)
3099 CPUState *cs = CPU(alpha_env_get_cpu(env));
3101 target_siginfo_t info;
3106 trapnr = cpu_alpha_exec(cs);
3109 /* All of the traps imply a transition through PALcode, which
3110 implies an REI instruction has been executed. Which means
3111 that the intr_flag should be cleared. */
3116 fprintf(stderr, "Reset requested. Exit\n");
3120 fprintf(stderr, "Machine check exception. Exit\n");
3123 case EXCP_SMP_INTERRUPT:
3124 case EXCP_CLK_INTERRUPT:
3125 case EXCP_DEV_INTERRUPT:
3126 fprintf(stderr, "External interrupt. Exit\n");
3130 env->lock_addr = -1;
3131 info.si_signo = TARGET_SIGSEGV;
3133 info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
3134 ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
3135 info._sifields._sigfault._addr = env->trap_arg0;
3136 queue_signal(env, info.si_signo, &info);
3139 env->lock_addr = -1;
3140 info.si_signo = TARGET_SIGBUS;
3142 info.si_code = TARGET_BUS_ADRALN;
3143 info._sifields._sigfault._addr = env->trap_arg0;
3144 queue_signal(env, info.si_signo, &info);
3148 env->lock_addr = -1;
3149 info.si_signo = TARGET_SIGILL;
3151 info.si_code = TARGET_ILL_ILLOPC;
3152 info._sifields._sigfault._addr = env->pc;
3153 queue_signal(env, info.si_signo, &info);
3156 env->lock_addr = -1;
3157 info.si_signo = TARGET_SIGFPE;
3159 info.si_code = TARGET_FPE_FLTINV;
3160 info._sifields._sigfault._addr = env->pc;
3161 queue_signal(env, info.si_signo, &info);
3164 /* No-op. Linux simply re-enables the FPU. */
3167 env->lock_addr = -1;
3168 switch (env->error_code) {
3171 info.si_signo = TARGET_SIGTRAP;
3173 info.si_code = TARGET_TRAP_BRKPT;
3174 info._sifields._sigfault._addr = env->pc;
3175 queue_signal(env, info.si_signo, &info);
3179 info.si_signo = TARGET_SIGTRAP;
3182 info._sifields._sigfault._addr = env->pc;
3183 queue_signal(env, info.si_signo, &info);
3187 trapnr = env->ir[IR_V0];
3188 sysret = do_syscall(env, trapnr,
3189 env->ir[IR_A0], env->ir[IR_A1],
3190 env->ir[IR_A2], env->ir[IR_A3],
3191 env->ir[IR_A4], env->ir[IR_A5],
3193 if (trapnr == TARGET_NR_sigreturn
3194 || trapnr == TARGET_NR_rt_sigreturn) {
3197 /* Syscall writes 0 to V0 to bypass error check, similar
3198 to how this is handled internal to Linux kernel.
3199 (Ab)use trapnr temporarily as boolean indicating error. */
3200 trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
3201 env->ir[IR_V0] = (trapnr ? -sysret : sysret);
3202 env->ir[IR_A3] = trapnr;
3206 /* ??? We can probably elide the code using page_unprotect
3207 that is checking for self-modifying code. Instead we
3208 could simply call tb_flush here. Until we work out the
3209 changes required to turn off the extra write protection,
3210 this can be a no-op. */
3214 /* Handled in the translator for usermode. */
3218 /* Handled in the translator for usermode. */
3222 info.si_signo = TARGET_SIGFPE;
3223 switch (env->ir[IR_A0]) {
3224 case TARGET_GEN_INTOVF:
3225 info.si_code = TARGET_FPE_INTOVF;
3227 case TARGET_GEN_INTDIV:
3228 info.si_code = TARGET_FPE_INTDIV;
3230 case TARGET_GEN_FLTOVF:
3231 info.si_code = TARGET_FPE_FLTOVF;
3233 case TARGET_GEN_FLTUND:
3234 info.si_code = TARGET_FPE_FLTUND;
3236 case TARGET_GEN_FLTINV:
3237 info.si_code = TARGET_FPE_FLTINV;
3239 case TARGET_GEN_FLTINE:
3240 info.si_code = TARGET_FPE_FLTRES;
3242 case TARGET_GEN_ROPRAND:
3246 info.si_signo = TARGET_SIGTRAP;
3251 info._sifields._sigfault._addr = env->pc;
3252 queue_signal(env, info.si_signo, &info);
3259 info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
3260 if (info.si_signo) {
3261 env->lock_addr = -1;
3263 info.si_code = TARGET_TRAP_BRKPT;
3264 queue_signal(env, info.si_signo, &info);
3269 do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
3271 case EXCP_INTERRUPT:
3272 /* Just indicate that signals should be handled asap. */
3275 printf ("Unhandled trap: 0x%x\n", trapnr);
3276 cpu_dump_state(cs, stderr, fprintf, 0);
3279 process_pending_signals (env);
3282 #endif /* TARGET_ALPHA */
3285 void cpu_loop(CPUS390XState *env)
3287 CPUState *cs = CPU(s390_env_get_cpu(env));
3289 target_siginfo_t info;
3294 trapnr = cpu_s390x_exec(cs);
3297 case EXCP_INTERRUPT:
3298 /* Just indicate that signals should be handled asap. */
3302 n = env->int_svc_code;
3304 /* syscalls > 255 */
3307 env->psw.addr += env->int_svc_ilen;
3308 env->regs[2] = do_syscall(env, n, env->regs[2], env->regs[3],
3309 env->regs[4], env->regs[5],
3310 env->regs[6], env->regs[7], 0, 0);
3314 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
3316 n = TARGET_TRAP_BRKPT;
3321 n = env->int_pgm_code;
3324 case PGM_PRIVILEGED:
3325 sig = TARGET_SIGILL;
3326 n = TARGET_ILL_ILLOPC;
3328 case PGM_PROTECTION:
3329 case PGM_ADDRESSING:
3330 sig = TARGET_SIGSEGV;
3331 /* XXX: check env->error_code */
3332 n = TARGET_SEGV_MAPERR;
3333 addr = env->__excp_addr;
3336 case PGM_SPECIFICATION:
3337 case PGM_SPECIAL_OP:
3340 sig = TARGET_SIGILL;
3341 n = TARGET_ILL_ILLOPN;
3344 case PGM_FIXPT_OVERFLOW:
3345 sig = TARGET_SIGFPE;
3346 n = TARGET_FPE_INTOVF;
3348 case PGM_FIXPT_DIVIDE:
3349 sig = TARGET_SIGFPE;
3350 n = TARGET_FPE_INTDIV;
3354 n = (env->fpc >> 8) & 0xff;
3356 /* compare-and-trap */
3359 /* An IEEE exception, simulated or otherwise. */
3361 n = TARGET_FPE_FLTINV;
3362 } else if (n & 0x40) {
3363 n = TARGET_FPE_FLTDIV;
3364 } else if (n & 0x20) {
3365 n = TARGET_FPE_FLTOVF;
3366 } else if (n & 0x10) {
3367 n = TARGET_FPE_FLTUND;
3368 } else if (n & 0x08) {
3369 n = TARGET_FPE_FLTRES;
3371 /* ??? Quantum exception; BFP, DFP error. */
3374 sig = TARGET_SIGFPE;
3379 fprintf(stderr, "Unhandled program exception: %#x\n", n);
3380 cpu_dump_state(cs, stderr, fprintf, 0);
3386 addr = env->psw.addr;
3388 info.si_signo = sig;
3391 info._sifields._sigfault._addr = addr;
3392 queue_signal(env, info.si_signo, &info);
3396 fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
3397 cpu_dump_state(cs, stderr, fprintf, 0);
3400 process_pending_signals (env);
3404 #endif /* TARGET_S390X */
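/*
 * Compact sketch (not QEMU code) of the data-exception-code decoding done
 * in the program-exception handling above: the byte taken from the FPC is
 * tested from the highest-priority flag down.  The 0x80 test for the
 * invalid-operation flag is an assumption here, since that branch is
 * elided in the excerpt above.
 */
static int dxc_to_fpe_code_sketch(int dxc)
{
    if (dxc & 0x80) {
        return TARGET_FPE_FLTINV;   /* invalid operation (assumed bit) */
    } else if (dxc & 0x40) {
        return TARGET_FPE_FLTDIV;   /* divide by zero */
    } else if (dxc & 0x20) {
        return TARGET_FPE_FLTOVF;   /* overflow */
    } else if (dxc & 0x10) {
        return TARGET_FPE_FLTUND;   /* underflow */
    } else if (dxc & 0x08) {
        return TARGET_FPE_FLTRES;   /* inexact */
    }
    return 0;                       /* e.g. quantum exception: unmapped */
}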
3406 #ifdef TARGET_TILEGX
3408 static void gen_sigill_reg(CPUTLGState *env)
3410 target_siginfo_t info;
3412 info.si_signo = TARGET_SIGILL;
3414 info.si_code = TARGET_ILL_PRVREG;
3415 info._sifields._sigfault._addr = env->pc;
3416 queue_signal(env, info.si_signo, &info);
3419 static void do_signal(CPUTLGState *env, int signo, int sigcode)
3421 target_siginfo_t info;
3423 info.si_signo = signo;
3425 info._sifields._sigfault._addr = env->pc;
3427 if (signo == TARGET_SIGSEGV) {
3428 /* The passed in sigcode is a dummy; check for a page mapping
3429 and pass either MAPERR or ACCERR. */
3430 target_ulong addr = env->excaddr;
3431 info._sifields._sigfault._addr = addr;
3432 if (page_check_range(addr, 1, PAGE_VALID) < 0) {
3433 sigcode = TARGET_SEGV_MAPERR;
3435 sigcode = TARGET_SEGV_ACCERR;
3438 info.si_code = sigcode;
3440 queue_signal(env, info.si_signo, &info);
3443 static void gen_sigsegv_maperr(CPUTLGState *env, target_ulong addr)
3445 env->excaddr = addr;
3446 do_signal(env, TARGET_SIGSEGV, 0);
3449 static void set_regval(CPUTLGState *env, uint8_t reg, uint64_t val)
3451 if (unlikely(reg >= TILEGX_R_COUNT)) {
3462 gen_sigill_reg(env);
3465 g_assert_not_reached();
3468 env->regs[reg] = val;
3472 * Compare the 8-byte contents of the CmpValue SPR with the 8-byte value in
3473 * memory at the address held in the first source register. If the values are
3474 * not equal, then no memory operation is performed. If the values are equal,
3475 * the 8-byte quantity from the second source register is written into memory
3476 * at the address held in the first source register. In either case, the result
3477 * of the instruction is the value read from memory. The compare and write to
3478 * memory are atomic and thus can be used for synchronization purposes. This
3479 * instruction only operates for addresses aligned to a 8-byte boundary.
3480 * Unaligned memory access causes an Unaligned Data Reference interrupt.
3482 * Functional Description (64-bit)
3483 * uint64_t memVal = memoryReadDoubleWord (rf[SrcA]);
3484 * rf[Dest] = memVal;
3485 * if (memVal == SPR[CmpValueSPR])
3486 * memoryWriteDoubleWord (rf[SrcA], rf[SrcB]);
3488 * Functional Description (32-bit)
3489 * uint64_t memVal = signExtend32 (memoryReadWord (rf[SrcA]));
3490 * rf[Dest] = memVal;
3491 * if (memVal == signExtend32 (SPR[CmpValueSPR]))
3492 * memoryWriteWord (rf[SrcA], rf[SrcB]);
3495 * This function also processes exch and exch4 which need not process SPR.
3497 static void do_exch(CPUTLGState *env, bool quad, bool cmp)
3500 target_long val, sprval;
3504 addr = env->atomic_srca;
3505 if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
3506 goto sigsegv_maperr;
3511 sprval = env->spregs[TILEGX_SPR_CMPEXCH];
3513 sprval = sextract64(env->spregs[TILEGX_SPR_CMPEXCH], 0, 32);
3517 if (!cmp || val == sprval) {
3518 target_long valb = env->atomic_srcb;
3519 if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
3520 goto sigsegv_maperr;
3524 set_regval(env, env->atomic_dstr, val);
3530 gen_sigsegv_maperr(env, addr);
3533 static void do_fetch(CPUTLGState *env, int trapnr, bool quad)
3537 target_long val, valb;
3541 addr = env->atomic_srca;
3542 valb = env->atomic_srcb;
3543 if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
3544 goto sigsegv_maperr;
3548 case TILEGX_EXCP_OPCODE_FETCHADD:
3549 case TILEGX_EXCP_OPCODE_FETCHADD4:
3552 case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
3558 case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
3560 if ((int32_t)valb < 0) {
3564 case TILEGX_EXCP_OPCODE_FETCHAND:
3565 case TILEGX_EXCP_OPCODE_FETCHAND4:
3568 case TILEGX_EXCP_OPCODE_FETCHOR:
3569 case TILEGX_EXCP_OPCODE_FETCHOR4:
3573 g_assert_not_reached();
3577 if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
3578 goto sigsegv_maperr;
3582 set_regval(env, env->atomic_dstr, val);
3588 gen_sigsegv_maperr(env, addr);
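/*
 * Plain-C sketch (not QEMU code) of the fetch-and-op family handled by
 * do_fetch() above: every variant returns the old memory value in the
 * destination register and writes a combined value back; the "addgez"
 * forms skip the store when the sum would be negative.  Only 'fetchadd'
 * is shown; the and/or variants just change the combining step.
 */
static int64_t fetch_and_add_sketch(int64_t *mem, int64_t addend)
{
    int64_t old = *mem;     /* value handed back to the destination register */
    *mem = old + addend;    /* on real hardware this read-modify-write is atomic */
    return old;
}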
3591 void cpu_loop(CPUTLGState *env)
3593 CPUState *cs = CPU(tilegx_env_get_cpu(env));
3598 trapnr = cpu_tilegx_exec(cs);
3601 case TILEGX_EXCP_SYSCALL:
3602 env->regs[TILEGX_R_RE] = do_syscall(env, env->regs[TILEGX_R_NR],
3603 env->regs[0], env->regs[1],
3604 env->regs[2], env->regs[3],
3605 env->regs[4], env->regs[5],
3606 env->regs[6], env->regs[7]);
3607 env->regs[TILEGX_R_ERR] = TILEGX_IS_ERRNO(env->regs[TILEGX_R_RE])
3608 ? - env->regs[TILEGX_R_RE]
3611 case TILEGX_EXCP_OPCODE_EXCH:
3612 do_exch(env, true, false);
3614 case TILEGX_EXCP_OPCODE_EXCH4:
3615 do_exch(env, false, false);
3617 case TILEGX_EXCP_OPCODE_CMPEXCH:
3618 do_exch(env, true, true);
3620 case TILEGX_EXCP_OPCODE_CMPEXCH4:
3621 do_exch(env, false, true);
3623 case TILEGX_EXCP_OPCODE_FETCHADD:
3624 case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
3625 case TILEGX_EXCP_OPCODE_FETCHAND:
3626 case TILEGX_EXCP_OPCODE_FETCHOR:
3627 do_fetch(env, trapnr, true);
3629 case TILEGX_EXCP_OPCODE_FETCHADD4:
3630 case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
3631 case TILEGX_EXCP_OPCODE_FETCHAND4:
3632 case TILEGX_EXCP_OPCODE_FETCHOR4:
3633 do_fetch(env, trapnr, false);
3635 case TILEGX_EXCP_SIGNAL:
3636 do_signal(env, env->signo, env->sigcode);
3638 case TILEGX_EXCP_REG_IDN_ACCESS:
3639 case TILEGX_EXCP_REG_UDN_ACCESS:
3640 gen_sigill_reg(env);
3643 fprintf(stderr, "trapnr is %d[0x%x].\n", trapnr, trapnr);
3644 g_assert_not_reached();
3646 process_pending_signals(env);
3652 THREAD CPUState *thread_cpu;
3654 void task_settid(TaskState *ts)
3656 if (ts->ts_tid == 0) {
3657 ts->ts_tid = (pid_t)syscall(SYS_gettid);
3661 void stop_all_tasks(void)
3664 * We trust that when using NPTL, start_exclusive()
3665 * handles thread stopping correctly.
3670 /* Assumes contents are already zeroed. */
3671 void init_task_state(TaskState *ts)
3676 ts->first_free = ts->sigqueue_table;
3677 for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
3678 ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1];
3680 ts->sigqueue_table[i].next = NULL;
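/*
 * Minimal sketch of how the free list built by init_task_state() above is
 * consumed; it assumes the sigqueue/TaskState layout declared in qemu.h,
 * and the helper name itself is hypothetical (the real allocation lives in
 * signal.c).
 */
static struct sigqueue *alloc_sigqueue_sketch(TaskState *ts)
{
    struct sigqueue *q = ts->first_free;

    if (q) {
        ts->first_free = q->next;   /* pop the head of the free list */
    }
    return q;                        /* NULL means the table is exhausted */
}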
3683 CPUArchState *cpu_copy(CPUArchState *env)
3685 CPUState *cpu = ENV_GET_CPU(env);
3686 CPUState *new_cpu = cpu_init(cpu_model);
3687 CPUArchState *new_env = new_cpu->env_ptr;
3691 /* Reset non arch specific state */
3694 memcpy(new_env, env, sizeof(CPUArchState));
3696 /* Clone all break/watchpoints.
3697 Note: Once we support ptrace with hw-debug register access, make sure
3698 BP_CPU break/watchpoints are handled correctly on clone. */
3699 QTAILQ_INIT(&new_cpu->breakpoints);
3700 QTAILQ_INIT(&new_cpu->watchpoints);
3701 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
3702 cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
3704 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
3705 cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
3711 static void handle_arg_help(const char *arg)
3713 usage(EXIT_SUCCESS);
3716 static void handle_arg_log(const char *arg)
3720 mask = qemu_str_to_log_mask(arg);
3722 qemu_print_log_usage(stdout);
3728 static void handle_arg_log_filename(const char *arg)
3730 qemu_set_log_filename(arg);
3733 static void handle_arg_set_env(const char *arg)
3735 char *r, *p, *token;
3736 r = p = strdup(arg);
3737 while ((token = strsep(&p, ",")) != NULL) {
3738 if (envlist_setenv(envlist, token) != 0) {
3739 usage(EXIT_FAILURE);
3745 static void handle_arg_unset_env(const char *arg)
3747 char *r, *p, *token;
3748 r = p = strdup(arg);
3749 while ((token = strsep(&p, ",")) != NULL) {
3750 if (envlist_unsetenv(envlist, token) != 0) {
3751 usage(EXIT_FAILURE);
3757 static void handle_arg_argv0(const char *arg)
3759 argv0 = strdup(arg);
3762 static void handle_arg_stack_size(const char *arg)
3765 guest_stack_size = strtoul(arg, &p, 0);
3766 if (guest_stack_size == 0) {
3767 usage(EXIT_FAILURE);
3771 guest_stack_size *= 1024 * 1024;
3772 } else if (*p == 'k' || *p == 'K') {
3773 guest_stack_size *= 1024;
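/*
 * Standalone sketch of the size-suffix handling above, assuming the usual
 * convention that an 'M'/'m' suffix means MiB and 'K'/'k' means KiB (the
 * megabyte branch is elided in the excerpt above).  Hypothetical helper,
 * not QEMU API.
 */
static unsigned long parse_size_with_suffix_sketch(const char *arg)
{
    char *end;
    unsigned long val = strtoul(arg, &end, 0);

    if (*end == 'M' || *end == 'm') {
        val *= 1024 * 1024;
    } else if (*end == 'K' || *end == 'k') {
        val *= 1024;
    }
    return val;     /* 0 signals a parse error to the caller */
}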
3777 static void handle_arg_ld_prefix(const char *arg)
3779 interp_prefix = strdup(arg);
3782 static void handle_arg_pagesize(const char *arg)
3784 qemu_host_page_size = atoi(arg);
3785 if (qemu_host_page_size == 0 ||
3786 (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
3787 fprintf(stderr, "page size must be a power of two\n");
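/*
 * The check above is the standard power-of-two test: for nonzero x,
 * (x & (x - 1)) == 0 exactly when x has a single bit set.  Tiny
 * illustrative helper (not used by the surrounding code):
 */
static inline int is_power_of_two_sketch(unsigned long x)
{
    return x != 0 && (x & (x - 1)) == 0;
}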
3792 static void handle_arg_randseed(const char *arg)
3794 unsigned long long seed;
3796 if (parse_uint_full(arg, &seed, 0) != 0 || seed > UINT_MAX) {
3797 fprintf(stderr, "Invalid seed number: %s\n", arg);
3803 static void handle_arg_gdb(const char *arg)
3805 gdbstub_port = atoi(arg);
3808 static void handle_arg_uname(const char *arg)
3810 qemu_uname_release = strdup(arg);
3813 static void handle_arg_cpu(const char *arg)
3815 cpu_model = strdup(arg);
3816 if (cpu_model == NULL || is_help_option(cpu_model)) {
3817 /* XXX: implement xxx_cpu_list for targets that still miss it */
3818 #if defined(cpu_list)
3819 cpu_list(stdout, &fprintf);
3825 static void handle_arg_guest_base(const char *arg)
3827 guest_base = strtol(arg, NULL, 0);
3828 have_guest_base = 1;
3831 static void handle_arg_reserved_va(const char *arg)
3835 reserved_va = strtoul(arg, &p, 0);
3849 unsigned long unshifted = reserved_va;
3851 reserved_va <<= shift;
3852 if (((reserved_va >> shift) != unshifted)
3853 #if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
3854 || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
3857 fprintf(stderr, "Reserved virtual address too big\n");
3862 fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
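/*
 * Sketch of the overflow test used by the -R parser above: after scaling
 * by a suffix-dependent shift, shifting back must reproduce the original
 * value, otherwise high bits were lost.  Hypothetical helper.
 */
static int shift_overflows_sketch(unsigned long val, int shift)
{
    unsigned long shifted = val << shift;

    return (shifted >> shift) != val;
}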
3867 static void handle_arg_singlestep(const char *arg)
3872 static void handle_arg_strace(const char *arg)
3877 static void handle_arg_version(const char *arg)
3879 printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION
3880 ", Copyright (c) 2003-2008 Fabrice Bellard\n");
3884 struct qemu_argument {
3888 void (*handle_opt)(const char *arg);
3889 const char *example;
3893 static const struct qemu_argument arg_table[] = {
3894 {"h", "", false, handle_arg_help,
3895 "", "print this help"},
3896 {"help", "", false, handle_arg_help,
3898 {"g", "QEMU_GDB", true, handle_arg_gdb,
3899 "port", "wait gdb connection to 'port'"},
3900 {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix,
3901 "path", "set the elf interpreter prefix to 'path'"},
3902 {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size,
3903 "size", "set the stack size to 'size' bytes"},
3904 {"cpu", "QEMU_CPU", true, handle_arg_cpu,
3905 "model", "select CPU (-cpu help for list)"},
3906 {"E", "QEMU_SET_ENV", true, handle_arg_set_env,
3907 "var=value", "sets targets environment variable (see below)"},
3908 {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env,
3909 "var", "unsets targets environment variable (see below)"},
3910 {"0", "QEMU_ARGV0", true, handle_arg_argv0,
3911 "argv0", "forces target process argv[0] to be 'argv0'"},
3912 {"r", "QEMU_UNAME", true, handle_arg_uname,
3913 "uname", "set qemu uname release string to 'uname'"},
3914 {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base,
3915 "address", "set guest_base address to 'address'"},
3916 {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va,
3917 "size", "reserve 'size' bytes for guest virtual address space"},
3918 {"d", "QEMU_LOG", true, handle_arg_log,
3919 "item[,...]", "enable logging of specified items "
3920 "(use '-d help' for a list of items)"},
3921 {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename,
3922 "logfile", "write logs to 'logfile' (default stderr)"},
3923 {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize,
3924 "pagesize", "set the host page size to 'pagesize'"},
3925 {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep,
3926 "", "run in singlestep mode"},
3927 {"strace", "QEMU_STRACE", false, handle_arg_strace,
3928 "", "log system calls"},
3929 {"seed", "QEMU_RAND_SEED", true, handle_arg_randseed,
3930 "", "Seed for pseudo-random number generator"},
3931 {"version", "QEMU_VERSION", false, handle_arg_version,
3932 "", "display version information and exit"},
3933 {NULL, NULL, false, NULL, NULL, NULL}
3936 static void usage(int exitcode)
3938 const struct qemu_argument *arginfo;
3942 printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n"
3943 "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n"
3945 "Options and associated environment variables:\n"
3948 /* Calculate column widths. We must always have at least enough space
3949 * for the column header.
3951 maxarglen = strlen("Argument");
3952 maxenvlen = strlen("Env-variable");
3954 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
3955 int arglen = strlen(arginfo->argv);
3956 if (arginfo->has_arg) {
3957 arglen += strlen(arginfo->example) + 1;
3959 if (strlen(arginfo->env) > maxenvlen) {
3960 maxenvlen = strlen(arginfo->env);
3962 if (arglen > maxarglen) {
3967 printf("%-*s %-*s Description\n", maxarglen+1, "Argument",
3968 maxenvlen, "Env-variable");
3970 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
3971 if (arginfo->has_arg) {
3972 printf("-%s %-*s %-*s %s\n", arginfo->argv,
3973 (int)(maxarglen - strlen(arginfo->argv) - 1),
3974 arginfo->example, maxenvlen, arginfo->env, arginfo->help);
3976 printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv,
3977 maxenvlen, arginfo->env,
3984 "QEMU_LD_PREFIX = %s\n"
3985 "QEMU_STACK_SIZE = %ld byte\n",
3990 "You can use -E and -U options or the QEMU_SET_ENV and\n"
3991 "QEMU_UNSET_ENV environment variables to set and unset\n"
3992 "environment variables for the target process.\n"
3993 "It is possible to provide several variables by separating them\n"
3994 "by commas in getsubopt(3) style. Additionally it is possible to\n"
3995 "provide the -E and -U options multiple times.\n"
3996 "The following lines are equivalent:\n"
3997 " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
3998 " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
3999 " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
4000 "Note that if you provide several changes to a single variable\n"
4001 "the last change will stay in effect.\n");
4006 static int parse_args(int argc, char **argv)
4010 const struct qemu_argument *arginfo;
4012 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
4013 if (arginfo->env == NULL) {
4017 r = getenv(arginfo->env);
4019 arginfo->handle_opt(r);
4025 if (optind >= argc) {
4034 if (!strcmp(r, "-")) {
4037 /* Treat --foo the same as -foo. */
4042 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
4043 if (!strcmp(r, arginfo->argv)) {
4044 if (arginfo->has_arg) {
4045 if (optind >= argc) {
4046 (void) fprintf(stderr,
4047 "qemu: missing argument for option '%s'\n", r);
4050 arginfo->handle_opt(argv[optind]);
4053 arginfo->handle_opt(NULL);
4059 /* no option matched the current argv */
4060 if (arginfo->handle_opt == NULL) {
4061 (void) fprintf(stderr, "qemu: unknown option '%s'\n", r);
4066 if (optind >= argc) {
4067 (void) fprintf(stderr, "qemu: no user program specified\n");
4071 filename = argv[optind];
4072 exec_path = argv[optind];
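/*
 * Illustrative note on the precedence wired up by parse_args(): the QEMU_*
 * environment variables are applied first and command-line options
 * afterwards, so for example
 *
 *     QEMU_STACK_SIZE=16M qemu-TARGET ./a.out
 *     qemu-TARGET -s 16M ./a.out
 *
 * configure the same stack size, and an explicit -s on the command line
 * overrides the environment value.  (Hypothetical command lines.)
 */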
4077 int main(int argc, char **argv, char **envp)
4079 struct target_pt_regs regs1, *regs = &regs1;
4080 struct image_info info1, *info = &info1;
4081 struct linux_binprm bprm;
4086 char **target_environ, **wrk;
4093 module_call_init(MODULE_INIT_QOM);
4095 if ((envlist = envlist_create()) == NULL) {
4096 (void) fprintf(stderr, "Unable to allocate envlist\n");
4100 /* add current environment into the list */
4101 for (wrk = environ; *wrk != NULL; wrk++) {
4102 (void) envlist_setenv(envlist, *wrk);
4105 /* Read the stack limit from the kernel. If it's "unlimited",
4106 then we can do little else besides use the default. */
4109 if (getrlimit(RLIMIT_STACK, &lim) == 0
4110 && lim.rlim_cur != RLIM_INFINITY
4111 && lim.rlim_cur == (target_long)lim.rlim_cur) {
4112 guest_stack_size = lim.rlim_cur;
4117 #if defined(cpudef_setup)
4118 cpudef_setup(); /* parse cpu definitions in target config file (TBD) */
4123 optind = parse_args(argc, argv);
4126 memset(regs, 0, sizeof(struct target_pt_regs));
4128 /* Zero out image_info */
4129 memset(info, 0, sizeof(struct image_info));
4131 memset(&bprm, 0, sizeof (bprm));
4133 /* Scan interp_prefix dir for replacement files. */
4134 init_paths(interp_prefix);
4136 init_qemu_uname_release();
4138 if (cpu_model == NULL) {
4139 #if defined(TARGET_I386)
4140 #ifdef TARGET_X86_64
4141 cpu_model = "qemu64";
4143 cpu_model = "qemu32";
4145 #elif defined(TARGET_ARM)
4147 #elif defined(TARGET_UNICORE32)
4149 #elif defined(TARGET_M68K)
4151 #elif defined(TARGET_SPARC)
4152 #ifdef TARGET_SPARC64
4153 cpu_model = "TI UltraSparc II";
4155 cpu_model = "Fujitsu MB86904";
4157 #elif defined(TARGET_MIPS)
4158 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
4163 #elif defined TARGET_OPENRISC
4164 cpu_model = "or1200";
4165 #elif defined(TARGET_PPC)
4166 # ifdef TARGET_PPC64
4167 cpu_model = "POWER7";
4171 #elif defined TARGET_SH4
4172 cpu_model = TYPE_SH7785_CPU;
4178 /* NOTE: we need to init the CPU at this stage to get
4179 qemu_host_page_size */
4180 cpu = cpu_init(cpu_model);
4182 fprintf(stderr, "Unable to find CPU definition\n");
4190 if (getenv("QEMU_STRACE")) {
4194 if (getenv("QEMU_RAND_SEED")) {
4195 handle_arg_randseed(getenv("QEMU_RAND_SEED"));
4198 target_environ = envlist_to_environ(envlist, NULL);
4199 envlist_free(envlist);
4202 * Now that page sizes are configured in cpu_init() we can do
4203 * proper page alignment for guest_base.
4205 guest_base = HOST_PAGE_ALIGN(guest_base);
4207 if (reserved_va || have_guest_base) {
4208 guest_base = init_guest_space(guest_base, reserved_va, 0,
4210 if (guest_base == (unsigned long)-1) {
4211 fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
4212 "space for use as guest address space (check your virtual "
4213 "memory ulimit setting or reserve less using -R option)\n",
4219 mmap_next_start = reserved_va;
4224 * Read in mmap_min_addr kernel parameter. This value is used
4225 * when loading the ELF image to determine whether guest_base
4226 * is needed. It is also used in mmap_find_vma.
4231 if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
4233 if (fscanf(fp, "%lu", &tmp) == 1) {
4234 mmap_min_addr = tmp;
4235 qemu_log_mask(CPU_LOG_PAGE, "host mmap_min_addr=0x%lx\n", mmap_min_addr);
4242 * Prepare copy of argv vector for target.
4244 target_argc = argc - optind;
4245 target_argv = calloc(target_argc + 1, sizeof (char *));
4246 if (target_argv == NULL) {
4247 (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");
4252 * If argv0 is specified (using '-0' switch) we replace
4253 * argv[0] pointer with the given one.
4256 if (argv0 != NULL) {
4257 target_argv[i++] = strdup(argv0);
4259 for (; i < target_argc; i++) {
4260 target_argv[i] = strdup(argv[optind + i]);
4262 target_argv[target_argc] = NULL;
4264 ts = g_new0(TaskState, 1);
4265 init_task_state(ts);
4266 /* build Task State */
4272 execfd = qemu_getauxval(AT_EXECFD);
4274 execfd = open(filename, O_RDONLY);
4276 printf("Error while loading %s: %s\n", filename, strerror(errno));
4277 _exit(EXIT_FAILURE);
4281 ret = loader_exec(execfd, filename, target_argv, target_environ, regs,
4284 printf("Error while loading %s: %s\n", filename, strerror(-ret));
4285 _exit(EXIT_FAILURE);
4288 for (wrk = target_environ; *wrk; wrk++) {
4292 free(target_environ);
4294 if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
4295 qemu_log("guest_base 0x%lx\n", guest_base);
4298 qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
4299 qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code);
4300 qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n",
4302 qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n",
4304 qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data);
4305 qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
4307 qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk);
4308 qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry);
4311 target_set_brk(info->brk);
4315 /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
4316 generating the prologue until now so that the prologue can take
4317 the real value of GUEST_BASE into account. */
4318 tcg_prologue_init(&tcg_ctx);
4320 #if defined(TARGET_I386)
4321 env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
4322 env->hflags |= HF_PE_MASK | HF_CPL_MASK;
4323 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4324 env->cr[4] |= CR4_OSFXSR_MASK;
4325 env->hflags |= HF_OSFXSR_MASK;
4327 #ifndef TARGET_ABI32
4328 /* enable 64 bit mode if possible */
4329 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
4330 fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
4333 env->cr[4] |= CR4_PAE_MASK;
4334 env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
4335 env->hflags |= HF_LMA_MASK;
4338 /* flags setup : we activate the IRQs by default as in user mode */
4339 env->eflags |= IF_MASK;
4341 /* linux register setup */
4342 #ifndef TARGET_ABI32
4343 env->regs[R_EAX] = regs->rax;
4344 env->regs[R_EBX] = regs->rbx;
4345 env->regs[R_ECX] = regs->rcx;
4346 env->regs[R_EDX] = regs->rdx;
4347 env->regs[R_ESI] = regs->rsi;
4348 env->regs[R_EDI] = regs->rdi;
4349 env->regs[R_EBP] = regs->rbp;
4350 env->regs[R_ESP] = regs->rsp;
4351 env->eip = regs->rip;
4353 env->regs[R_EAX] = regs->eax;
4354 env->regs[R_EBX] = regs->ebx;
4355 env->regs[R_ECX] = regs->ecx;
4356 env->regs[R_EDX] = regs->edx;
4357 env->regs[R_ESI] = regs->esi;
4358 env->regs[R_EDI] = regs->edi;
4359 env->regs[R_EBP] = regs->ebp;
4360 env->regs[R_ESP] = regs->esp;
4361 env->eip = regs->eip;
4364 /* linux interrupt setup */
4365 #ifndef TARGET_ABI32
4366 env->idt.limit = 511;
4368 env->idt.limit = 255;
4370 env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
4371 PROT_READ|PROT_WRITE,
4372 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4373 idt_table = g2h(env->idt.base);
4396 /* linux segment setup */
4398 uint64_t *gdt_table;
4399 env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
4400 PROT_READ|PROT_WRITE,
4401 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4402 env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
4403 gdt_table = g2h(env->gdt.base);
4405 write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
4406 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
4407 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
4409 /* 64 bit code segment */
4410 write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
4411 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
4413 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
4415 write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
4416 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
4417 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
4419 cpu_x86_load_seg(env, R_CS, __USER_CS);
4420 cpu_x86_load_seg(env, R_SS, __USER_DS);
4422 cpu_x86_load_seg(env, R_DS, __USER_DS);
4423 cpu_x86_load_seg(env, R_ES, __USER_DS);
4424 cpu_x86_load_seg(env, R_FS, __USER_DS);
4425 cpu_x86_load_seg(env, R_GS, __USER_DS);
4426 /* This hack makes Wine work... */
4427 env->segs[R_FS].selector = 0;
4429 cpu_x86_load_seg(env, R_DS, 0);
4430 cpu_x86_load_seg(env, R_ES, 0);
4431 cpu_x86_load_seg(env, R_FS, 0);
4432 cpu_x86_load_seg(env, R_GS, 0);
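/*
 * Note on the descriptors built above: type nibble 0xa marks a code
 * segment (execute/read) and 0x2 a data segment (read/write), and the
 * DPL of 3 lets the guest use them from user privilege.
 */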
4434 #elif defined(TARGET_AARCH64)
4438 if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
4440 "The selected ARM CPU does not support 64 bit mode\n");
4444 for (i = 0; i < 31; i++) {
4445 env->xregs[i] = regs->regs[i];
4448 env->xregs[31] = regs->sp;
4450 #elif defined(TARGET_ARM)
4453 cpsr_write(env, regs->uregs[16], 0xffffffff);
4454 for(i = 0; i < 16; i++) {
4455 env->regs[i] = regs->uregs[i];
4458 if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
4459 && (info->elf_flags & EF_ARM_BE8)) {
4460 env->bswap_code = 1;
4463 #elif defined(TARGET_UNICORE32)
4466 cpu_asr_write(env, regs->uregs[32], 0xffffffff);
4467 for (i = 0; i < 32; i++) {
4468 env->regs[i] = regs->uregs[i];
4471 #elif defined(TARGET_SPARC)
4475 env->npc = regs->npc;
4477 for(i = 0; i < 8; i++)
4478 env->gregs[i] = regs->u_regs[i];
4479 for(i = 0; i < 8; i++)
4480 env->regwptr[i] = regs->u_regs[i + 8];
4482 #elif defined(TARGET_PPC)
4486 #if defined(TARGET_PPC64)
4487 #if defined(TARGET_ABI32)
4488 env->msr &= ~((target_ulong)1 << MSR_SF);
4490 env->msr |= (target_ulong)1 << MSR_SF;
4493 env->nip = regs->nip;
4494 for(i = 0; i < 32; i++) {
4495 env->gpr[i] = regs->gpr[i];
4498 #elif defined(TARGET_M68K)
4501 env->dregs[0] = regs->d0;
4502 env->dregs[1] = regs->d1;
4503 env->dregs[2] = regs->d2;
4504 env->dregs[3] = regs->d3;
4505 env->dregs[4] = regs->d4;
4506 env->dregs[5] = regs->d5;
4507 env->dregs[6] = regs->d6;
4508 env->dregs[7] = regs->d7;
4509 env->aregs[0] = regs->a0;
4510 env->aregs[1] = regs->a1;
4511 env->aregs[2] = regs->a2;
4512 env->aregs[3] = regs->a3;
4513 env->aregs[4] = regs->a4;
4514 env->aregs[5] = regs->a5;
4515 env->aregs[6] = regs->a6;
4516 env->aregs[7] = regs->usp;
4518 ts->sim_syscalls = 1;
4520 #elif defined(TARGET_MICROBLAZE)
4522 env->regs[0] = regs->r0;
4523 env->regs[1] = regs->r1;
4524 env->regs[2] = regs->r2;
4525 env->regs[3] = regs->r3;
4526 env->regs[4] = regs->r4;
4527 env->regs[5] = regs->r5;
4528 env->regs[6] = regs->r6;
4529 env->regs[7] = regs->r7;
4530 env->regs[8] = regs->r8;
4531 env->regs[9] = regs->r9;
4532 env->regs[10] = regs->r10;
4533 env->regs[11] = regs->r11;
4534 env->regs[12] = regs->r12;
4535 env->regs[13] = regs->r13;
4536 env->regs[14] = regs->r14;
4537 env->regs[15] = regs->r15;
4538 env->regs[16] = regs->r16;
4539 env->regs[17] = regs->r17;
4540 env->regs[18] = regs->r18;
4541 env->regs[19] = regs->r19;
4542 env->regs[20] = regs->r20;
4543 env->regs[21] = regs->r21;
4544 env->regs[22] = regs->r22;
4545 env->regs[23] = regs->r23;
4546 env->regs[24] = regs->r24;
4547 env->regs[25] = regs->r25;
4548 env->regs[26] = regs->r26;
4549 env->regs[27] = regs->r27;
4550 env->regs[28] = regs->r28;
4551 env->regs[29] = regs->r29;
4552 env->regs[30] = regs->r30;
4553 env->regs[31] = regs->r31;
4554 env->sregs[SR_PC] = regs->pc;
4556 #elif defined(TARGET_MIPS)
4560 for(i = 0; i < 32; i++) {
4561 env->active_tc.gpr[i] = regs->regs[i];
4563 env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
4564 if (regs->cp0_epc & 1) {
4565 env->hflags |= MIPS_HFLAG_M16;
4568 #elif defined(TARGET_OPENRISC)
4572 for (i = 0; i < 32; i++) {
4573 env->gpr[i] = regs->gpr[i];
4579 #elif defined(TARGET_SH4)
4583 for(i = 0; i < 16; i++) {
4584 env->gregs[i] = regs->regs[i];
4588 #elif defined(TARGET_ALPHA)
4592 for(i = 0; i < 28; i++) {
4593 env->ir[i] = ((abi_ulong *)regs)[i];
4595 env->ir[IR_SP] = regs->usp;
4598 #elif defined(TARGET_CRIS)
4600 env->regs[0] = regs->r0;
4601 env->regs[1] = regs->r1;
4602 env->regs[2] = regs->r2;
4603 env->regs[3] = regs->r3;
4604 env->regs[4] = regs->r4;
4605 env->regs[5] = regs->r5;
4606 env->regs[6] = regs->r6;
4607 env->regs[7] = regs->r7;
4608 env->regs[8] = regs->r8;
4609 env->regs[9] = regs->r9;
4610 env->regs[10] = regs->r10;
4611 env->regs[11] = regs->r11;
4612 env->regs[12] = regs->r12;
4613 env->regs[13] = regs->r13;
4614 env->regs[14] = info->start_stack;
4615 env->regs[15] = regs->acr;
4616 env->pc = regs->erp;
4618 #elif defined(TARGET_S390X)
4621 for (i = 0; i < 16; i++) {
4622 env->regs[i] = regs->gprs[i];
4624 env->psw.mask = regs->psw.mask;
4625 env->psw.addr = regs->psw.addr;
4627 #elif defined(TARGET_TILEGX)
4630 for (i = 0; i < TILEGX_R_COUNT; i++) {
4631 env->regs[i] = regs->regs[i];
4633 for (i = 0; i < TILEGX_SPR_COUNT; i++) {
4639 #error unsupported target CPU
4642 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4643 ts->stack_base = info->start_stack;
4644 ts->heap_base = info->brk;
4645 /* This will be filled in on the first SYS_HEAPINFO call. */
4650 if (gdbserver_start(gdbstub_port) < 0) {
4651 fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
4655 gdb_handlesig(cpu, 0);