4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include <sys/syscall.h>
22 #include <sys/resource.h>
25 #include "qemu-common.h"
27 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
31 #include "qemu/timer.h"
32 #include "qemu/envlist.h"
39 static const char *filename
;
40 static const char *argv0
;
41 static int gdbstub_port
;
42 static envlist_t
*envlist
;
43 static const char *cpu_model
;
44 unsigned long mmap_min_addr
;
48 #define EXCP_DUMP(env, fmt, ...) \
50 CPUState *cs = ENV_GET_CPU(env); \
51 fprintf(stderr, fmt , ## __VA_ARGS__); \
52 cpu_dump_state(cs, stderr, fprintf, 0); \
53 if (qemu_log_separate()) { \
54 qemu_log(fmt, ## __VA_ARGS__); \
55 log_cpu_state(cs, 0); \
59 #if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
61 * When running 32-on-64 we should make sure we can fit all of the possible
62 * guest address space into a contiguous chunk of virtual host memory.
64 * This way we will never overlap with our own libraries or binaries or stack
65 * or anything else that QEMU maps.
68 /* MIPS only supports 31 bits of virtual address space for user space */
69 uintptr_t reserved_va
= 0x77000000;
71 uintptr_t reserved_va
= 0xf7000000;
74 uintptr_t reserved_va
;
77 static void usage(int exitcode
);
79 static const char *interp_prefix
= CONFIG_QEMU_INTERP_PREFIX
;
80 const char *qemu_uname_release
;
82 /* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
83 we allocate a bigger stack. Need a better solution, for example
84 by remapping the process stack directly at the right place */
85 unsigned long guest_stack_size
= 8 * 1024 * 1024UL;
87 void gemu_log(const char *fmt
, ...)
92 vfprintf(stderr
, fmt
, ap
);
96 #if defined(TARGET_I386)
97 int cpu_get_pic_interrupt(CPUX86State
*env
)
103 /***********************************************************/
104 /* Helper routines for implementing atomic operations. */
106 /* To implement exclusive operations we force all cpus to syncronise.
107 We don't require a full sync, only that no cpus are executing guest code.
108 The alternative is to map target atomic ops onto host equivalents,
109 which requires quite a lot of per host/target work. */
110 static pthread_mutex_t cpu_list_mutex
= PTHREAD_MUTEX_INITIALIZER
;
111 static pthread_mutex_t exclusive_lock
= PTHREAD_MUTEX_INITIALIZER
;
112 static pthread_cond_t exclusive_cond
= PTHREAD_COND_INITIALIZER
;
113 static pthread_cond_t exclusive_resume
= PTHREAD_COND_INITIALIZER
;
114 static int pending_cpus
;
116 /* Make sure everything is in a consistent state for calling fork(). */
117 void fork_start(void)
119 qemu_mutex_lock(&tcg_ctx
.tb_ctx
.tb_lock
);
120 pthread_mutex_lock(&exclusive_lock
);
124 void fork_end(int child
)
126 mmap_fork_end(child
);
128 CPUState
*cpu
, *next_cpu
;
129 /* Child processes created by fork() only have a single thread.
130 Discard information about the parent threads. */
131 CPU_FOREACH_SAFE(cpu
, next_cpu
) {
132 if (cpu
!= thread_cpu
) {
133 QTAILQ_REMOVE(&cpus
, thread_cpu
, node
);
137 pthread_mutex_init(&exclusive_lock
, NULL
);
138 pthread_mutex_init(&cpu_list_mutex
, NULL
);
139 pthread_cond_init(&exclusive_cond
, NULL
);
140 pthread_cond_init(&exclusive_resume
, NULL
);
141 qemu_mutex_init(&tcg_ctx
.tb_ctx
.tb_lock
);
142 gdbserver_fork(thread_cpu
);
144 pthread_mutex_unlock(&exclusive_lock
);
145 qemu_mutex_unlock(&tcg_ctx
.tb_ctx
.tb_lock
);
149 /* Wait for pending exclusive operations to complete. The exclusive lock
151 static inline void exclusive_idle(void)
153 while (pending_cpus
) {
154 pthread_cond_wait(&exclusive_resume
, &exclusive_lock
);
158 /* Start an exclusive operation.
159 Must only be called from outside cpu_arm_exec. */
160 static inline void start_exclusive(void)
164 pthread_mutex_lock(&exclusive_lock
);
168 /* Make all other cpus stop executing. */
169 CPU_FOREACH(other_cpu
) {
170 if (other_cpu
->running
) {
175 if (pending_cpus
> 1) {
176 pthread_cond_wait(&exclusive_cond
, &exclusive_lock
);
180 /* Finish an exclusive operation. */
181 static inline void __attribute__((unused
)) end_exclusive(void)
184 pthread_cond_broadcast(&exclusive_resume
);
185 pthread_mutex_unlock(&exclusive_lock
);
188 /* Wait for exclusive ops to finish, and begin cpu execution. */
189 static inline void cpu_exec_start(CPUState
*cpu
)
191 pthread_mutex_lock(&exclusive_lock
);
194 pthread_mutex_unlock(&exclusive_lock
);
197 /* Mark cpu as not executing, and release pending exclusive ops. */
198 static inline void cpu_exec_end(CPUState
*cpu
)
200 pthread_mutex_lock(&exclusive_lock
);
201 cpu
->running
= false;
202 if (pending_cpus
> 1) {
204 if (pending_cpus
== 1) {
205 pthread_cond_signal(&exclusive_cond
);
209 pthread_mutex_unlock(&exclusive_lock
);
212 void cpu_list_lock(void)
214 pthread_mutex_lock(&cpu_list_mutex
);
217 void cpu_list_unlock(void)
219 pthread_mutex_unlock(&cpu_list_mutex
);
224 /***********************************************************/
225 /* CPUX86 core interface */
227 uint64_t cpu_get_tsc(CPUX86State
*env
)
229 return cpu_get_host_ticks();
232 static void write_dt(void *ptr
, unsigned long addr
, unsigned long limit
,
237 e1
= (addr
<< 16) | (limit
& 0xffff);
238 e2
= ((addr
>> 16) & 0xff) | (addr
& 0xff000000) | (limit
& 0x000f0000);
245 static uint64_t *idt_table
;
247 static void set_gate64(void *ptr
, unsigned int type
, unsigned int dpl
,
248 uint64_t addr
, unsigned int sel
)
251 e1
= (addr
& 0xffff) | (sel
<< 16);
252 e2
= (addr
& 0xffff0000) | 0x8000 | (dpl
<< 13) | (type
<< 8);
256 p
[2] = tswap32(addr
>> 32);
259 /* only dpl matters as we do only user space emulation */
260 static void set_idt(int n
, unsigned int dpl
)
262 set_gate64(idt_table
+ n
* 2, 0, dpl
, 0, 0);
265 static void set_gate(void *ptr
, unsigned int type
, unsigned int dpl
,
266 uint32_t addr
, unsigned int sel
)
269 e1
= (addr
& 0xffff) | (sel
<< 16);
270 e2
= (addr
& 0xffff0000) | 0x8000 | (dpl
<< 13) | (type
<< 8);
276 /* only dpl matters as we do only user space emulation */
277 static void set_idt(int n
, unsigned int dpl
)
279 set_gate(idt_table
+ n
, 0, dpl
, 0, 0);
283 void cpu_loop(CPUX86State
*env
)
285 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
288 target_siginfo_t info
;
296 trapnr
= cpu_x86_exec(cs
);
300 /* linux syscall from int $0x80 */
301 env
->regs
[R_EAX
] = do_syscall(env
,
313 /* linux syscall from syscall instruction */
314 env
->regs
[R_EAX
] = do_syscall(env
,
328 case TARGET_VSYSCALL_ADDR(__NR_vgettimeofday
):
329 syscall_num
= __NR_gettimeofday
;
331 case TARGET_VSYSCALL_ADDR(__NR_vtime
):
333 syscall_num
= __NR_time
;
335 /* XXX: not yet implemented (arm eabi host) */
336 cpu_abort(cs
, "Unimplemented vsyscall vtime");
339 case TARGET_VSYSCALL_ADDR(__NR_vgetcpu
):
340 /* XXX: not yet implemented */
341 cpu_abort(cs
, "Unimplemented vsyscall vgetcpu");
345 "Invalid vsyscall to address " TARGET_FMT_lx
"\n",
348 env
->regs
[R_EAX
] = do_syscall(env
,
358 get_user_u64(val
, env
->regs
[R_ESP
]);
360 env
->regs
[R_ESP
] += 8;
365 info
.si_signo
= TARGET_SIGBUS
;
367 info
.si_code
= TARGET_SI_KERNEL
;
368 info
._sifields
._sigfault
._addr
= 0;
369 queue_signal(env
, info
.si_signo
, &info
);
372 /* XXX: potential problem if ABI32 */
373 #ifndef TARGET_X86_64
374 if (env
->eflags
& VM_MASK
) {
375 handle_vm86_fault(env
);
379 info
.si_signo
= TARGET_SIGSEGV
;
381 info
.si_code
= TARGET_SI_KERNEL
;
382 info
._sifields
._sigfault
._addr
= 0;
383 queue_signal(env
, info
.si_signo
, &info
);
387 info
.si_signo
= TARGET_SIGSEGV
;
389 if (!(env
->error_code
& 1))
390 info
.si_code
= TARGET_SEGV_MAPERR
;
392 info
.si_code
= TARGET_SEGV_ACCERR
;
393 info
._sifields
._sigfault
._addr
= env
->cr
[2];
394 queue_signal(env
, info
.si_signo
, &info
);
397 #ifndef TARGET_X86_64
398 if (env
->eflags
& VM_MASK
) {
399 handle_vm86_trap(env
, trapnr
);
403 /* division by zero */
404 info
.si_signo
= TARGET_SIGFPE
;
406 info
.si_code
= TARGET_FPE_INTDIV
;
407 info
._sifields
._sigfault
._addr
= env
->eip
;
408 queue_signal(env
, info
.si_signo
, &info
);
413 #ifndef TARGET_X86_64
414 if (env
->eflags
& VM_MASK
) {
415 handle_vm86_trap(env
, trapnr
);
419 info
.si_signo
= TARGET_SIGTRAP
;
421 if (trapnr
== EXCP01_DB
) {
422 info
.si_code
= TARGET_TRAP_BRKPT
;
423 info
._sifields
._sigfault
._addr
= env
->eip
;
425 info
.si_code
= TARGET_SI_KERNEL
;
426 info
._sifields
._sigfault
._addr
= 0;
428 queue_signal(env
, info
.si_signo
, &info
);
433 #ifndef TARGET_X86_64
434 if (env
->eflags
& VM_MASK
) {
435 handle_vm86_trap(env
, trapnr
);
439 info
.si_signo
= TARGET_SIGSEGV
;
441 info
.si_code
= TARGET_SI_KERNEL
;
442 info
._sifields
._sigfault
._addr
= 0;
443 queue_signal(env
, info
.si_signo
, &info
);
447 info
.si_signo
= TARGET_SIGILL
;
449 info
.si_code
= TARGET_ILL_ILLOPN
;
450 info
._sifields
._sigfault
._addr
= env
->eip
;
451 queue_signal(env
, info
.si_signo
, &info
);
454 /* just indicate that signals should be handled asap */
460 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
465 info
.si_code
= TARGET_TRAP_BRKPT
;
466 queue_signal(env
, info
.si_signo
, &info
);
471 pc
= env
->segs
[R_CS
].base
+ env
->eip
;
472 EXCP_DUMP(env
, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
476 process_pending_signals(env
);
483 #define get_user_code_u32(x, gaddr, env) \
484 ({ abi_long __r = get_user_u32((x), (gaddr)); \
485 if (!__r && bswap_code(arm_sctlr_b(env))) { \
491 #define get_user_code_u16(x, gaddr, env) \
492 ({ abi_long __r = get_user_u16((x), (gaddr)); \
493 if (!__r && bswap_code(arm_sctlr_b(env))) { \
499 #define get_user_data_u32(x, gaddr, env) \
500 ({ abi_long __r = get_user_u32((x), (gaddr)); \
501 if (!__r && arm_cpu_bswap_data(env)) { \
507 #define get_user_data_u16(x, gaddr, env) \
508 ({ abi_long __r = get_user_u16((x), (gaddr)); \
509 if (!__r && arm_cpu_bswap_data(env)) { \
515 #define put_user_data_u32(x, gaddr, env) \
516 ({ typeof(x) __x = (x); \
517 if (arm_cpu_bswap_data(env)) { \
518 __x = bswap32(__x); \
520 put_user_u32(__x, (gaddr)); \
523 #define put_user_data_u16(x, gaddr, env) \
524 ({ typeof(x) __x = (x); \
525 if (arm_cpu_bswap_data(env)) { \
526 __x = bswap16(__x); \
528 put_user_u16(__x, (gaddr)); \
532 /* Commpage handling -- there is no commpage for AArch64 */
535 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
537 * r0 = pointer to oldval
538 * r1 = pointer to newval
539 * r2 = pointer to target value
542 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
543 * C set if *ptr was changed, clear if no exchange happened
545 * Note segv's in kernel helpers are a bit tricky, we can set the
546 * data address sensibly but the PC address is just the entry point.
548 static void arm_kernel_cmpxchg64_helper(CPUARMState
*env
)
550 uint64_t oldval
, newval
, val
;
552 target_siginfo_t info
;
554 /* Based on the 32 bit code in do_kernel_trap */
556 /* XXX: This only works between threads, not between processes.
557 It's probably possible to implement this with native host
558 operations. However things like ldrex/strex are much harder so
559 there's not much point trying. */
561 cpsr
= cpsr_read(env
);
564 if (get_user_u64(oldval
, env
->regs
[0])) {
565 env
->exception
.vaddress
= env
->regs
[0];
569 if (get_user_u64(newval
, env
->regs
[1])) {
570 env
->exception
.vaddress
= env
->regs
[1];
574 if (get_user_u64(val
, addr
)) {
575 env
->exception
.vaddress
= addr
;
582 if (put_user_u64(val
, addr
)) {
583 env
->exception
.vaddress
= addr
;
593 cpsr_write(env
, cpsr
, CPSR_C
, CPSRWriteByInstr
);
599 /* We get the PC of the entry address - which is as good as anything,
600 on a real kernel what you get depends on which mode it uses. */
601 info
.si_signo
= TARGET_SIGSEGV
;
603 /* XXX: check env->error_code */
604 info
.si_code
= TARGET_SEGV_MAPERR
;
605 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
606 queue_signal(env
, info
.si_signo
, &info
);
609 /* Handle a jump to the kernel code page. */
611 do_kernel_trap(CPUARMState
*env
)
617 switch (env
->regs
[15]) {
618 case 0xffff0fa0: /* __kernel_memory_barrier */
619 /* ??? No-op. Will need to do better for SMP. */
621 case 0xffff0fc0: /* __kernel_cmpxchg */
622 /* XXX: This only works between threads, not between processes.
623 It's probably possible to implement this with native host
624 operations. However things like ldrex/strex are much harder so
625 there's not much point trying. */
627 cpsr
= cpsr_read(env
);
629 /* FIXME: This should SEGV if the access fails. */
630 if (get_user_u32(val
, addr
))
632 if (val
== env
->regs
[0]) {
634 /* FIXME: Check for segfaults. */
635 put_user_u32(val
, addr
);
642 cpsr_write(env
, cpsr
, CPSR_C
, CPSRWriteByInstr
);
645 case 0xffff0fe0: /* __kernel_get_tls */
646 env
->regs
[0] = cpu_get_tls(env
);
648 case 0xffff0f60: /* __kernel_cmpxchg64 */
649 arm_kernel_cmpxchg64_helper(env
);
655 /* Jump back to the caller. */
656 addr
= env
->regs
[14];
661 env
->regs
[15] = addr
;
666 /* Store exclusive handling for AArch32 */
667 static int do_strex(CPUARMState
*env
)
675 if (env
->exclusive_addr
!= env
->exclusive_test
) {
678 /* We know we're always AArch32 so the address is in uint32_t range
679 * unless it was the -1 exclusive-monitor-lost value (which won't
680 * match exclusive_test above).
682 assert(extract64(env
->exclusive_addr
, 32, 32) == 0);
683 addr
= env
->exclusive_addr
;
684 size
= env
->exclusive_info
& 0xf;
687 segv
= get_user_u8(val
, addr
);
690 segv
= get_user_data_u16(val
, addr
, env
);
694 segv
= get_user_data_u32(val
, addr
, env
);
700 env
->exception
.vaddress
= addr
;
705 segv
= get_user_data_u32(valhi
, addr
+ 4, env
);
707 env
->exception
.vaddress
= addr
+ 4;
710 if (arm_cpu_bswap_data(env
)) {
711 val
= deposit64((uint64_t)valhi
, 32, 32, val
);
713 val
= deposit64(val
, 32, 32, valhi
);
716 if (val
!= env
->exclusive_val
) {
720 val
= env
->regs
[(env
->exclusive_info
>> 8) & 0xf];
723 segv
= put_user_u8(val
, addr
);
726 segv
= put_user_data_u16(val
, addr
, env
);
730 segv
= put_user_data_u32(val
, addr
, env
);
734 env
->exception
.vaddress
= addr
;
738 val
= env
->regs
[(env
->exclusive_info
>> 12) & 0xf];
739 segv
= put_user_data_u32(val
, addr
+ 4, env
);
741 env
->exception
.vaddress
= addr
+ 4;
748 env
->regs
[(env
->exclusive_info
>> 4) & 0xf] = rc
;
754 void cpu_loop(CPUARMState
*env
)
756 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
758 unsigned int n
, insn
;
759 target_siginfo_t info
;
764 trapnr
= cpu_arm_exec(cs
);
769 TaskState
*ts
= cs
->opaque
;
773 /* we handle the FPU emulation here, as Linux */
774 /* we get the opcode */
775 /* FIXME - what to do if get_user() fails? */
776 get_user_code_u32(opcode
, env
->regs
[15], env
);
778 rc
= EmulateAll(opcode
, &ts
->fpa
, env
);
779 if (rc
== 0) { /* illegal instruction */
780 info
.si_signo
= TARGET_SIGILL
;
782 info
.si_code
= TARGET_ILL_ILLOPN
;
783 info
._sifields
._sigfault
._addr
= env
->regs
[15];
784 queue_signal(env
, info
.si_signo
, &info
);
785 } else if (rc
< 0) { /* FP exception */
788 /* translate softfloat flags to FPSR flags */
789 if (-rc
& float_flag_invalid
)
791 if (-rc
& float_flag_divbyzero
)
793 if (-rc
& float_flag_overflow
)
795 if (-rc
& float_flag_underflow
)
797 if (-rc
& float_flag_inexact
)
800 FPSR fpsr
= ts
->fpa
.fpsr
;
801 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
803 if (fpsr
& (arm_fpe
<< 16)) { /* exception enabled? */
804 info
.si_signo
= TARGET_SIGFPE
;
807 /* ordered by priority, least first */
808 if (arm_fpe
& BIT_IXC
) info
.si_code
= TARGET_FPE_FLTRES
;
809 if (arm_fpe
& BIT_UFC
) info
.si_code
= TARGET_FPE_FLTUND
;
810 if (arm_fpe
& BIT_OFC
) info
.si_code
= TARGET_FPE_FLTOVF
;
811 if (arm_fpe
& BIT_DZC
) info
.si_code
= TARGET_FPE_FLTDIV
;
812 if (arm_fpe
& BIT_IOC
) info
.si_code
= TARGET_FPE_FLTINV
;
814 info
._sifields
._sigfault
._addr
= env
->regs
[15];
815 queue_signal(env
, info
.si_signo
, &info
);
820 /* accumulate unenabled exceptions */
821 if ((!(fpsr
& BIT_IXE
)) && (arm_fpe
& BIT_IXC
))
823 if ((!(fpsr
& BIT_UFE
)) && (arm_fpe
& BIT_UFC
))
825 if ((!(fpsr
& BIT_OFE
)) && (arm_fpe
& BIT_OFC
))
827 if ((!(fpsr
& BIT_DZE
)) && (arm_fpe
& BIT_DZC
))
829 if ((!(fpsr
& BIT_IOE
)) && (arm_fpe
& BIT_IOC
))
832 } else { /* everything OK */
843 if (trapnr
== EXCP_BKPT
) {
845 /* FIXME - what to do if get_user() fails? */
846 get_user_code_u16(insn
, env
->regs
[15], env
);
850 /* FIXME - what to do if get_user() fails? */
851 get_user_code_u32(insn
, env
->regs
[15], env
);
852 n
= (insn
& 0xf) | ((insn
>> 4) & 0xff0);
857 /* FIXME - what to do if get_user() fails? */
858 get_user_code_u16(insn
, env
->regs
[15] - 2, env
);
861 /* FIXME - what to do if get_user() fails? */
862 get_user_code_u32(insn
, env
->regs
[15] - 4, env
);
867 if (n
== ARM_NR_cacheflush
) {
869 } else if (n
== ARM_NR_semihosting
870 || n
== ARM_NR_thumb_semihosting
) {
871 env
->regs
[0] = do_arm_semihosting (env
);
872 } else if (n
== 0 || n
>= ARM_SYSCALL_BASE
|| env
->thumb
) {
874 if (env
->thumb
|| n
== 0) {
877 n
-= ARM_SYSCALL_BASE
;
880 if ( n
> ARM_NR_BASE
) {
882 case ARM_NR_cacheflush
:
886 cpu_set_tls(env
, env
->regs
[0]);
889 case ARM_NR_breakpoint
:
890 env
->regs
[15] -= env
->thumb
? 2 : 4;
893 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
895 env
->regs
[0] = -TARGET_ENOSYS
;
899 env
->regs
[0] = do_syscall(env
,
915 /* just indicate that signals should be handled asap */
918 if (!do_strex(env
)) {
921 /* fall through for segv */
922 case EXCP_PREFETCH_ABORT
:
923 case EXCP_DATA_ABORT
:
924 addr
= env
->exception
.vaddress
;
926 info
.si_signo
= TARGET_SIGSEGV
;
928 /* XXX: check env->error_code */
929 info
.si_code
= TARGET_SEGV_MAPERR
;
930 info
._sifields
._sigfault
._addr
= addr
;
931 queue_signal(env
, info
.si_signo
, &info
);
939 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
944 info
.si_code
= TARGET_TRAP_BRKPT
;
945 queue_signal(env
, info
.si_signo
, &info
);
949 case EXCP_KERNEL_TRAP
:
950 if (do_kernel_trap(env
))
955 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
958 process_pending_signals(env
);
965 * Handle AArch64 store-release exclusive
967 * rs = gets the status result of store exclusive
968 * rt = is the register that is stored
969 * rt2 = is the second register store (in STP)
972 static int do_strex_a64(CPUARMState
*env
)
983 /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
984 size
= extract32(env
->exclusive_info
, 0, 2);
985 is_pair
= extract32(env
->exclusive_info
, 2, 1);
986 rs
= extract32(env
->exclusive_info
, 4, 5);
987 rt
= extract32(env
->exclusive_info
, 9, 5);
988 rt2
= extract32(env
->exclusive_info
, 14, 5);
990 addr
= env
->exclusive_addr
;
992 if (addr
!= env
->exclusive_test
) {
998 segv
= get_user_u8(val
, addr
);
1001 segv
= get_user_u16(val
, addr
);
1004 segv
= get_user_u32(val
, addr
);
1007 segv
= get_user_u64(val
, addr
);
1013 env
->exception
.vaddress
= addr
;
1016 if (val
!= env
->exclusive_val
) {
1021 segv
= get_user_u32(val
, addr
+ 4);
1023 segv
= get_user_u64(val
, addr
+ 8);
1026 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
1029 if (val
!= env
->exclusive_high
) {
1033 /* handle the zero register */
1034 val
= rt
== 31 ? 0 : env
->xregs
[rt
];
1037 segv
= put_user_u8(val
, addr
);
1040 segv
= put_user_u16(val
, addr
);
1043 segv
= put_user_u32(val
, addr
);
1046 segv
= put_user_u64(val
, addr
);
1053 /* handle the zero register */
1054 val
= rt2
== 31 ? 0 : env
->xregs
[rt2
];
1056 segv
= put_user_u32(val
, addr
+ 4);
1058 segv
= put_user_u64(val
, addr
+ 8);
1061 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
1068 /* rs == 31 encodes a write to the ZR, thus throwing away
1069 * the status return. This is rather silly but valid.
1072 env
->xregs
[rs
] = rc
;
1075 /* instruction faulted, PC does not advance */
1076 /* either way a strex releases any exclusive lock we have */
1077 env
->exclusive_addr
= -1;
1082 /* AArch64 main loop */
1083 void cpu_loop(CPUARMState
*env
)
1085 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
1087 target_siginfo_t info
;
1091 trapnr
= cpu_arm_exec(cs
);
1096 env
->xregs
[0] = do_syscall(env
,
1106 case EXCP_INTERRUPT
:
1107 /* just indicate that signals should be handled asap */
1110 info
.si_signo
= TARGET_SIGILL
;
1112 info
.si_code
= TARGET_ILL_ILLOPN
;
1113 info
._sifields
._sigfault
._addr
= env
->pc
;
1114 queue_signal(env
, info
.si_signo
, &info
);
1117 if (!do_strex_a64(env
)) {
1120 /* fall through for segv */
1121 case EXCP_PREFETCH_ABORT
:
1122 case EXCP_DATA_ABORT
:
1123 info
.si_signo
= TARGET_SIGSEGV
;
1125 /* XXX: check env->error_code */
1126 info
.si_code
= TARGET_SEGV_MAPERR
;
1127 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
1128 queue_signal(env
, info
.si_signo
, &info
);
1132 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1134 info
.si_signo
= sig
;
1136 info
.si_code
= TARGET_TRAP_BRKPT
;
1137 queue_signal(env
, info
.si_signo
, &info
);
1141 env
->xregs
[0] = do_arm_semihosting(env
);
1144 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1147 process_pending_signals(env
);
1148 /* Exception return on AArch64 always clears the exclusive monitor,
1149 * so any return to running guest code implies this.
1150 * A strex (successful or otherwise) also clears the monitor, so
1151 * we don't need to specialcase EXCP_STREX.
1153 env
->exclusive_addr
= -1;
1156 #endif /* ndef TARGET_ABI32 */
1160 #ifdef TARGET_UNICORE32
1162 void cpu_loop(CPUUniCore32State
*env
)
1164 CPUState
*cs
= CPU(uc32_env_get_cpu(env
));
1166 unsigned int n
, insn
;
1167 target_siginfo_t info
;
1171 trapnr
= uc32_cpu_exec(cs
);
1174 case UC32_EXCP_PRIV
:
1177 get_user_u32(insn
, env
->regs
[31] - 4);
1178 n
= insn
& 0xffffff;
1180 if (n
>= UC32_SYSCALL_BASE
) {
1182 n
-= UC32_SYSCALL_BASE
;
1183 if (n
== UC32_SYSCALL_NR_set_tls
) {
1184 cpu_set_tls(env
, env
->regs
[0]);
1187 env
->regs
[0] = do_syscall(env
,
1202 case UC32_EXCP_DTRAP
:
1203 case UC32_EXCP_ITRAP
:
1204 info
.si_signo
= TARGET_SIGSEGV
;
1206 /* XXX: check env->error_code */
1207 info
.si_code
= TARGET_SEGV_MAPERR
;
1208 info
._sifields
._sigfault
._addr
= env
->cp0
.c4_faultaddr
;
1209 queue_signal(env
, info
.si_signo
, &info
);
1211 case EXCP_INTERRUPT
:
1212 /* just indicate that signals should be handled asap */
1218 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1220 info
.si_signo
= sig
;
1222 info
.si_code
= TARGET_TRAP_BRKPT
;
1223 queue_signal(env
, info
.si_signo
, &info
);
1230 process_pending_signals(env
);
1234 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1240 #define SPARC64_STACK_BIAS 2047
1244 /* WARNING: dealing with register windows _is_ complicated. More info
1245 can be found at http://www.sics.se/~psm/sparcstack.html */
1246 static inline int get_reg_index(CPUSPARCState
*env
, int cwp
, int index
)
1248 index
= (index
+ cwp
* 16) % (16 * env
->nwindows
);
1249 /* wrap handling : if cwp is on the last window, then we use the
1250 registers 'after' the end */
1251 if (index
< 8 && env
->cwp
== env
->nwindows
- 1)
1252 index
+= 16 * env
->nwindows
;
1256 /* save the register window 'cwp1' */
1257 static inline void save_window_offset(CPUSPARCState
*env
, int cwp1
)
1262 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1263 #ifdef TARGET_SPARC64
1265 sp_ptr
+= SPARC64_STACK_BIAS
;
1267 #if defined(DEBUG_WIN)
1268 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" save_cwp=%d\n",
1271 for(i
= 0; i
< 16; i
++) {
1272 /* FIXME - what to do if put_user() fails? */
1273 put_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1274 sp_ptr
+= sizeof(abi_ulong
);
1278 static void save_window(CPUSPARCState
*env
)
1280 #ifndef TARGET_SPARC64
1281 unsigned int new_wim
;
1282 new_wim
= ((env
->wim
>> 1) | (env
->wim
<< (env
->nwindows
- 1))) &
1283 ((1LL << env
->nwindows
) - 1);
1284 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1287 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1293 static void restore_window(CPUSPARCState
*env
)
1295 #ifndef TARGET_SPARC64
1296 unsigned int new_wim
;
1298 unsigned int i
, cwp1
;
1301 #ifndef TARGET_SPARC64
1302 new_wim
= ((env
->wim
<< 1) | (env
->wim
>> (env
->nwindows
- 1))) &
1303 ((1LL << env
->nwindows
) - 1);
1306 /* restore the invalid window */
1307 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1308 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1309 #ifdef TARGET_SPARC64
1311 sp_ptr
+= SPARC64_STACK_BIAS
;
1313 #if defined(DEBUG_WIN)
1314 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" load_cwp=%d\n",
1317 for(i
= 0; i
< 16; i
++) {
1318 /* FIXME - what to do if get_user() fails? */
1319 get_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1320 sp_ptr
+= sizeof(abi_ulong
);
1322 #ifdef TARGET_SPARC64
1324 if (env
->cleanwin
< env
->nwindows
- 1)
1332 static void flush_windows(CPUSPARCState
*env
)
1338 /* if restore would invoke restore_window(), then we can stop */
1339 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ offset
);
1340 #ifndef TARGET_SPARC64
1341 if (env
->wim
& (1 << cwp1
))
1344 if (env
->canrestore
== 0)
1349 save_window_offset(env
, cwp1
);
1352 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1353 #ifndef TARGET_SPARC64
1354 /* set wim so that restore will reload the registers */
1355 env
->wim
= 1 << cwp1
;
1357 #if defined(DEBUG_WIN)
1358 printf("flush_windows: nb=%d\n", offset
- 1);
1362 void cpu_loop (CPUSPARCState
*env
)
1364 CPUState
*cs
= CPU(sparc_env_get_cpu(env
));
1367 target_siginfo_t info
;
1371 trapnr
= cpu_sparc_exec(cs
);
1374 /* Compute PSR before exposing state. */
1375 if (env
->cc_op
!= CC_OP_FLAGS
) {
1380 #ifndef TARGET_SPARC64
1387 ret
= do_syscall (env
, env
->gregs
[1],
1388 env
->regwptr
[0], env
->regwptr
[1],
1389 env
->regwptr
[2], env
->regwptr
[3],
1390 env
->regwptr
[4], env
->regwptr
[5],
1392 if ((abi_ulong
)ret
>= (abi_ulong
)(-515)) {
1393 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1394 env
->xcc
|= PSR_CARRY
;
1396 env
->psr
|= PSR_CARRY
;
1400 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1401 env
->xcc
&= ~PSR_CARRY
;
1403 env
->psr
&= ~PSR_CARRY
;
1406 env
->regwptr
[0] = ret
;
1407 /* next instruction */
1409 env
->npc
= env
->npc
+ 4;
1411 case 0x83: /* flush windows */
1416 /* next instruction */
1418 env
->npc
= env
->npc
+ 4;
1420 #ifndef TARGET_SPARC64
1421 case TT_WIN_OVF
: /* window overflow */
1424 case TT_WIN_UNF
: /* window underflow */
1425 restore_window(env
);
1430 info
.si_signo
= TARGET_SIGSEGV
;
1432 /* XXX: check env->error_code */
1433 info
.si_code
= TARGET_SEGV_MAPERR
;
1434 info
._sifields
._sigfault
._addr
= env
->mmuregs
[4];
1435 queue_signal(env
, info
.si_signo
, &info
);
1439 case TT_SPILL
: /* window overflow */
1442 case TT_FILL
: /* window underflow */
1443 restore_window(env
);
1448 info
.si_signo
= TARGET_SIGSEGV
;
1450 /* XXX: check env->error_code */
1451 info
.si_code
= TARGET_SEGV_MAPERR
;
1452 if (trapnr
== TT_DFAULT
)
1453 info
._sifields
._sigfault
._addr
= env
->dmmuregs
[4];
1455 info
._sifields
._sigfault
._addr
= cpu_tsptr(env
)->tpc
;
1456 queue_signal(env
, info
.si_signo
, &info
);
1459 #ifndef TARGET_ABI32
1462 sparc64_get_context(env
);
1466 sparc64_set_context(env
);
1470 case EXCP_INTERRUPT
:
1471 /* just indicate that signals should be handled asap */
1475 info
.si_signo
= TARGET_SIGILL
;
1477 info
.si_code
= TARGET_ILL_ILLOPC
;
1478 info
._sifields
._sigfault
._addr
= env
->pc
;
1479 queue_signal(env
, info
.si_signo
, &info
);
1486 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1489 info
.si_signo
= sig
;
1491 info
.si_code
= TARGET_TRAP_BRKPT
;
1492 queue_signal(env
, info
.si_signo
, &info
);
1497 printf ("Unhandled trap: 0x%x\n", trapnr
);
1498 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1501 process_pending_signals (env
);
1508 static inline uint64_t cpu_ppc_get_tb(CPUPPCState
*env
)
1510 return cpu_get_host_ticks();
1513 uint64_t cpu_ppc_load_tbl(CPUPPCState
*env
)
1515 return cpu_ppc_get_tb(env
);
1518 uint32_t cpu_ppc_load_tbu(CPUPPCState
*env
)
1520 return cpu_ppc_get_tb(env
) >> 32;
1523 uint64_t cpu_ppc_load_atbl(CPUPPCState
*env
)
1525 return cpu_ppc_get_tb(env
);
1528 uint32_t cpu_ppc_load_atbu(CPUPPCState
*env
)
1530 return cpu_ppc_get_tb(env
) >> 32;
/* PPC601 RTC upper register: reads identically to TBU in user mode, so it
 * is implemented as a GCC symbol alias of cpu_ppc_load_tbu rather than a
 * wrapper call.
 */
uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env)
    __attribute__ (( alias ("cpu_ppc_load_tbu") ));
1536 uint32_t cpu_ppc601_load_rtcl(CPUPPCState
*env
)
1538 return cpu_ppc_load_tbl(env
) & 0x3FFFFF80;
1541 /* XXX: to be fixed */
1542 int ppc_dcr_read (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t *valp
)
1547 int ppc_dcr_write (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t val
)
1552 static int do_store_exclusive(CPUPPCState
*env
)
1555 target_ulong page_addr
;
1556 target_ulong val
, val2
__attribute__((unused
)) = 0;
1560 addr
= env
->reserve_ea
;
1561 page_addr
= addr
& TARGET_PAGE_MASK
;
1564 flags
= page_get_flags(page_addr
);
1565 if ((flags
& PAGE_READ
) == 0) {
1568 int reg
= env
->reserve_info
& 0x1f;
1569 int size
= env
->reserve_info
>> 5;
1572 if (addr
== env
->reserve_addr
) {
1574 case 1: segv
= get_user_u8(val
, addr
); break;
1575 case 2: segv
= get_user_u16(val
, addr
); break;
1576 case 4: segv
= get_user_u32(val
, addr
); break;
1577 #if defined(TARGET_PPC64)
1578 case 8: segv
= get_user_u64(val
, addr
); break;
1580 segv
= get_user_u64(val
, addr
);
1582 segv
= get_user_u64(val2
, addr
+ 8);
1589 if (!segv
&& val
== env
->reserve_val
) {
1590 val
= env
->gpr
[reg
];
1592 case 1: segv
= put_user_u8(val
, addr
); break;
1593 case 2: segv
= put_user_u16(val
, addr
); break;
1594 case 4: segv
= put_user_u32(val
, addr
); break;
1595 #if defined(TARGET_PPC64)
1596 case 8: segv
= put_user_u64(val
, addr
); break;
1598 if (val2
== env
->reserve_val2
) {
1601 val
= env
->gpr
[reg
+1];
1603 val2
= env
->gpr
[reg
+1];
1605 segv
= put_user_u64(val
, addr
);
1607 segv
= put_user_u64(val2
, addr
+ 8);
1620 env
->crf
[0] = (stored
<< 1) | xer_so
;
1621 env
->reserve_addr
= (target_ulong
)-1;
1631 void cpu_loop(CPUPPCState
*env
)
1633 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
1634 target_siginfo_t info
;
1640 trapnr
= cpu_ppc_exec(cs
);
1643 case POWERPC_EXCP_NONE
:
1646 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1647 cpu_abort(cs
, "Critical interrupt while in user mode. "
1650 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1651 cpu_abort(cs
, "Machine check exception while in user mode. "
1654 case POWERPC_EXCP_DSI
: /* Data storage exception */
1655 EXCP_DUMP(env
, "Invalid data memory access: 0x" TARGET_FMT_lx
"\n",
1657 /* XXX: check this. Seems bugged */
1658 switch (env
->error_code
& 0xFF000000) {
1660 info
.si_signo
= TARGET_SIGSEGV
;
1662 info
.si_code
= TARGET_SEGV_MAPERR
;
1665 info
.si_signo
= TARGET_SIGILL
;
1667 info
.si_code
= TARGET_ILL_ILLADR
;
1670 info
.si_signo
= TARGET_SIGSEGV
;
1672 info
.si_code
= TARGET_SEGV_ACCERR
;
1675 /* Let's send a regular segfault... */
1676 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1678 info
.si_signo
= TARGET_SIGSEGV
;
1680 info
.si_code
= TARGET_SEGV_MAPERR
;
1683 info
._sifields
._sigfault
._addr
= env
->nip
;
1684 queue_signal(env
, info
.si_signo
, &info
);
1686 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1687 EXCP_DUMP(env
, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1688 "\n", env
->spr
[SPR_SRR0
]);
1689 /* XXX: check this */
1690 switch (env
->error_code
& 0xFF000000) {
1692 info
.si_signo
= TARGET_SIGSEGV
;
1694 info
.si_code
= TARGET_SEGV_MAPERR
;
1698 info
.si_signo
= TARGET_SIGSEGV
;
1700 info
.si_code
= TARGET_SEGV_ACCERR
;
1703 /* Let's send a regular segfault... */
1704 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1706 info
.si_signo
= TARGET_SIGSEGV
;
1708 info
.si_code
= TARGET_SEGV_MAPERR
;
1711 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1712 queue_signal(env
, info
.si_signo
, &info
);
1714 case POWERPC_EXCP_EXTERNAL
: /* External input */
1715 cpu_abort(cs
, "External interrupt while in user mode. "
1718 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1719 EXCP_DUMP(env
, "Unaligned memory access\n");
1720 /* XXX: check this */
1721 info
.si_signo
= TARGET_SIGBUS
;
1723 info
.si_code
= TARGET_BUS_ADRALN
;
1724 info
._sifields
._sigfault
._addr
= env
->nip
;
1725 queue_signal(env
, info
.si_signo
, &info
);
1727 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1728 /* XXX: check this */
1729 switch (env
->error_code
& ~0xF) {
1730 case POWERPC_EXCP_FP
:
1731 EXCP_DUMP(env
, "Floating point program exception\n");
1732 info
.si_signo
= TARGET_SIGFPE
;
1734 switch (env
->error_code
& 0xF) {
1735 case POWERPC_EXCP_FP_OX
:
1736 info
.si_code
= TARGET_FPE_FLTOVF
;
1738 case POWERPC_EXCP_FP_UX
:
1739 info
.si_code
= TARGET_FPE_FLTUND
;
1741 case POWERPC_EXCP_FP_ZX
:
1742 case POWERPC_EXCP_FP_VXZDZ
:
1743 info
.si_code
= TARGET_FPE_FLTDIV
;
1745 case POWERPC_EXCP_FP_XX
:
1746 info
.si_code
= TARGET_FPE_FLTRES
;
1748 case POWERPC_EXCP_FP_VXSOFT
:
1749 info
.si_code
= TARGET_FPE_FLTINV
;
1751 case POWERPC_EXCP_FP_VXSNAN
:
1752 case POWERPC_EXCP_FP_VXISI
:
1753 case POWERPC_EXCP_FP_VXIDI
:
1754 case POWERPC_EXCP_FP_VXIMZ
:
1755 case POWERPC_EXCP_FP_VXVC
:
1756 case POWERPC_EXCP_FP_VXSQRT
:
1757 case POWERPC_EXCP_FP_VXCVI
:
1758 info
.si_code
= TARGET_FPE_FLTSUB
;
1761 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1766 case POWERPC_EXCP_INVAL
:
1767 EXCP_DUMP(env
, "Invalid instruction\n");
1768 info
.si_signo
= TARGET_SIGILL
;
1770 switch (env
->error_code
& 0xF) {
1771 case POWERPC_EXCP_INVAL_INVAL
:
1772 info
.si_code
= TARGET_ILL_ILLOPC
;
1774 case POWERPC_EXCP_INVAL_LSWX
:
1775 info
.si_code
= TARGET_ILL_ILLOPN
;
1777 case POWERPC_EXCP_INVAL_SPR
:
1778 info
.si_code
= TARGET_ILL_PRVREG
;
1780 case POWERPC_EXCP_INVAL_FP
:
1781 info
.si_code
= TARGET_ILL_COPROC
;
1784 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1785 env
->error_code
& 0xF);
1786 info
.si_code
= TARGET_ILL_ILLADR
;
1790 case POWERPC_EXCP_PRIV
:
1791 EXCP_DUMP(env
, "Privilege violation\n");
1792 info
.si_signo
= TARGET_SIGILL
;
1794 switch (env
->error_code
& 0xF) {
1795 case POWERPC_EXCP_PRIV_OPC
:
1796 info
.si_code
= TARGET_ILL_PRVOPC
;
1798 case POWERPC_EXCP_PRIV_REG
:
1799 info
.si_code
= TARGET_ILL_PRVREG
;
1802 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1803 env
->error_code
& 0xF);
1804 info
.si_code
= TARGET_ILL_PRVOPC
;
1808 case POWERPC_EXCP_TRAP
:
1809 cpu_abort(cs
, "Tried to call a TRAP\n");
1812 /* Should not happen ! */
1813 cpu_abort(cs
, "Unknown program exception (%02x)\n",
1817 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1818 queue_signal(env
, info
.si_signo
, &info
);
1820 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1821 EXCP_DUMP(env
, "No floating point allowed\n");
1822 info
.si_signo
= TARGET_SIGILL
;
1824 info
.si_code
= TARGET_ILL_COPROC
;
1825 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1826 queue_signal(env
, info
.si_signo
, &info
);
1828 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1829 cpu_abort(cs
, "Syscall exception while in user mode. "
1832 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1833 EXCP_DUMP(env
, "No APU instruction allowed\n");
1834 info
.si_signo
= TARGET_SIGILL
;
1836 info
.si_code
= TARGET_ILL_COPROC
;
1837 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1838 queue_signal(env
, info
.si_signo
, &info
);
1840 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1841 cpu_abort(cs
, "Decrementer interrupt while in user mode. "
1844 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1845 cpu_abort(cs
, "Fix interval timer interrupt while in user mode. "
1848 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1849 cpu_abort(cs
, "Watchdog timer interrupt while in user mode. "
1852 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1853 cpu_abort(cs
, "Data TLB exception while in user mode. "
1856 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1857 cpu_abort(cs
, "Instruction TLB exception while in user mode. "
1860 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1861 EXCP_DUMP(env
, "No SPE/floating-point instruction allowed\n");
1862 info
.si_signo
= TARGET_SIGILL
;
1864 info
.si_code
= TARGET_ILL_COPROC
;
1865 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1866 queue_signal(env
, info
.si_signo
, &info
);
1868 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1869 cpu_abort(cs
, "Embedded floating-point data IRQ not handled\n");
1871 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1872 cpu_abort(cs
, "Embedded floating-point round IRQ not handled\n");
1874 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1875 cpu_abort(cs
, "Performance monitor exception not handled\n");
1877 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1878 cpu_abort(cs
, "Doorbell interrupt while in user mode. "
1881 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1882 cpu_abort(cs
, "Doorbell critical interrupt while in user mode. "
1885 case POWERPC_EXCP_RESET
: /* System reset exception */
1886 cpu_abort(cs
, "Reset interrupt while in user mode. "
1889 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1890 cpu_abort(cs
, "Data segment exception while in user mode. "
1893 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1894 cpu_abort(cs
, "Instruction segment exception "
1895 "while in user mode. Aborting\n");
1897 /* PowerPC 64 with hypervisor mode support */
1898 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1899 cpu_abort(cs
, "Hypervisor decrementer interrupt "
1900 "while in user mode. Aborting\n");
1902 case POWERPC_EXCP_TRACE
: /* Trace exception */
1904 * we use this exception to emulate step-by-step execution mode.
1907 /* PowerPC 64 with hypervisor mode support */
1908 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1909 cpu_abort(cs
, "Hypervisor data storage exception "
1910 "while in user mode. Aborting\n");
1912 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1913 cpu_abort(cs
, "Hypervisor instruction storage exception "
1914 "while in user mode. Aborting\n");
1916 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1917 cpu_abort(cs
, "Hypervisor data segment exception "
1918 "while in user mode. Aborting\n");
1920 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1921 cpu_abort(cs
, "Hypervisor instruction segment exception "
1922 "while in user mode. Aborting\n");
1924 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1925 EXCP_DUMP(env
, "No Altivec instructions allowed\n");
1926 info
.si_signo
= TARGET_SIGILL
;
1928 info
.si_code
= TARGET_ILL_COPROC
;
1929 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1930 queue_signal(env
, info
.si_signo
, &info
);
1932 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1933 cpu_abort(cs
, "Programmable interval timer interrupt "
1934 "while in user mode. Aborting\n");
1936 case POWERPC_EXCP_IO
: /* IO error exception */
1937 cpu_abort(cs
, "IO error exception while in user mode. "
1940 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1941 cpu_abort(cs
, "Run mode exception while in user mode. "
1944 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1945 cpu_abort(cs
, "Emulation trap exception not handled\n");
1947 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1948 cpu_abort(cs
, "Instruction fetch TLB exception "
1949 "while in user-mode. Aborting");
1951 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1952 cpu_abort(cs
, "Data load TLB exception while in user-mode. "
1955 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
1956 cpu_abort(cs
, "Data store TLB exception while in user-mode. "
1959 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
1960 cpu_abort(cs
, "Floating-point assist exception not handled\n");
1962 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1963 cpu_abort(cs
, "Instruction address breakpoint exception "
1966 case POWERPC_EXCP_SMI
: /* System management interrupt */
1967 cpu_abort(cs
, "System management interrupt while in user mode. "
1970 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1971 cpu_abort(cs
, "Thermal interrupt interrupt while in user mode. "
1974 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
1975 cpu_abort(cs
, "Performance monitor exception not handled\n");
1977 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1978 cpu_abort(cs
, "Vector assist exception not handled\n");
1980 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
1981 cpu_abort(cs
, "Soft patch exception not handled\n");
1983 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1984 cpu_abort(cs
, "Maintenance exception while in user mode. "
1987 case POWERPC_EXCP_STOP
: /* stop translation */
1988 /* We did invalidate the instruction cache. Go on */
1990 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
1991 /* We just stopped because of a branch. Go on */
1993 case POWERPC_EXCP_SYSCALL_USER
:
1994 /* system call in user-mode emulation */
1996 * PPC ABI uses overflow flag in cr0 to signal an error
1999 env
->crf
[0] &= ~0x1;
2000 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
2001 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
2003 if (ret
== (target_ulong
)(-TARGET_QEMU_ESIGRETURN
)) {
2004 /* Returning from a successful sigreturn syscall.
2005 Avoid corrupting register state. */
2008 if (ret
> (target_ulong
)(-515)) {
2014 case POWERPC_EXCP_STCX
:
2015 if (do_store_exclusive(env
)) {
2016 info
.si_signo
= TARGET_SIGSEGV
;
2018 info
.si_code
= TARGET_SEGV_MAPERR
;
2019 info
._sifields
._sigfault
._addr
= env
->nip
;
2020 queue_signal(env
, info
.si_signo
, &info
);
2027 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2029 info
.si_signo
= sig
;
2031 info
.si_code
= TARGET_TRAP_BRKPT
;
2032 queue_signal(env
, info
.si_signo
, &info
);
2036 case EXCP_INTERRUPT
:
2037 /* just indicate that signals should be handled asap */
2040 cpu_abort(cs
, "Unknown exception 0x%d. Aborting\n", trapnr
);
2043 process_pending_signals(env
);
2050 # ifdef TARGET_ABI_MIPSO32
2051 # define MIPS_SYS(name, args) args,
2052 static const uint8_t mips_syscall_args
[] = {
2053 MIPS_SYS(sys_syscall
, 8) /* 4000 */
2054 MIPS_SYS(sys_exit
, 1)
2055 MIPS_SYS(sys_fork
, 0)
2056 MIPS_SYS(sys_read
, 3)
2057 MIPS_SYS(sys_write
, 3)
2058 MIPS_SYS(sys_open
, 3) /* 4005 */
2059 MIPS_SYS(sys_close
, 1)
2060 MIPS_SYS(sys_waitpid
, 3)
2061 MIPS_SYS(sys_creat
, 2)
2062 MIPS_SYS(sys_link
, 2)
2063 MIPS_SYS(sys_unlink
, 1) /* 4010 */
2064 MIPS_SYS(sys_execve
, 0)
2065 MIPS_SYS(sys_chdir
, 1)
2066 MIPS_SYS(sys_time
, 1)
2067 MIPS_SYS(sys_mknod
, 3)
2068 MIPS_SYS(sys_chmod
, 2) /* 4015 */
2069 MIPS_SYS(sys_lchown
, 3)
2070 MIPS_SYS(sys_ni_syscall
, 0)
2071 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_stat */
2072 MIPS_SYS(sys_lseek
, 3)
2073 MIPS_SYS(sys_getpid
, 0) /* 4020 */
2074 MIPS_SYS(sys_mount
, 5)
2075 MIPS_SYS(sys_umount
, 1)
2076 MIPS_SYS(sys_setuid
, 1)
2077 MIPS_SYS(sys_getuid
, 0)
2078 MIPS_SYS(sys_stime
, 1) /* 4025 */
2079 MIPS_SYS(sys_ptrace
, 4)
2080 MIPS_SYS(sys_alarm
, 1)
2081 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_fstat */
2082 MIPS_SYS(sys_pause
, 0)
2083 MIPS_SYS(sys_utime
, 2) /* 4030 */
2084 MIPS_SYS(sys_ni_syscall
, 0)
2085 MIPS_SYS(sys_ni_syscall
, 0)
2086 MIPS_SYS(sys_access
, 2)
2087 MIPS_SYS(sys_nice
, 1)
2088 MIPS_SYS(sys_ni_syscall
, 0) /* 4035 */
2089 MIPS_SYS(sys_sync
, 0)
2090 MIPS_SYS(sys_kill
, 2)
2091 MIPS_SYS(sys_rename
, 2)
2092 MIPS_SYS(sys_mkdir
, 2)
2093 MIPS_SYS(sys_rmdir
, 1) /* 4040 */
2094 MIPS_SYS(sys_dup
, 1)
2095 MIPS_SYS(sys_pipe
, 0)
2096 MIPS_SYS(sys_times
, 1)
2097 MIPS_SYS(sys_ni_syscall
, 0)
2098 MIPS_SYS(sys_brk
, 1) /* 4045 */
2099 MIPS_SYS(sys_setgid
, 1)
2100 MIPS_SYS(sys_getgid
, 0)
2101 MIPS_SYS(sys_ni_syscall
, 0) /* was signal(2) */
2102 MIPS_SYS(sys_geteuid
, 0)
2103 MIPS_SYS(sys_getegid
, 0) /* 4050 */
2104 MIPS_SYS(sys_acct
, 0)
2105 MIPS_SYS(sys_umount2
, 2)
2106 MIPS_SYS(sys_ni_syscall
, 0)
2107 MIPS_SYS(sys_ioctl
, 3)
2108 MIPS_SYS(sys_fcntl
, 3) /* 4055 */
2109 MIPS_SYS(sys_ni_syscall
, 2)
2110 MIPS_SYS(sys_setpgid
, 2)
2111 MIPS_SYS(sys_ni_syscall
, 0)
2112 MIPS_SYS(sys_olduname
, 1)
2113 MIPS_SYS(sys_umask
, 1) /* 4060 */
2114 MIPS_SYS(sys_chroot
, 1)
2115 MIPS_SYS(sys_ustat
, 2)
2116 MIPS_SYS(sys_dup2
, 2)
2117 MIPS_SYS(sys_getppid
, 0)
2118 MIPS_SYS(sys_getpgrp
, 0) /* 4065 */
2119 MIPS_SYS(sys_setsid
, 0)
2120 MIPS_SYS(sys_sigaction
, 3)
2121 MIPS_SYS(sys_sgetmask
, 0)
2122 MIPS_SYS(sys_ssetmask
, 1)
2123 MIPS_SYS(sys_setreuid
, 2) /* 4070 */
2124 MIPS_SYS(sys_setregid
, 2)
2125 MIPS_SYS(sys_sigsuspend
, 0)
2126 MIPS_SYS(sys_sigpending
, 1)
2127 MIPS_SYS(sys_sethostname
, 2)
2128 MIPS_SYS(sys_setrlimit
, 2) /* 4075 */
2129 MIPS_SYS(sys_getrlimit
, 2)
2130 MIPS_SYS(sys_getrusage
, 2)
2131 MIPS_SYS(sys_gettimeofday
, 2)
2132 MIPS_SYS(sys_settimeofday
, 2)
2133 MIPS_SYS(sys_getgroups
, 2) /* 4080 */
2134 MIPS_SYS(sys_setgroups
, 2)
2135 MIPS_SYS(sys_ni_syscall
, 0) /* old_select */
2136 MIPS_SYS(sys_symlink
, 2)
2137 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_lstat */
2138 MIPS_SYS(sys_readlink
, 3) /* 4085 */
2139 MIPS_SYS(sys_uselib
, 1)
2140 MIPS_SYS(sys_swapon
, 2)
2141 MIPS_SYS(sys_reboot
, 3)
2142 MIPS_SYS(old_readdir
, 3)
2143 MIPS_SYS(old_mmap
, 6) /* 4090 */
2144 MIPS_SYS(sys_munmap
, 2)
2145 MIPS_SYS(sys_truncate
, 2)
2146 MIPS_SYS(sys_ftruncate
, 2)
2147 MIPS_SYS(sys_fchmod
, 2)
2148 MIPS_SYS(sys_fchown
, 3) /* 4095 */
2149 MIPS_SYS(sys_getpriority
, 2)
2150 MIPS_SYS(sys_setpriority
, 3)
2151 MIPS_SYS(sys_ni_syscall
, 0)
2152 MIPS_SYS(sys_statfs
, 2)
2153 MIPS_SYS(sys_fstatfs
, 2) /* 4100 */
2154 MIPS_SYS(sys_ni_syscall
, 0) /* was ioperm(2) */
2155 MIPS_SYS(sys_socketcall
, 2)
2156 MIPS_SYS(sys_syslog
, 3)
2157 MIPS_SYS(sys_setitimer
, 3)
2158 MIPS_SYS(sys_getitimer
, 2) /* 4105 */
2159 MIPS_SYS(sys_newstat
, 2)
2160 MIPS_SYS(sys_newlstat
, 2)
2161 MIPS_SYS(sys_newfstat
, 2)
2162 MIPS_SYS(sys_uname
, 1)
2163 MIPS_SYS(sys_ni_syscall
, 0) /* 4110 was iopl(2) */
2164 MIPS_SYS(sys_vhangup
, 0)
2165 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_idle() */
2166 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_vm86 */
2167 MIPS_SYS(sys_wait4
, 4)
2168 MIPS_SYS(sys_swapoff
, 1) /* 4115 */
2169 MIPS_SYS(sys_sysinfo
, 1)
2170 MIPS_SYS(sys_ipc
, 6)
2171 MIPS_SYS(sys_fsync
, 1)
2172 MIPS_SYS(sys_sigreturn
, 0)
2173 MIPS_SYS(sys_clone
, 6) /* 4120 */
2174 MIPS_SYS(sys_setdomainname
, 2)
2175 MIPS_SYS(sys_newuname
, 1)
2176 MIPS_SYS(sys_ni_syscall
, 0) /* sys_modify_ldt */
2177 MIPS_SYS(sys_adjtimex
, 1)
2178 MIPS_SYS(sys_mprotect
, 3) /* 4125 */
2179 MIPS_SYS(sys_sigprocmask
, 3)
2180 MIPS_SYS(sys_ni_syscall
, 0) /* was create_module */
2181 MIPS_SYS(sys_init_module
, 5)
2182 MIPS_SYS(sys_delete_module
, 1)
2183 MIPS_SYS(sys_ni_syscall
, 0) /* 4130 was get_kernel_syms */
2184 MIPS_SYS(sys_quotactl
, 0)
2185 MIPS_SYS(sys_getpgid
, 1)
2186 MIPS_SYS(sys_fchdir
, 1)
2187 MIPS_SYS(sys_bdflush
, 2)
2188 MIPS_SYS(sys_sysfs
, 3) /* 4135 */
2189 MIPS_SYS(sys_personality
, 1)
2190 MIPS_SYS(sys_ni_syscall
, 0) /* for afs_syscall */
2191 MIPS_SYS(sys_setfsuid
, 1)
2192 MIPS_SYS(sys_setfsgid
, 1)
2193 MIPS_SYS(sys_llseek
, 5) /* 4140 */
2194 MIPS_SYS(sys_getdents
, 3)
2195 MIPS_SYS(sys_select
, 5)
2196 MIPS_SYS(sys_flock
, 2)
2197 MIPS_SYS(sys_msync
, 3)
2198 MIPS_SYS(sys_readv
, 3) /* 4145 */
2199 MIPS_SYS(sys_writev
, 3)
2200 MIPS_SYS(sys_cacheflush
, 3)
2201 MIPS_SYS(sys_cachectl
, 3)
2202 MIPS_SYS(sys_sysmips
, 4)
2203 MIPS_SYS(sys_ni_syscall
, 0) /* 4150 */
2204 MIPS_SYS(sys_getsid
, 1)
2205 MIPS_SYS(sys_fdatasync
, 0)
2206 MIPS_SYS(sys_sysctl
, 1)
2207 MIPS_SYS(sys_mlock
, 2)
2208 MIPS_SYS(sys_munlock
, 2) /* 4155 */
2209 MIPS_SYS(sys_mlockall
, 1)
2210 MIPS_SYS(sys_munlockall
, 0)
2211 MIPS_SYS(sys_sched_setparam
, 2)
2212 MIPS_SYS(sys_sched_getparam
, 2)
2213 MIPS_SYS(sys_sched_setscheduler
, 3) /* 4160 */
2214 MIPS_SYS(sys_sched_getscheduler
, 1)
2215 MIPS_SYS(sys_sched_yield
, 0)
2216 MIPS_SYS(sys_sched_get_priority_max
, 1)
2217 MIPS_SYS(sys_sched_get_priority_min
, 1)
2218 MIPS_SYS(sys_sched_rr_get_interval
, 2) /* 4165 */
2219 MIPS_SYS(sys_nanosleep
, 2)
2220 MIPS_SYS(sys_mremap
, 5)
2221 MIPS_SYS(sys_accept
, 3)
2222 MIPS_SYS(sys_bind
, 3)
2223 MIPS_SYS(sys_connect
, 3) /* 4170 */
2224 MIPS_SYS(sys_getpeername
, 3)
2225 MIPS_SYS(sys_getsockname
, 3)
2226 MIPS_SYS(sys_getsockopt
, 5)
2227 MIPS_SYS(sys_listen
, 2)
2228 MIPS_SYS(sys_recv
, 4) /* 4175 */
2229 MIPS_SYS(sys_recvfrom
, 6)
2230 MIPS_SYS(sys_recvmsg
, 3)
2231 MIPS_SYS(sys_send
, 4)
2232 MIPS_SYS(sys_sendmsg
, 3)
2233 MIPS_SYS(sys_sendto
, 6) /* 4180 */
2234 MIPS_SYS(sys_setsockopt
, 5)
2235 MIPS_SYS(sys_shutdown
, 2)
2236 MIPS_SYS(sys_socket
, 3)
2237 MIPS_SYS(sys_socketpair
, 4)
2238 MIPS_SYS(sys_setresuid
, 3) /* 4185 */
2239 MIPS_SYS(sys_getresuid
, 3)
2240 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_query_module */
2241 MIPS_SYS(sys_poll
, 3)
2242 MIPS_SYS(sys_nfsservctl
, 3)
2243 MIPS_SYS(sys_setresgid
, 3) /* 4190 */
2244 MIPS_SYS(sys_getresgid
, 3)
2245 MIPS_SYS(sys_prctl
, 5)
2246 MIPS_SYS(sys_rt_sigreturn
, 0)
2247 MIPS_SYS(sys_rt_sigaction
, 4)
2248 MIPS_SYS(sys_rt_sigprocmask
, 4) /* 4195 */
2249 MIPS_SYS(sys_rt_sigpending
, 2)
2250 MIPS_SYS(sys_rt_sigtimedwait
, 4)
2251 MIPS_SYS(sys_rt_sigqueueinfo
, 3)
2252 MIPS_SYS(sys_rt_sigsuspend
, 0)
2253 MIPS_SYS(sys_pread64
, 6) /* 4200 */
2254 MIPS_SYS(sys_pwrite64
, 6)
2255 MIPS_SYS(sys_chown
, 3)
2256 MIPS_SYS(sys_getcwd
, 2)
2257 MIPS_SYS(sys_capget
, 2)
2258 MIPS_SYS(sys_capset
, 2) /* 4205 */
2259 MIPS_SYS(sys_sigaltstack
, 2)
2260 MIPS_SYS(sys_sendfile
, 4)
2261 MIPS_SYS(sys_ni_syscall
, 0)
2262 MIPS_SYS(sys_ni_syscall
, 0)
2263 MIPS_SYS(sys_mmap2
, 6) /* 4210 */
2264 MIPS_SYS(sys_truncate64
, 4)
2265 MIPS_SYS(sys_ftruncate64
, 4)
2266 MIPS_SYS(sys_stat64
, 2)
2267 MIPS_SYS(sys_lstat64
, 2)
2268 MIPS_SYS(sys_fstat64
, 2) /* 4215 */
2269 MIPS_SYS(sys_pivot_root
, 2)
2270 MIPS_SYS(sys_mincore
, 3)
2271 MIPS_SYS(sys_madvise
, 3)
2272 MIPS_SYS(sys_getdents64
, 3)
2273 MIPS_SYS(sys_fcntl64
, 3) /* 4220 */
2274 MIPS_SYS(sys_ni_syscall
, 0)
2275 MIPS_SYS(sys_gettid
, 0)
2276 MIPS_SYS(sys_readahead
, 5)
2277 MIPS_SYS(sys_setxattr
, 5)
2278 MIPS_SYS(sys_lsetxattr
, 5) /* 4225 */
2279 MIPS_SYS(sys_fsetxattr
, 5)
2280 MIPS_SYS(sys_getxattr
, 4)
2281 MIPS_SYS(sys_lgetxattr
, 4)
2282 MIPS_SYS(sys_fgetxattr
, 4)
2283 MIPS_SYS(sys_listxattr
, 3) /* 4230 */
2284 MIPS_SYS(sys_llistxattr
, 3)
2285 MIPS_SYS(sys_flistxattr
, 3)
2286 MIPS_SYS(sys_removexattr
, 2)
2287 MIPS_SYS(sys_lremovexattr
, 2)
2288 MIPS_SYS(sys_fremovexattr
, 2) /* 4235 */
2289 MIPS_SYS(sys_tkill
, 2)
2290 MIPS_SYS(sys_sendfile64
, 5)
2291 MIPS_SYS(sys_futex
, 6)
2292 MIPS_SYS(sys_sched_setaffinity
, 3)
2293 MIPS_SYS(sys_sched_getaffinity
, 3) /* 4240 */
2294 MIPS_SYS(sys_io_setup
, 2)
2295 MIPS_SYS(sys_io_destroy
, 1)
2296 MIPS_SYS(sys_io_getevents
, 5)
2297 MIPS_SYS(sys_io_submit
, 3)
2298 MIPS_SYS(sys_io_cancel
, 3) /* 4245 */
2299 MIPS_SYS(sys_exit_group
, 1)
2300 MIPS_SYS(sys_lookup_dcookie
, 3)
2301 MIPS_SYS(sys_epoll_create
, 1)
2302 MIPS_SYS(sys_epoll_ctl
, 4)
2303 MIPS_SYS(sys_epoll_wait
, 3) /* 4250 */
2304 MIPS_SYS(sys_remap_file_pages
, 5)
2305 MIPS_SYS(sys_set_tid_address
, 1)
2306 MIPS_SYS(sys_restart_syscall
, 0)
2307 MIPS_SYS(sys_fadvise64_64
, 7)
2308 MIPS_SYS(sys_statfs64
, 3) /* 4255 */
2309 MIPS_SYS(sys_fstatfs64
, 2)
2310 MIPS_SYS(sys_timer_create
, 3)
2311 MIPS_SYS(sys_timer_settime
, 4)
2312 MIPS_SYS(sys_timer_gettime
, 2)
2313 MIPS_SYS(sys_timer_getoverrun
, 1) /* 4260 */
2314 MIPS_SYS(sys_timer_delete
, 1)
2315 MIPS_SYS(sys_clock_settime
, 2)
2316 MIPS_SYS(sys_clock_gettime
, 2)
2317 MIPS_SYS(sys_clock_getres
, 2)
2318 MIPS_SYS(sys_clock_nanosleep
, 4) /* 4265 */
2319 MIPS_SYS(sys_tgkill
, 3)
2320 MIPS_SYS(sys_utimes
, 2)
2321 MIPS_SYS(sys_mbind
, 4)
2322 MIPS_SYS(sys_ni_syscall
, 0) /* sys_get_mempolicy */
2323 MIPS_SYS(sys_ni_syscall
, 0) /* 4270 sys_set_mempolicy */
2324 MIPS_SYS(sys_mq_open
, 4)
2325 MIPS_SYS(sys_mq_unlink
, 1)
2326 MIPS_SYS(sys_mq_timedsend
, 5)
2327 MIPS_SYS(sys_mq_timedreceive
, 5)
2328 MIPS_SYS(sys_mq_notify
, 2) /* 4275 */
2329 MIPS_SYS(sys_mq_getsetattr
, 3)
2330 MIPS_SYS(sys_ni_syscall
, 0) /* sys_vserver */
2331 MIPS_SYS(sys_waitid
, 4)
2332 MIPS_SYS(sys_ni_syscall
, 0) /* available, was setaltroot */
2333 MIPS_SYS(sys_add_key
, 5)
2334 MIPS_SYS(sys_request_key
, 4)
2335 MIPS_SYS(sys_keyctl
, 5)
2336 MIPS_SYS(sys_set_thread_area
, 1)
2337 MIPS_SYS(sys_inotify_init
, 0)
2338 MIPS_SYS(sys_inotify_add_watch
, 3) /* 4285 */
2339 MIPS_SYS(sys_inotify_rm_watch
, 2)
2340 MIPS_SYS(sys_migrate_pages
, 4)
2341 MIPS_SYS(sys_openat
, 4)
2342 MIPS_SYS(sys_mkdirat
, 3)
2343 MIPS_SYS(sys_mknodat
, 4) /* 4290 */
2344 MIPS_SYS(sys_fchownat
, 5)
2345 MIPS_SYS(sys_futimesat
, 3)
2346 MIPS_SYS(sys_fstatat64
, 4)
2347 MIPS_SYS(sys_unlinkat
, 3)
2348 MIPS_SYS(sys_renameat
, 4) /* 4295 */
2349 MIPS_SYS(sys_linkat
, 5)
2350 MIPS_SYS(sys_symlinkat
, 3)
2351 MIPS_SYS(sys_readlinkat
, 4)
2352 MIPS_SYS(sys_fchmodat
, 3)
2353 MIPS_SYS(sys_faccessat
, 3) /* 4300 */
2354 MIPS_SYS(sys_pselect6
, 6)
2355 MIPS_SYS(sys_ppoll
, 5)
2356 MIPS_SYS(sys_unshare
, 1)
2357 MIPS_SYS(sys_splice
, 6)
2358 MIPS_SYS(sys_sync_file_range
, 7) /* 4305 */
2359 MIPS_SYS(sys_tee
, 4)
2360 MIPS_SYS(sys_vmsplice
, 4)
2361 MIPS_SYS(sys_move_pages
, 6)
2362 MIPS_SYS(sys_set_robust_list
, 2)
2363 MIPS_SYS(sys_get_robust_list
, 3) /* 4310 */
2364 MIPS_SYS(sys_kexec_load
, 4)
2365 MIPS_SYS(sys_getcpu
, 3)
2366 MIPS_SYS(sys_epoll_pwait
, 6)
2367 MIPS_SYS(sys_ioprio_set
, 3)
2368 MIPS_SYS(sys_ioprio_get
, 2)
2369 MIPS_SYS(sys_utimensat
, 4)
2370 MIPS_SYS(sys_signalfd
, 3)
2371 MIPS_SYS(sys_ni_syscall
, 0) /* was timerfd */
2372 MIPS_SYS(sys_eventfd
, 1)
2373 MIPS_SYS(sys_fallocate
, 6) /* 4320 */
2374 MIPS_SYS(sys_timerfd_create
, 2)
2375 MIPS_SYS(sys_timerfd_gettime
, 2)
2376 MIPS_SYS(sys_timerfd_settime
, 4)
2377 MIPS_SYS(sys_signalfd4
, 4)
2378 MIPS_SYS(sys_eventfd2
, 2) /* 4325 */
2379 MIPS_SYS(sys_epoll_create1
, 1)
2380 MIPS_SYS(sys_dup3
, 3)
2381 MIPS_SYS(sys_pipe2
, 2)
2382 MIPS_SYS(sys_inotify_init1
, 1)
2383 MIPS_SYS(sys_preadv
, 6) /* 4330 */
2384 MIPS_SYS(sys_pwritev
, 6)
2385 MIPS_SYS(sys_rt_tgsigqueueinfo
, 4)
2386 MIPS_SYS(sys_perf_event_open
, 5)
2387 MIPS_SYS(sys_accept4
, 4)
2388 MIPS_SYS(sys_recvmmsg
, 5) /* 4335 */
2389 MIPS_SYS(sys_fanotify_init
, 2)
2390 MIPS_SYS(sys_fanotify_mark
, 6)
2391 MIPS_SYS(sys_prlimit64
, 4)
2392 MIPS_SYS(sys_name_to_handle_at
, 5)
2393 MIPS_SYS(sys_open_by_handle_at
, 3) /* 4340 */
2394 MIPS_SYS(sys_clock_adjtime
, 2)
2395 MIPS_SYS(sys_syncfs
, 1)
2400 static int do_store_exclusive(CPUMIPSState
*env
)
2403 target_ulong page_addr
;
2411 page_addr
= addr
& TARGET_PAGE_MASK
;
2414 flags
= page_get_flags(page_addr
);
2415 if ((flags
& PAGE_READ
) == 0) {
2418 reg
= env
->llreg
& 0x1f;
2419 d
= (env
->llreg
& 0x20) != 0;
2421 segv
= get_user_s64(val
, addr
);
2423 segv
= get_user_s32(val
, addr
);
2426 if (val
!= env
->llval
) {
2427 env
->active_tc
.gpr
[reg
] = 0;
2430 segv
= put_user_u64(env
->llnewval
, addr
);
2432 segv
= put_user_u32(env
->llnewval
, addr
);
2435 env
->active_tc
.gpr
[reg
] = 1;
2442 env
->active_tc
.PC
+= 4;
2455 static int do_break(CPUMIPSState
*env
, target_siginfo_t
*info
,
2463 info
->si_signo
= TARGET_SIGFPE
;
2465 info
->si_code
= (code
== BRK_OVERFLOW
) ? FPE_INTOVF
: FPE_INTDIV
;
2466 queue_signal(env
, info
->si_signo
, &*info
);
2470 info
->si_signo
= TARGET_SIGTRAP
;
2472 queue_signal(env
, info
->si_signo
, &*info
);
2480 void cpu_loop(CPUMIPSState
*env
)
2482 CPUState
*cs
= CPU(mips_env_get_cpu(env
));
2483 target_siginfo_t info
;
2486 # ifdef TARGET_ABI_MIPSO32
2487 unsigned int syscall_num
;
2492 trapnr
= cpu_mips_exec(cs
);
2496 env
->active_tc
.PC
+= 4;
2497 # ifdef TARGET_ABI_MIPSO32
2498 syscall_num
= env
->active_tc
.gpr
[2] - 4000;
2499 if (syscall_num
>= sizeof(mips_syscall_args
)) {
2500 ret
= -TARGET_ENOSYS
;
2504 abi_ulong arg5
= 0, arg6
= 0, arg7
= 0, arg8
= 0;
2506 nb_args
= mips_syscall_args
[syscall_num
];
2507 sp_reg
= env
->active_tc
.gpr
[29];
2509 /* these arguments are taken from the stack */
2511 if ((ret
= get_user_ual(arg8
, sp_reg
+ 28)) != 0) {
2515 if ((ret
= get_user_ual(arg7
, sp_reg
+ 24)) != 0) {
2519 if ((ret
= get_user_ual(arg6
, sp_reg
+ 20)) != 0) {
2523 if ((ret
= get_user_ual(arg5
, sp_reg
+ 16)) != 0) {
2529 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2530 env
->active_tc
.gpr
[4],
2531 env
->active_tc
.gpr
[5],
2532 env
->active_tc
.gpr
[6],
2533 env
->active_tc
.gpr
[7],
2534 arg5
, arg6
, arg7
, arg8
);
2538 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2539 env
->active_tc
.gpr
[4], env
->active_tc
.gpr
[5],
2540 env
->active_tc
.gpr
[6], env
->active_tc
.gpr
[7],
2541 env
->active_tc
.gpr
[8], env
->active_tc
.gpr
[9],
2542 env
->active_tc
.gpr
[10], env
->active_tc
.gpr
[11]);
2544 if (ret
== -TARGET_QEMU_ESIGRETURN
) {
2545 /* Returning from a successful sigreturn syscall.
2546 Avoid clobbering register state. */
2549 if ((abi_ulong
)ret
>= (abi_ulong
)-1133) {
2550 env
->active_tc
.gpr
[7] = 1; /* error flag */
2553 env
->active_tc
.gpr
[7] = 0; /* error flag */
2555 env
->active_tc
.gpr
[2] = ret
;
2561 info
.si_signo
= TARGET_SIGSEGV
;
2563 /* XXX: check env->error_code */
2564 info
.si_code
= TARGET_SEGV_MAPERR
;
2565 info
._sifields
._sigfault
._addr
= env
->CP0_BadVAddr
;
2566 queue_signal(env
, info
.si_signo
, &info
);
2570 info
.si_signo
= TARGET_SIGILL
;
2573 queue_signal(env
, info
.si_signo
, &info
);
2575 case EXCP_INTERRUPT
:
2576 /* just indicate that signals should be handled asap */
2582 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2585 info
.si_signo
= sig
;
2587 info
.si_code
= TARGET_TRAP_BRKPT
;
2588 queue_signal(env
, info
.si_signo
, &info
);
2593 if (do_store_exclusive(env
)) {
2594 info
.si_signo
= TARGET_SIGSEGV
;
2596 info
.si_code
= TARGET_SEGV_MAPERR
;
2597 info
._sifields
._sigfault
._addr
= env
->active_tc
.PC
;
2598 queue_signal(env
, info
.si_signo
, &info
);
2602 info
.si_signo
= TARGET_SIGILL
;
2604 info
.si_code
= TARGET_ILL_ILLOPC
;
2605 queue_signal(env
, info
.si_signo
, &info
);
2607 /* The code below was inspired by the MIPS Linux kernel trap
2608 * handling code in arch/mips/kernel/traps.c.
2612 abi_ulong trap_instr
;
2615 if (env
->hflags
& MIPS_HFLAG_M16
) {
2616 if (env
->insn_flags
& ASE_MICROMIPS
) {
2617 /* microMIPS mode */
2618 ret
= get_user_u16(trap_instr
, env
->active_tc
.PC
);
2623 if ((trap_instr
>> 10) == 0x11) {
2624 /* 16-bit instruction */
2625 code
= trap_instr
& 0xf;
2627 /* 32-bit instruction */
2630 ret
= get_user_u16(instr_lo
,
2631 env
->active_tc
.PC
+ 2);
2635 trap_instr
= (trap_instr
<< 16) | instr_lo
;
2636 code
= ((trap_instr
>> 6) & ((1 << 20) - 1));
2637 /* Unfortunately, microMIPS also suffers from
2638 the old assembler bug... */
2639 if (code
>= (1 << 10)) {
2645 ret
= get_user_u16(trap_instr
, env
->active_tc
.PC
);
2649 code
= (trap_instr
>> 6) & 0x3f;
2652 ret
= get_user_u32(trap_instr
, env
->active_tc
.PC
);
2657 /* As described in the original Linux kernel code, the
2658 * below checks on 'code' are to work around an old
2661 code
= ((trap_instr
>> 6) & ((1 << 20) - 1));
2662 if (code
>= (1 << 10)) {
2667 if (do_break(env
, &info
, code
) != 0) {
2674 abi_ulong trap_instr
;
2675 unsigned int code
= 0;
2677 if (env
->hflags
& MIPS_HFLAG_M16
) {
2678 /* microMIPS mode */
2681 ret
= get_user_u16(instr
[0], env
->active_tc
.PC
) ||
2682 get_user_u16(instr
[1], env
->active_tc
.PC
+ 2);
2684 trap_instr
= (instr
[0] << 16) | instr
[1];
2686 ret
= get_user_u32(trap_instr
, env
->active_tc
.PC
);
2693 /* The immediate versions don't provide a code. */
2694 if (!(trap_instr
& 0xFC000000)) {
2695 if (env
->hflags
& MIPS_HFLAG_M16
) {
2696 /* microMIPS mode */
2697 code
= ((trap_instr
>> 12) & ((1 << 4) - 1));
2699 code
= ((trap_instr
>> 6) & ((1 << 10) - 1));
2703 if (do_break(env
, &info
, code
) != 0) {
2710 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
2713 process_pending_signals(env
);
2718 #ifdef TARGET_OPENRISC
2720 void cpu_loop(CPUOpenRISCState
*env
)
2722 CPUState
*cs
= CPU(openrisc_env_get_cpu(env
));
2727 trapnr
= cpu_openrisc_exec(cs
);
2733 qemu_log_mask(CPU_LOG_INT
, "\nReset request, exit, pc is %#x\n", env
->pc
);
2737 qemu_log_mask(CPU_LOG_INT
, "\nBus error, exit, pc is %#x\n", env
->pc
);
2738 gdbsig
= TARGET_SIGBUS
;
2742 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2743 gdbsig
= TARGET_SIGSEGV
;
2746 qemu_log_mask(CPU_LOG_INT
, "\nTick time interrupt pc is %#x\n", env
->pc
);
2749 qemu_log_mask(CPU_LOG_INT
, "\nAlignment pc is %#x\n", env
->pc
);
2750 gdbsig
= TARGET_SIGBUS
;
2753 qemu_log_mask(CPU_LOG_INT
, "\nIllegal instructionpc is %#x\n", env
->pc
);
2754 gdbsig
= TARGET_SIGILL
;
2757 qemu_log_mask(CPU_LOG_INT
, "\nExternal interruptpc is %#x\n", env
->pc
);
2761 qemu_log_mask(CPU_LOG_INT
, "\nTLB miss\n");
2764 qemu_log_mask(CPU_LOG_INT
, "\nRange\n");
2765 gdbsig
= TARGET_SIGSEGV
;
2768 env
->pc
+= 4; /* 0xc00; */
2769 env
->gpr
[11] = do_syscall(env
,
2770 env
->gpr
[11], /* return value */
2771 env
->gpr
[3], /* r3 - r7 are params */
2779 qemu_log_mask(CPU_LOG_INT
, "\nFloating point error\n");
2782 qemu_log_mask(CPU_LOG_INT
, "\nTrap\n");
2783 gdbsig
= TARGET_SIGTRAP
;
2786 qemu_log_mask(CPU_LOG_INT
, "\nNR\n");
2789 EXCP_DUMP(env
, "\nqemu: unhandled CPU exception %#x - aborting\n",
2791 gdbsig
= TARGET_SIGILL
;
2795 gdb_handlesig(cs
, gdbsig
);
2796 if (gdbsig
!= TARGET_SIGTRAP
) {
2801 process_pending_signals(env
);
2805 #endif /* TARGET_OPENRISC */
2808 void cpu_loop(CPUSH4State
*env
)
2810 CPUState
*cs
= CPU(sh_env_get_cpu(env
));
2812 target_siginfo_t info
;
2816 trapnr
= cpu_sh4_exec(cs
);
2822 ret
= do_syscall(env
,
2831 env
->gregs
[0] = ret
;
2833 case EXCP_INTERRUPT
:
2834 /* just indicate that signals should be handled asap */
2840 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2843 info
.si_signo
= sig
;
2845 info
.si_code
= TARGET_TRAP_BRKPT
;
2846 queue_signal(env
, info
.si_signo
, &info
);
2852 info
.si_signo
= TARGET_SIGSEGV
;
2854 info
.si_code
= TARGET_SEGV_MAPERR
;
2855 info
._sifields
._sigfault
._addr
= env
->tea
;
2856 queue_signal(env
, info
.si_signo
, &info
);
2860 printf ("Unhandled trap: 0x%x\n", trapnr
);
2861 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2864 process_pending_signals (env
);
2870 void cpu_loop(CPUCRISState
*env
)
2872 CPUState
*cs
= CPU(cris_env_get_cpu(env
));
2874 target_siginfo_t info
;
2878 trapnr
= cpu_cris_exec(cs
);
2883 info
.si_signo
= TARGET_SIGSEGV
;
2885 /* XXX: check env->error_code */
2886 info
.si_code
= TARGET_SEGV_MAPERR
;
2887 info
._sifields
._sigfault
._addr
= env
->pregs
[PR_EDA
];
2888 queue_signal(env
, info
.si_signo
, &info
);
2891 case EXCP_INTERRUPT
:
2892 /* just indicate that signals should be handled asap */
2895 ret
= do_syscall(env
,
2904 env
->regs
[10] = ret
;
2910 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2913 info
.si_signo
= sig
;
2915 info
.si_code
= TARGET_TRAP_BRKPT
;
2916 queue_signal(env
, info
.si_signo
, &info
);
2921 printf ("Unhandled trap: 0x%x\n", trapnr
);
2922 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2925 process_pending_signals (env
);
2930 #ifdef TARGET_MICROBLAZE
2931 void cpu_loop(CPUMBState
*env
)
2933 CPUState
*cs
= CPU(mb_env_get_cpu(env
));
2935 target_siginfo_t info
;
2939 trapnr
= cpu_mb_exec(cs
);
2944 info
.si_signo
= TARGET_SIGSEGV
;
2946 /* XXX: check env->error_code */
2947 info
.si_code
= TARGET_SEGV_MAPERR
;
2948 info
._sifields
._sigfault
._addr
= 0;
2949 queue_signal(env
, info
.si_signo
, &info
);
2952 case EXCP_INTERRUPT
:
2953 /* just indicate that signals should be handled asap */
2956 /* Return address is 4 bytes after the call. */
2958 env
->sregs
[SR_PC
] = env
->regs
[14];
2959 ret
= do_syscall(env
,
2971 env
->regs
[17] = env
->sregs
[SR_PC
] + 4;
2972 if (env
->iflags
& D_FLAG
) {
2973 env
->sregs
[SR_ESR
] |= 1 << 12;
2974 env
->sregs
[SR_PC
] -= 4;
2975 /* FIXME: if branch was immed, replay the imm as well. */
2978 env
->iflags
&= ~(IMM_FLAG
| D_FLAG
);
2980 switch (env
->sregs
[SR_ESR
] & 31) {
2981 case ESR_EC_DIVZERO
:
2982 info
.si_signo
= TARGET_SIGFPE
;
2984 info
.si_code
= TARGET_FPE_FLTDIV
;
2985 info
._sifields
._sigfault
._addr
= 0;
2986 queue_signal(env
, info
.si_signo
, &info
);
2989 info
.si_signo
= TARGET_SIGFPE
;
2991 if (env
->sregs
[SR_FSR
] & FSR_IO
) {
2992 info
.si_code
= TARGET_FPE_FLTINV
;
2994 if (env
->sregs
[SR_FSR
] & FSR_DZ
) {
2995 info
.si_code
= TARGET_FPE_FLTDIV
;
2997 info
._sifields
._sigfault
._addr
= 0;
2998 queue_signal(env
, info
.si_signo
, &info
);
3001 printf ("Unhandled hw-exception: 0x%x\n",
3002 env
->sregs
[SR_ESR
] & ESR_EC_MASK
);
3003 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3012 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3015 info
.si_signo
= sig
;
3017 info
.si_code
= TARGET_TRAP_BRKPT
;
3018 queue_signal(env
, info
.si_signo
, &info
);
3023 printf ("Unhandled trap: 0x%x\n", trapnr
);
3024 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3027 process_pending_signals (env
);
3034 void cpu_loop(CPUM68KState
*env
)
3036 CPUState
*cs
= CPU(m68k_env_get_cpu(env
));
3039 target_siginfo_t info
;
3040 TaskState
*ts
= cs
->opaque
;
3044 trapnr
= cpu_m68k_exec(cs
);
3049 if (ts
->sim_syscalls
) {
3051 get_user_u16(nr
, env
->pc
+ 2);
3053 do_m68k_simcall(env
, nr
);
3059 case EXCP_HALT_INSN
:
3060 /* Semihosing syscall. */
3062 do_m68k_semihosting(env
, env
->dregs
[0]);
3066 case EXCP_UNSUPPORTED
:
3068 info
.si_signo
= TARGET_SIGILL
;
3070 info
.si_code
= TARGET_ILL_ILLOPN
;
3071 info
._sifields
._sigfault
._addr
= env
->pc
;
3072 queue_signal(env
, info
.si_signo
, &info
);
3076 ts
->sim_syscalls
= 0;
3079 env
->dregs
[0] = do_syscall(env
,
3090 case EXCP_INTERRUPT
:
3091 /* just indicate that signals should be handled asap */
3095 info
.si_signo
= TARGET_SIGSEGV
;
3097 /* XXX: check env->error_code */
3098 info
.si_code
= TARGET_SEGV_MAPERR
;
3099 info
._sifields
._sigfault
._addr
= env
->mmu
.ar
;
3100 queue_signal(env
, info
.si_signo
, &info
);
3107 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3110 info
.si_signo
= sig
;
3112 info
.si_code
= TARGET_TRAP_BRKPT
;
3113 queue_signal(env
, info
.si_signo
, &info
);
3118 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
3121 process_pending_signals(env
);
3124 #endif /* TARGET_M68K */
3127 static void do_store_exclusive(CPUAlphaState
*env
, int reg
, int quad
)
3129 target_ulong addr
, val
, tmp
;
3130 target_siginfo_t info
;
3133 addr
= env
->lock_addr
;
3134 tmp
= env
->lock_st_addr
;
3135 env
->lock_addr
= -1;
3136 env
->lock_st_addr
= 0;
3142 if (quad
? get_user_s64(val
, addr
) : get_user_s32(val
, addr
)) {
3146 if (val
== env
->lock_value
) {
3148 if (quad
? put_user_u64(tmp
, addr
) : put_user_u32(tmp
, addr
)) {
3165 info
.si_signo
= TARGET_SIGSEGV
;
3167 info
.si_code
= TARGET_SEGV_MAPERR
;
3168 info
._sifields
._sigfault
._addr
= addr
;
3169 queue_signal(env
, TARGET_SIGSEGV
, &info
);
3172 void cpu_loop(CPUAlphaState
*env
)
3174 CPUState
*cs
= CPU(alpha_env_get_cpu(env
));
3176 target_siginfo_t info
;
3181 trapnr
= cpu_alpha_exec(cs
);
3184 /* All of the traps imply a transition through PALcode, which
3185 implies an REI instruction has been executed. Which means
3186 that the intr_flag should be cleared. */
3191 fprintf(stderr
, "Reset requested. Exit\n");
3195 fprintf(stderr
, "Machine check exception. Exit\n");
3198 case EXCP_SMP_INTERRUPT
:
3199 case EXCP_CLK_INTERRUPT
:
3200 case EXCP_DEV_INTERRUPT
:
3201 fprintf(stderr
, "External interrupt. Exit\n");
3205 env
->lock_addr
= -1;
3206 info
.si_signo
= TARGET_SIGSEGV
;
3208 info
.si_code
= (page_get_flags(env
->trap_arg0
) & PAGE_VALID
3209 ? TARGET_SEGV_ACCERR
: TARGET_SEGV_MAPERR
);
3210 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
3211 queue_signal(env
, info
.si_signo
, &info
);
3214 env
->lock_addr
= -1;
3215 info
.si_signo
= TARGET_SIGBUS
;
3217 info
.si_code
= TARGET_BUS_ADRALN
;
3218 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
3219 queue_signal(env
, info
.si_signo
, &info
);
3223 env
->lock_addr
= -1;
3224 info
.si_signo
= TARGET_SIGILL
;
3226 info
.si_code
= TARGET_ILL_ILLOPC
;
3227 info
._sifields
._sigfault
._addr
= env
->pc
;
3228 queue_signal(env
, info
.si_signo
, &info
);
3231 env
->lock_addr
= -1;
3232 info
.si_signo
= TARGET_SIGFPE
;
3234 info
.si_code
= TARGET_FPE_FLTINV
;
3235 info
._sifields
._sigfault
._addr
= env
->pc
;
3236 queue_signal(env
, info
.si_signo
, &info
);
3239 /* No-op. Linux simply re-enables the FPU. */
3242 env
->lock_addr
= -1;
3243 switch (env
->error_code
) {
3246 info
.si_signo
= TARGET_SIGTRAP
;
3248 info
.si_code
= TARGET_TRAP_BRKPT
;
3249 info
._sifields
._sigfault
._addr
= env
->pc
;
3250 queue_signal(env
, info
.si_signo
, &info
);
3254 info
.si_signo
= TARGET_SIGTRAP
;
3257 info
._sifields
._sigfault
._addr
= env
->pc
;
3258 queue_signal(env
, info
.si_signo
, &info
);
3262 trapnr
= env
->ir
[IR_V0
];
3263 sysret
= do_syscall(env
, trapnr
,
3264 env
->ir
[IR_A0
], env
->ir
[IR_A1
],
3265 env
->ir
[IR_A2
], env
->ir
[IR_A3
],
3266 env
->ir
[IR_A4
], env
->ir
[IR_A5
],
3268 if (trapnr
== TARGET_NR_sigreturn
3269 || trapnr
== TARGET_NR_rt_sigreturn
) {
3272 /* Syscall writes 0 to V0 to bypass error check, similar
3273 to how this is handled internal to Linux kernel.
3274 (Ab)use trapnr temporarily as boolean indicating error. */
3275 trapnr
= (env
->ir
[IR_V0
] != 0 && sysret
< 0);
3276 env
->ir
[IR_V0
] = (trapnr
? -sysret
: sysret
);
3277 env
->ir
[IR_A3
] = trapnr
;
3281 /* ??? We can probably elide the code using page_unprotect
3282 that is checking for self-modifying code. Instead we
3283 could simply call tb_flush here. Until we work out the
3284 changes required to turn off the extra write protection,
3285 this can be a no-op. */
3289 /* Handled in the translator for usermode. */
3293 /* Handled in the translator for usermode. */
3297 info
.si_signo
= TARGET_SIGFPE
;
3298 switch (env
->ir
[IR_A0
]) {
3299 case TARGET_GEN_INTOVF
:
3300 info
.si_code
= TARGET_FPE_INTOVF
;
3302 case TARGET_GEN_INTDIV
:
3303 info
.si_code
= TARGET_FPE_INTDIV
;
3305 case TARGET_GEN_FLTOVF
:
3306 info
.si_code
= TARGET_FPE_FLTOVF
;
3308 case TARGET_GEN_FLTUND
:
3309 info
.si_code
= TARGET_FPE_FLTUND
;
3311 case TARGET_GEN_FLTINV
:
3312 info
.si_code
= TARGET_FPE_FLTINV
;
3314 case TARGET_GEN_FLTINE
:
3315 info
.si_code
= TARGET_FPE_FLTRES
;
3317 case TARGET_GEN_ROPRAND
:
3321 info
.si_signo
= TARGET_SIGTRAP
;
3326 info
._sifields
._sigfault
._addr
= env
->pc
;
3327 queue_signal(env
, info
.si_signo
, &info
);
3334 info
.si_signo
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3335 if (info
.si_signo
) {
3336 env
->lock_addr
= -1;
3338 info
.si_code
= TARGET_TRAP_BRKPT
;
3339 queue_signal(env
, info
.si_signo
, &info
);
3344 do_store_exclusive(env
, env
->error_code
, trapnr
- EXCP_STL_C
);
3346 case EXCP_INTERRUPT
:
3347 /* Just indicate that signals should be handled asap. */
3350 printf ("Unhandled trap: 0x%x\n", trapnr
);
3351 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3354 process_pending_signals (env
);
3357 #endif /* TARGET_ALPHA */
3360 void cpu_loop(CPUS390XState
*env
)
3362 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
3364 target_siginfo_t info
;
3369 trapnr
= cpu_s390x_exec(cs
);
3372 case EXCP_INTERRUPT
:
3373 /* Just indicate that signals should be handled asap. */
3377 n
= env
->int_svc_code
;
3379 /* syscalls > 255 */
3382 env
->psw
.addr
+= env
->int_svc_ilen
;
3383 env
->regs
[2] = do_syscall(env
, n
, env
->regs
[2], env
->regs
[3],
3384 env
->regs
[4], env
->regs
[5],
3385 env
->regs
[6], env
->regs
[7], 0, 0);
3389 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3391 n
= TARGET_TRAP_BRKPT
;
3396 n
= env
->int_pgm_code
;
3399 case PGM_PRIVILEGED
:
3400 sig
= TARGET_SIGILL
;
3401 n
= TARGET_ILL_ILLOPC
;
3403 case PGM_PROTECTION
:
3404 case PGM_ADDRESSING
:
3405 sig
= TARGET_SIGSEGV
;
3406 /* XXX: check env->error_code */
3407 n
= TARGET_SEGV_MAPERR
;
3408 addr
= env
->__excp_addr
;
3411 case PGM_SPECIFICATION
:
3412 case PGM_SPECIAL_OP
:
3415 sig
= TARGET_SIGILL
;
3416 n
= TARGET_ILL_ILLOPN
;
3419 case PGM_FIXPT_OVERFLOW
:
3420 sig
= TARGET_SIGFPE
;
3421 n
= TARGET_FPE_INTOVF
;
3423 case PGM_FIXPT_DIVIDE
:
3424 sig
= TARGET_SIGFPE
;
3425 n
= TARGET_FPE_INTDIV
;
3429 n
= (env
->fpc
>> 8) & 0xff;
3431 /* compare-and-trap */
3434 /* An IEEE exception, simulated or otherwise. */
3436 n
= TARGET_FPE_FLTINV
;
3437 } else if (n
& 0x40) {
3438 n
= TARGET_FPE_FLTDIV
;
3439 } else if (n
& 0x20) {
3440 n
= TARGET_FPE_FLTOVF
;
3441 } else if (n
& 0x10) {
3442 n
= TARGET_FPE_FLTUND
;
3443 } else if (n
& 0x08) {
3444 n
= TARGET_FPE_FLTRES
;
3446 /* ??? Quantum exception; BFP, DFP error. */
3449 sig
= TARGET_SIGFPE
;
3454 fprintf(stderr
, "Unhandled program exception: %#x\n", n
);
3455 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3461 addr
= env
->psw
.addr
;
3463 info
.si_signo
= sig
;
3466 info
._sifields
._sigfault
._addr
= addr
;
3467 queue_signal(env
, info
.si_signo
, &info
);
3471 fprintf(stderr
, "Unhandled trap: 0x%x\n", trapnr
);
3472 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3475 process_pending_signals (env
);
3479 #endif /* TARGET_S390X */
3481 #ifdef TARGET_TILEGX
3483 static void gen_sigill_reg(CPUTLGState
*env
)
3485 target_siginfo_t info
;
3487 info
.si_signo
= TARGET_SIGILL
;
3489 info
.si_code
= TARGET_ILL_PRVREG
;
3490 info
._sifields
._sigfault
._addr
= env
->pc
;
3491 queue_signal(env
, info
.si_signo
, &info
);
3494 static void do_signal(CPUTLGState
*env
, int signo
, int sigcode
)
3496 target_siginfo_t info
;
3498 info
.si_signo
= signo
;
3500 info
._sifields
._sigfault
._addr
= env
->pc
;
3502 if (signo
== TARGET_SIGSEGV
) {
3503 /* The passed in sigcode is a dummy; check for a page mapping
3504 and pass either MAPERR or ACCERR. */
3505 target_ulong addr
= env
->excaddr
;
3506 info
._sifields
._sigfault
._addr
= addr
;
3507 if (page_check_range(addr
, 1, PAGE_VALID
) < 0) {
3508 sigcode
= TARGET_SEGV_MAPERR
;
3510 sigcode
= TARGET_SEGV_ACCERR
;
3513 info
.si_code
= sigcode
;
3515 queue_signal(env
, info
.si_signo
, &info
);
3518 static void gen_sigsegv_maperr(CPUTLGState
*env
, target_ulong addr
)
3520 env
->excaddr
= addr
;
3521 do_signal(env
, TARGET_SIGSEGV
, 0);
3524 static void set_regval(CPUTLGState
*env
, uint8_t reg
, uint64_t val
)
3526 if (unlikely(reg
>= TILEGX_R_COUNT
)) {
3537 gen_sigill_reg(env
);
3540 g_assert_not_reached();
3543 env
->regs
[reg
] = val
;
3547 * Compare the 8-byte contents of the CmpValue SPR with the 8-byte value in
3548 * memory at the address held in the first source register. If the values are
3549 * not equal, then no memory operation is performed. If the values are equal,
3550 * the 8-byte quantity from the second source register is written into memory
3551 * at the address held in the first source register. In either case, the result
3552 * of the instruction is the value read from memory. The compare and write to
3553 * memory are atomic and thus can be used for synchronization purposes. This
3554 * instruction only operates for addresses aligned to a 8-byte boundary.
3555 * Unaligned memory access causes an Unaligned Data Reference interrupt.
3557 * Functional Description (64-bit)
3558 * uint64_t memVal = memoryReadDoubleWord (rf[SrcA]);
3559 * rf[Dest] = memVal;
3560 * if (memVal == SPR[CmpValueSPR])
3561 * memoryWriteDoubleWord (rf[SrcA], rf[SrcB]);
3563 * Functional Description (32-bit)
3564 * uint64_t memVal = signExtend32 (memoryReadWord (rf[SrcA]));
3565 * rf[Dest] = memVal;
3566 * if (memVal == signExtend32 (SPR[CmpValueSPR]))
3567 * memoryWriteWord (rf[SrcA], rf[SrcB]);
3570 * This function also processes exch and exch4 which need not process SPR.
3572 static void do_exch(CPUTLGState
*env
, bool quad
, bool cmp
)
3575 target_long val
, sprval
;
3579 addr
= env
->atomic_srca
;
3580 if (quad
? get_user_s64(val
, addr
) : get_user_s32(val
, addr
)) {
3581 goto sigsegv_maperr
;
3586 sprval
= env
->spregs
[TILEGX_SPR_CMPEXCH
];
3588 sprval
= sextract64(env
->spregs
[TILEGX_SPR_CMPEXCH
], 0, 32);
3592 if (!cmp
|| val
== sprval
) {
3593 target_long valb
= env
->atomic_srcb
;
3594 if (quad
? put_user_u64(valb
, addr
) : put_user_u32(valb
, addr
)) {
3595 goto sigsegv_maperr
;
3599 set_regval(env
, env
->atomic_dstr
, val
);
3605 gen_sigsegv_maperr(env
, addr
);
3608 static void do_fetch(CPUTLGState
*env
, int trapnr
, bool quad
)
3612 target_long val
, valb
;
3616 addr
= env
->atomic_srca
;
3617 valb
= env
->atomic_srcb
;
3618 if (quad
? get_user_s64(val
, addr
) : get_user_s32(val
, addr
)) {
3619 goto sigsegv_maperr
;
3623 case TILEGX_EXCP_OPCODE_FETCHADD
:
3624 case TILEGX_EXCP_OPCODE_FETCHADD4
:
3627 case TILEGX_EXCP_OPCODE_FETCHADDGEZ
:
3633 case TILEGX_EXCP_OPCODE_FETCHADDGEZ4
:
3635 if ((int32_t)valb
< 0) {
3639 case TILEGX_EXCP_OPCODE_FETCHAND
:
3640 case TILEGX_EXCP_OPCODE_FETCHAND4
:
3643 case TILEGX_EXCP_OPCODE_FETCHOR
:
3644 case TILEGX_EXCP_OPCODE_FETCHOR4
:
3648 g_assert_not_reached();
3652 if (quad
? put_user_u64(valb
, addr
) : put_user_u32(valb
, addr
)) {
3653 goto sigsegv_maperr
;
3657 set_regval(env
, env
->atomic_dstr
, val
);
3663 gen_sigsegv_maperr(env
, addr
);
3666 void cpu_loop(CPUTLGState
*env
)
3668 CPUState
*cs
= CPU(tilegx_env_get_cpu(env
));
3673 trapnr
= cpu_tilegx_exec(cs
);
3676 case TILEGX_EXCP_SYSCALL
:
3677 env
->regs
[TILEGX_R_RE
] = do_syscall(env
, env
->regs
[TILEGX_R_NR
],
3678 env
->regs
[0], env
->regs
[1],
3679 env
->regs
[2], env
->regs
[3],
3680 env
->regs
[4], env
->regs
[5],
3681 env
->regs
[6], env
->regs
[7]);
3682 env
->regs
[TILEGX_R_ERR
] = TILEGX_IS_ERRNO(env
->regs
[TILEGX_R_RE
])
3683 ? - env
->regs
[TILEGX_R_RE
]
3686 case TILEGX_EXCP_OPCODE_EXCH
:
3687 do_exch(env
, true, false);
3689 case TILEGX_EXCP_OPCODE_EXCH4
:
3690 do_exch(env
, false, false);
3692 case TILEGX_EXCP_OPCODE_CMPEXCH
:
3693 do_exch(env
, true, true);
3695 case TILEGX_EXCP_OPCODE_CMPEXCH4
:
3696 do_exch(env
, false, true);
3698 case TILEGX_EXCP_OPCODE_FETCHADD
:
3699 case TILEGX_EXCP_OPCODE_FETCHADDGEZ
:
3700 case TILEGX_EXCP_OPCODE_FETCHAND
:
3701 case TILEGX_EXCP_OPCODE_FETCHOR
:
3702 do_fetch(env
, trapnr
, true);
3704 case TILEGX_EXCP_OPCODE_FETCHADD4
:
3705 case TILEGX_EXCP_OPCODE_FETCHADDGEZ4
:
3706 case TILEGX_EXCP_OPCODE_FETCHAND4
:
3707 case TILEGX_EXCP_OPCODE_FETCHOR4
:
3708 do_fetch(env
, trapnr
, false);
3710 case TILEGX_EXCP_SIGNAL
:
3711 do_signal(env
, env
->signo
, env
->sigcode
);
3713 case TILEGX_EXCP_REG_IDN_ACCESS
:
3714 case TILEGX_EXCP_REG_UDN_ACCESS
:
3715 gen_sigill_reg(env
);
3718 fprintf(stderr
, "trapnr is %d[0x%x].\n", trapnr
, trapnr
);
3719 g_assert_not_reached();
3721 process_pending_signals(env
);
3727 THREAD CPUState
*thread_cpu
;
3729 void task_settid(TaskState
*ts
)
3731 if (ts
->ts_tid
== 0) {
3732 ts
->ts_tid
= (pid_t
)syscall(SYS_gettid
);
3736 void stop_all_tasks(void)
3739 * We trust that when using NPTL, start_exclusive()
3740 * handles thread stopping correctly.
3745 /* Assumes contents are already zeroed. */
3746 void init_task_state(TaskState
*ts
)
3751 ts
->first_free
= ts
->sigqueue_table
;
3752 for (i
= 0; i
< MAX_SIGQUEUE_SIZE
- 1; i
++) {
3753 ts
->sigqueue_table
[i
].next
= &ts
->sigqueue_table
[i
+ 1];
3755 ts
->sigqueue_table
[i
].next
= NULL
;
3758 CPUArchState
*cpu_copy(CPUArchState
*env
)
3760 CPUState
*cpu
= ENV_GET_CPU(env
);
3761 CPUState
*new_cpu
= cpu_init(cpu_model
);
3762 CPUArchState
*new_env
= new_cpu
->env_ptr
;
3766 /* Reset non arch specific state */
3769 memcpy(new_env
, env
, sizeof(CPUArchState
));
3771 /* Clone all break/watchpoints.
3772 Note: Once we support ptrace with hw-debug register access, make sure
3773 BP_CPU break/watchpoints are handled correctly on clone. */
3774 QTAILQ_INIT(&new_cpu
->breakpoints
);
3775 QTAILQ_INIT(&new_cpu
->watchpoints
);
3776 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
3777 cpu_breakpoint_insert(new_cpu
, bp
->pc
, bp
->flags
, NULL
);
3779 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
3780 cpu_watchpoint_insert(new_cpu
, wp
->vaddr
, wp
->len
, wp
->flags
, NULL
);
/*
 * Handler for the "-h"/"-help" command-line options.
 *
 * The option argument is unused; the option's only effect is to print
 * the usage text.  Does not return: usage() terminates the process
 * with the supplied exit status.
 */
static void handle_arg_help(const char *arg)
{
    usage(EXIT_SUCCESS);
}
3791 static void handle_arg_log(const char *arg
)
3795 mask
= qemu_str_to_log_mask(arg
);
3797 qemu_print_log_usage(stdout
);
/*
 * Handler for "-D logfile" (QEMU_LOG_FILENAME): redirect qemu's
 * logging output from the default stderr to the named file.
 */
static void handle_arg_log_filename(const char *arg)
{
    qemu_set_log_filename(arg);
}
3808 static void handle_arg_set_env(const char *arg
)
3810 char *r
, *p
, *token
;
3811 r
= p
= strdup(arg
);
3812 while ((token
= strsep(&p
, ",")) != NULL
) {
3813 if (envlist_setenv(envlist
, token
) != 0) {
3814 usage(EXIT_FAILURE
);
3820 static void handle_arg_unset_env(const char *arg
)
3822 char *r
, *p
, *token
;
3823 r
= p
= strdup(arg
);
3824 while ((token
= strsep(&p
, ",")) != NULL
) {
3825 if (envlist_unsetenv(envlist
, token
) != 0) {
3826 usage(EXIT_FAILURE
);
3832 static void handle_arg_argv0(const char *arg
)
3834 argv0
= strdup(arg
);
3837 static void handle_arg_stack_size(const char *arg
)
3840 guest_stack_size
= strtoul(arg
, &p
, 0);
3841 if (guest_stack_size
== 0) {
3842 usage(EXIT_FAILURE
);
3846 guest_stack_size
*= 1024 * 1024;
3847 } else if (*p
== 'k' || *p
== 'K') {
3848 guest_stack_size
*= 1024;
3852 static void handle_arg_ld_prefix(const char *arg
)
3854 interp_prefix
= strdup(arg
);
3857 static void handle_arg_pagesize(const char *arg
)
3859 qemu_host_page_size
= atoi(arg
);
3860 if (qemu_host_page_size
== 0 ||
3861 (qemu_host_page_size
& (qemu_host_page_size
- 1)) != 0) {
3862 fprintf(stderr
, "page size must be a power of two\n");
3867 static void handle_arg_randseed(const char *arg
)
3869 unsigned long long seed
;
3871 if (parse_uint_full(arg
, &seed
, 0) != 0 || seed
> UINT_MAX
) {
3872 fprintf(stderr
, "Invalid seed number: %s\n", arg
);
3878 static void handle_arg_gdb(const char *arg
)
3880 gdbstub_port
= atoi(arg
);
3883 static void handle_arg_uname(const char *arg
)
3885 qemu_uname_release
= strdup(arg
);
3888 static void handle_arg_cpu(const char *arg
)
3890 cpu_model
= strdup(arg
);
3891 if (cpu_model
== NULL
|| is_help_option(cpu_model
)) {
3892 /* XXX: implement xxx_cpu_list for targets that still miss it */
3893 #if defined(cpu_list_id)
3894 cpu_list_id(stdout
, &fprintf
, "");
3895 #elif defined(cpu_list)
3896 cpu_list(stdout
, &fprintf
); /* deprecated */
3898 /* TODO: add cpu selection for alpha, microblaze, unicore32, s390x. */
3899 printf("Target ignores cpu selection\n");
3905 static void handle_arg_guest_base(const char *arg
)
3907 guest_base
= strtol(arg
, NULL
, 0);
3908 have_guest_base
= 1;
3911 static void handle_arg_reserved_va(const char *arg
)
3915 reserved_va
= strtoul(arg
, &p
, 0);
3929 unsigned long unshifted
= reserved_va
;
3931 reserved_va
<<= shift
;
3932 if (((reserved_va
>> shift
) != unshifted
)
3933 #if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
3934 || (reserved_va
> (1ul << TARGET_VIRT_ADDR_SPACE_BITS
))
3937 fprintf(stderr
, "Reserved virtual address too big\n");
3942 fprintf(stderr
, "Unrecognised -R size suffix '%s'\n", p
);
3947 static void handle_arg_singlestep(const char *arg
)
3952 static void handle_arg_strace(const char *arg
)
3957 static void handle_arg_version(const char *arg
)
3959 printf("qemu-" TARGET_NAME
" version " QEMU_VERSION QEMU_PKGVERSION
3960 ", Copyright (c) 2003-2008 Fabrice Bellard\n");
3964 struct qemu_argument
{
3968 void (*handle_opt
)(const char *arg
);
3969 const char *example
;
3973 static const struct qemu_argument arg_table
[] = {
3974 {"h", "", false, handle_arg_help
,
3975 "", "print this help"},
3976 {"help", "", false, handle_arg_help
,
3978 {"g", "QEMU_GDB", true, handle_arg_gdb
,
3979 "port", "wait gdb connection to 'port'"},
3980 {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix
,
3981 "path", "set the elf interpreter prefix to 'path'"},
3982 {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size
,
3983 "size", "set the stack size to 'size' bytes"},
3984 {"cpu", "QEMU_CPU", true, handle_arg_cpu
,
3985 "model", "select CPU (-cpu help for list)"},
3986 {"E", "QEMU_SET_ENV", true, handle_arg_set_env
,
3987 "var=value", "sets targets environment variable (see below)"},
3988 {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env
,
3989 "var", "unsets targets environment variable (see below)"},
3990 {"0", "QEMU_ARGV0", true, handle_arg_argv0
,
3991 "argv0", "forces target process argv[0] to be 'argv0'"},
3992 {"r", "QEMU_UNAME", true, handle_arg_uname
,
3993 "uname", "set qemu uname release string to 'uname'"},
3994 {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base
,
3995 "address", "set guest_base address to 'address'"},
3996 {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va
,
3997 "size", "reserve 'size' bytes for guest virtual address space"},
3998 {"d", "QEMU_LOG", true, handle_arg_log
,
3999 "item[,...]", "enable logging of specified items "
4000 "(use '-d help' for a list of items)"},
4001 {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename
,
4002 "logfile", "write logs to 'logfile' (default stderr)"},
4003 {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize
,
4004 "pagesize", "set the host page size to 'pagesize'"},
4005 {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep
,
4006 "", "run in singlestep mode"},
4007 {"strace", "QEMU_STRACE", false, handle_arg_strace
,
4008 "", "log system calls"},
4009 {"seed", "QEMU_RAND_SEED", true, handle_arg_randseed
,
4010 "", "Seed for pseudo-random number generator"},
4011 {"version", "QEMU_VERSION", false, handle_arg_version
,
4012 "", "display version information and exit"},
4013 {NULL
, NULL
, false, NULL
, NULL
, NULL
}
4016 static void QEMU_NORETURN
usage(int exitcode
)
4018 const struct qemu_argument
*arginfo
;
4022 printf("usage: qemu-" TARGET_NAME
" [options] program [arguments...]\n"
4023 "Linux CPU emulator (compiled for " TARGET_NAME
" emulation)\n"
4025 "Options and associated environment variables:\n"
4028 /* Calculate column widths. We must always have at least enough space
4029 * for the column header.
4031 maxarglen
= strlen("Argument");
4032 maxenvlen
= strlen("Env-variable");
4034 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
4035 int arglen
= strlen(arginfo
->argv
);
4036 if (arginfo
->has_arg
) {
4037 arglen
+= strlen(arginfo
->example
) + 1;
4039 if (strlen(arginfo
->env
) > maxenvlen
) {
4040 maxenvlen
= strlen(arginfo
->env
);
4042 if (arglen
> maxarglen
) {
4047 printf("%-*s %-*s Description\n", maxarglen
+1, "Argument",
4048 maxenvlen
, "Env-variable");
4050 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
4051 if (arginfo
->has_arg
) {
4052 printf("-%s %-*s %-*s %s\n", arginfo
->argv
,
4053 (int)(maxarglen
- strlen(arginfo
->argv
) - 1),
4054 arginfo
->example
, maxenvlen
, arginfo
->env
, arginfo
->help
);
4056 printf("-%-*s %-*s %s\n", maxarglen
, arginfo
->argv
,
4057 maxenvlen
, arginfo
->env
,
4064 "QEMU_LD_PREFIX = %s\n"
4065 "QEMU_STACK_SIZE = %ld byte\n",
4070 "You can use -E and -U options or the QEMU_SET_ENV and\n"
4071 "QEMU_UNSET_ENV environment variables to set and unset\n"
4072 "environment variables for the target process.\n"
4073 "It is possible to provide several variables by separating them\n"
4074 "by commas in getsubopt(3) style. Additionally it is possible to\n"
4075 "provide the -E and -U options multiple times.\n"
4076 "The following lines are equivalent:\n"
4077 " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
4078 " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
4079 " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
4080 "Note that if you provide several changes to a single variable\n"
4081 "the last change will stay in effect.\n");
4086 static int parse_args(int argc
, char **argv
)
4090 const struct qemu_argument
*arginfo
;
4092 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
4093 if (arginfo
->env
== NULL
) {
4097 r
= getenv(arginfo
->env
);
4099 arginfo
->handle_opt(r
);
4105 if (optind
>= argc
) {
4114 if (!strcmp(r
, "-")) {
4117 /* Treat --foo the same as -foo. */
4122 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
4123 if (!strcmp(r
, arginfo
->argv
)) {
4124 if (arginfo
->has_arg
) {
4125 if (optind
>= argc
) {
4126 (void) fprintf(stderr
,
4127 "qemu: missing argument for option '%s'\n", r
);
4130 arginfo
->handle_opt(argv
[optind
]);
4133 arginfo
->handle_opt(NULL
);
4139 /* no option matched the current argv */
4140 if (arginfo
->handle_opt
== NULL
) {
4141 (void) fprintf(stderr
, "qemu: unknown option '%s'\n", r
);
4146 if (optind
>= argc
) {
4147 (void) fprintf(stderr
, "qemu: no user program specified\n");
4151 filename
= argv
[optind
];
4152 exec_path
= argv
[optind
];
4157 int main(int argc
, char **argv
)
4159 struct target_pt_regs regs1
, *regs
= ®s1
;
4160 struct image_info info1
, *info
= &info1
;
4161 struct linux_binprm bprm
;
4166 char **target_environ
, **wrk
;
4173 module_call_init(MODULE_INIT_QOM
);
4175 if ((envlist
= envlist_create()) == NULL
) {
4176 (void) fprintf(stderr
, "Unable to allocate envlist\n");
4180 /* add current environment into the list */
4181 for (wrk
= environ
; *wrk
!= NULL
; wrk
++) {
4182 (void) envlist_setenv(envlist
, *wrk
);
4185 /* Read the stack limit from the kernel. If it's "unlimited",
4186 then we can do little else besides use the default. */
4189 if (getrlimit(RLIMIT_STACK
, &lim
) == 0
4190 && lim
.rlim_cur
!= RLIM_INFINITY
4191 && lim
.rlim_cur
== (target_long
)lim
.rlim_cur
) {
4192 guest_stack_size
= lim
.rlim_cur
;
4197 #if defined(cpudef_setup)
4198 cpudef_setup(); /* parse cpu definitions in target config file (TBD) */
4203 optind
= parse_args(argc
, argv
);
4206 memset(regs
, 0, sizeof(struct target_pt_regs
));
4208 /* Zero out image_info */
4209 memset(info
, 0, sizeof(struct image_info
));
4211 memset(&bprm
, 0, sizeof (bprm
));
4213 /* Scan interp_prefix dir for replacement files. */
4214 init_paths(interp_prefix
);
4216 init_qemu_uname_release();
4218 if (cpu_model
== NULL
) {
4219 #if defined(TARGET_I386)
4220 #ifdef TARGET_X86_64
4221 cpu_model
= "qemu64";
4223 cpu_model
= "qemu32";
4225 #elif defined(TARGET_ARM)
4227 #elif defined(TARGET_UNICORE32)
4229 #elif defined(TARGET_M68K)
4231 #elif defined(TARGET_SPARC)
4232 #ifdef TARGET_SPARC64
4233 cpu_model
= "TI UltraSparc II";
4235 cpu_model
= "Fujitsu MB86904";
4237 #elif defined(TARGET_MIPS)
4238 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
4243 #elif defined TARGET_OPENRISC
4244 cpu_model
= "or1200";
4245 #elif defined(TARGET_PPC)
4246 # ifdef TARGET_PPC64
4247 cpu_model
= "POWER8";
4251 #elif defined TARGET_SH4
4252 cpu_model
= TYPE_SH7785_CPU
;
4258 /* NOTE: we need to init the CPU at this stage to get
4259 qemu_host_page_size */
4260 cpu
= cpu_init(cpu_model
);
4262 fprintf(stderr
, "Unable to find CPU definition\n");
4270 if (getenv("QEMU_STRACE")) {
4274 if (getenv("QEMU_RAND_SEED")) {
4275 handle_arg_randseed(getenv("QEMU_RAND_SEED"));
4278 target_environ
= envlist_to_environ(envlist
, NULL
);
4279 envlist_free(envlist
);
4282 * Now that page sizes are configured in cpu_init() we can do
4283 * proper page alignment for guest_base.
4285 guest_base
= HOST_PAGE_ALIGN(guest_base
);
4287 if (reserved_va
|| have_guest_base
) {
4288 guest_base
= init_guest_space(guest_base
, reserved_va
, 0,
4290 if (guest_base
== (unsigned long)-1) {
4291 fprintf(stderr
, "Unable to reserve 0x%lx bytes of virtual address "
4292 "space for use as guest address space (check your virtual "
4293 "memory ulimit setting or reserve less using -R option)\n",
4299 mmap_next_start
= reserved_va
;
4304 * Read in mmap_min_addr kernel parameter. This value is used
4305 * When loading the ELF image to determine whether guest_base
4306 * is needed. It is also used in mmap_find_vma.
4311 if ((fp
= fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL
) {
4313 if (fscanf(fp
, "%lu", &tmp
) == 1) {
4314 mmap_min_addr
= tmp
;
4315 qemu_log_mask(CPU_LOG_PAGE
, "host mmap_min_addr=0x%lx\n", mmap_min_addr
);
4322 * Prepare copy of argv vector for target.
4324 target_argc
= argc
- optind
;
4325 target_argv
= calloc(target_argc
+ 1, sizeof (char *));
4326 if (target_argv
== NULL
) {
4327 (void) fprintf(stderr
, "Unable to allocate memory for target_argv\n");
4332 * If argv0 is specified (using '-0' switch) we replace
4333 * argv[0] pointer with the given one.
4336 if (argv0
!= NULL
) {
4337 target_argv
[i
++] = strdup(argv0
);
4339 for (; i
< target_argc
; i
++) {
4340 target_argv
[i
] = strdup(argv
[optind
+ i
]);
4342 target_argv
[target_argc
] = NULL
;
4344 ts
= g_new0(TaskState
, 1);
4345 init_task_state(ts
);
4346 /* build Task State */
4352 execfd
= qemu_getauxval(AT_EXECFD
);
4354 execfd
= open(filename
, O_RDONLY
);
4356 printf("Error while loading %s: %s\n", filename
, strerror(errno
));
4357 _exit(EXIT_FAILURE
);
4361 ret
= loader_exec(execfd
, filename
, target_argv
, target_environ
, regs
,
4364 printf("Error while loading %s: %s\n", filename
, strerror(-ret
));
4365 _exit(EXIT_FAILURE
);
4368 for (wrk
= target_environ
; *wrk
; wrk
++) {
4372 free(target_environ
);
4374 if (qemu_loglevel_mask(CPU_LOG_PAGE
)) {
4375 qemu_log("guest_base 0x%" PRIxPTR
"\n", guest_base
);
4378 qemu_log("start_brk 0x" TARGET_ABI_FMT_lx
"\n", info
->start_brk
);
4379 qemu_log("end_code 0x" TARGET_ABI_FMT_lx
"\n", info
->end_code
);
4380 qemu_log("start_code 0x" TARGET_ABI_FMT_lx
"\n",
4382 qemu_log("start_data 0x" TARGET_ABI_FMT_lx
"\n",
4384 qemu_log("end_data 0x" TARGET_ABI_FMT_lx
"\n", info
->end_data
);
4385 qemu_log("start_stack 0x" TARGET_ABI_FMT_lx
"\n",
4387 qemu_log("brk 0x" TARGET_ABI_FMT_lx
"\n", info
->brk
);
4388 qemu_log("entry 0x" TARGET_ABI_FMT_lx
"\n", info
->entry
);
4391 target_set_brk(info
->brk
);
4395 /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
4396 generating the prologue until now so that the prologue can take
4397 the real value of GUEST_BASE into account. */
4398 tcg_prologue_init(&tcg_ctx
);
4400 #if defined(TARGET_I386)
4401 env
->cr
[0] = CR0_PG_MASK
| CR0_WP_MASK
| CR0_PE_MASK
;
4402 env
->hflags
|= HF_PE_MASK
| HF_CPL_MASK
;
4403 if (env
->features
[FEAT_1_EDX
] & CPUID_SSE
) {
4404 env
->cr
[4] |= CR4_OSFXSR_MASK
;
4405 env
->hflags
|= HF_OSFXSR_MASK
;
4407 #ifndef TARGET_ABI32
4408 /* enable 64 bit mode if possible */
4409 if (!(env
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_LM
)) {
4410 fprintf(stderr
, "The selected x86 CPU does not support 64 bit mode\n");
4413 env
->cr
[4] |= CR4_PAE_MASK
;
4414 env
->efer
|= MSR_EFER_LMA
| MSR_EFER_LME
;
4415 env
->hflags
|= HF_LMA_MASK
;
4418 /* flags setup : we activate the IRQs by default as in user mode */
4419 env
->eflags
|= IF_MASK
;
4421 /* linux register setup */
4422 #ifndef TARGET_ABI32
4423 env
->regs
[R_EAX
] = regs
->rax
;
4424 env
->regs
[R_EBX
] = regs
->rbx
;
4425 env
->regs
[R_ECX
] = regs
->rcx
;
4426 env
->regs
[R_EDX
] = regs
->rdx
;
4427 env
->regs
[R_ESI
] = regs
->rsi
;
4428 env
->regs
[R_EDI
] = regs
->rdi
;
4429 env
->regs
[R_EBP
] = regs
->rbp
;
4430 env
->regs
[R_ESP
] = regs
->rsp
;
4431 env
->eip
= regs
->rip
;
4433 env
->regs
[R_EAX
] = regs
->eax
;
4434 env
->regs
[R_EBX
] = regs
->ebx
;
4435 env
->regs
[R_ECX
] = regs
->ecx
;
4436 env
->regs
[R_EDX
] = regs
->edx
;
4437 env
->regs
[R_ESI
] = regs
->esi
;
4438 env
->regs
[R_EDI
] = regs
->edi
;
4439 env
->regs
[R_EBP
] = regs
->ebp
;
4440 env
->regs
[R_ESP
] = regs
->esp
;
4441 env
->eip
= regs
->eip
;
4444 /* linux interrupt setup */
4445 #ifndef TARGET_ABI32
4446 env
->idt
.limit
= 511;
4448 env
->idt
.limit
= 255;
4450 env
->idt
.base
= target_mmap(0, sizeof(uint64_t) * (env
->idt
.limit
+ 1),
4451 PROT_READ
|PROT_WRITE
,
4452 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4453 idt_table
= g2h(env
->idt
.base
);
4476 /* linux segment setup */
4478 uint64_t *gdt_table
;
4479 env
->gdt
.base
= target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES
,
4480 PROT_READ
|PROT_WRITE
,
4481 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4482 env
->gdt
.limit
= sizeof(uint64_t) * TARGET_GDT_ENTRIES
- 1;
4483 gdt_table
= g2h(env
->gdt
.base
);
4485 write_dt(&gdt_table
[__USER_CS
>> 3], 0, 0xfffff,
4486 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4487 (3 << DESC_DPL_SHIFT
) | (0xa << DESC_TYPE_SHIFT
));
4489 /* 64 bit code segment */
4490 write_dt(&gdt_table
[__USER_CS
>> 3], 0, 0xfffff,
4491 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4493 (3 << DESC_DPL_SHIFT
) | (0xa << DESC_TYPE_SHIFT
));
4495 write_dt(&gdt_table
[__USER_DS
>> 3], 0, 0xfffff,
4496 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4497 (3 << DESC_DPL_SHIFT
) | (0x2 << DESC_TYPE_SHIFT
));
4499 cpu_x86_load_seg(env
, R_CS
, __USER_CS
);
4500 cpu_x86_load_seg(env
, R_SS
, __USER_DS
);
4502 cpu_x86_load_seg(env
, R_DS
, __USER_DS
);
4503 cpu_x86_load_seg(env
, R_ES
, __USER_DS
);
4504 cpu_x86_load_seg(env
, R_FS
, __USER_DS
);
4505 cpu_x86_load_seg(env
, R_GS
, __USER_DS
);
4506 /* This hack makes Wine work... */
4507 env
->segs
[R_FS
].selector
= 0;
4509 cpu_x86_load_seg(env
, R_DS
, 0);
4510 cpu_x86_load_seg(env
, R_ES
, 0);
4511 cpu_x86_load_seg(env
, R_FS
, 0);
4512 cpu_x86_load_seg(env
, R_GS
, 0);
4514 #elif defined(TARGET_AARCH64)
4518 if (!(arm_feature(env
, ARM_FEATURE_AARCH64
))) {
4520 "The selected ARM CPU does not support 64 bit mode\n");
4524 for (i
= 0; i
< 31; i
++) {
4525 env
->xregs
[i
] = regs
->regs
[i
];
4528 env
->xregs
[31] = regs
->sp
;
4530 #elif defined(TARGET_ARM)
4533 cpsr_write(env
, regs
->uregs
[16], CPSR_USER
| CPSR_EXEC
,
4535 for(i
= 0; i
< 16; i
++) {
4536 env
->regs
[i
] = regs
->uregs
[i
];
4538 #ifdef TARGET_WORDS_BIGENDIAN
4540 if (EF_ARM_EABI_VERSION(info
->elf_flags
) >= EF_ARM_EABI_VER4
4541 && (info
->elf_flags
& EF_ARM_BE8
)) {
4542 env
->uncached_cpsr
|= CPSR_E
;
4543 env
->cp15
.sctlr_el
[1] |= SCTLR_E0E
;
4545 env
->cp15
.sctlr_el
[1] |= SCTLR_B
;
4549 #elif defined(TARGET_UNICORE32)
4552 cpu_asr_write(env
, regs
->uregs
[32], 0xffffffff);
4553 for (i
= 0; i
< 32; i
++) {
4554 env
->regs
[i
] = regs
->uregs
[i
];
4557 #elif defined(TARGET_SPARC)
4561 env
->npc
= regs
->npc
;
4563 for(i
= 0; i
< 8; i
++)
4564 env
->gregs
[i
] = regs
->u_regs
[i
];
4565 for(i
= 0; i
< 8; i
++)
4566 env
->regwptr
[i
] = regs
->u_regs
[i
+ 8];
4568 #elif defined(TARGET_PPC)
4572 #if defined(TARGET_PPC64)
4573 #if defined(TARGET_ABI32)
4574 env
->msr
&= ~((target_ulong
)1 << MSR_SF
);
4576 env
->msr
|= (target_ulong
)1 << MSR_SF
;
4579 env
->nip
= regs
->nip
;
4580 for(i
= 0; i
< 32; i
++) {
4581 env
->gpr
[i
] = regs
->gpr
[i
];
4584 #elif defined(TARGET_M68K)
4587 env
->dregs
[0] = regs
->d0
;
4588 env
->dregs
[1] = regs
->d1
;
4589 env
->dregs
[2] = regs
->d2
;
4590 env
->dregs
[3] = regs
->d3
;
4591 env
->dregs
[4] = regs
->d4
;
4592 env
->dregs
[5] = regs
->d5
;
4593 env
->dregs
[6] = regs
->d6
;
4594 env
->dregs
[7] = regs
->d7
;
4595 env
->aregs
[0] = regs
->a0
;
4596 env
->aregs
[1] = regs
->a1
;
4597 env
->aregs
[2] = regs
->a2
;
4598 env
->aregs
[3] = regs
->a3
;
4599 env
->aregs
[4] = regs
->a4
;
4600 env
->aregs
[5] = regs
->a5
;
4601 env
->aregs
[6] = regs
->a6
;
4602 env
->aregs
[7] = regs
->usp
;
4604 ts
->sim_syscalls
= 1;
4606 #elif defined(TARGET_MICROBLAZE)
4608 env
->regs
[0] = regs
->r0
;
4609 env
->regs
[1] = regs
->r1
;
4610 env
->regs
[2] = regs
->r2
;
4611 env
->regs
[3] = regs
->r3
;
4612 env
->regs
[4] = regs
->r4
;
4613 env
->regs
[5] = regs
->r5
;
4614 env
->regs
[6] = regs
->r6
;
4615 env
->regs
[7] = regs
->r7
;
4616 env
->regs
[8] = regs
->r8
;
4617 env
->regs
[9] = regs
->r9
;
4618 env
->regs
[10] = regs
->r10
;
4619 env
->regs
[11] = regs
->r11
;
4620 env
->regs
[12] = regs
->r12
;
4621 env
->regs
[13] = regs
->r13
;
4622 env
->regs
[14] = regs
->r14
;
4623 env
->regs
[15] = regs
->r15
;
4624 env
->regs
[16] = regs
->r16
;
4625 env
->regs
[17] = regs
->r17
;
4626 env
->regs
[18] = regs
->r18
;
4627 env
->regs
[19] = regs
->r19
;
4628 env
->regs
[20] = regs
->r20
;
4629 env
->regs
[21] = regs
->r21
;
4630 env
->regs
[22] = regs
->r22
;
4631 env
->regs
[23] = regs
->r23
;
4632 env
->regs
[24] = regs
->r24
;
4633 env
->regs
[25] = regs
->r25
;
4634 env
->regs
[26] = regs
->r26
;
4635 env
->regs
[27] = regs
->r27
;
4636 env
->regs
[28] = regs
->r28
;
4637 env
->regs
[29] = regs
->r29
;
4638 env
->regs
[30] = regs
->r30
;
4639 env
->regs
[31] = regs
->r31
;
4640 env
->sregs
[SR_PC
] = regs
->pc
;
4642 #elif defined(TARGET_MIPS)
4646 for(i
= 0; i
< 32; i
++) {
4647 env
->active_tc
.gpr
[i
] = regs
->regs
[i
];
4649 env
->active_tc
.PC
= regs
->cp0_epc
& ~(target_ulong
)1;
4650 if (regs
->cp0_epc
& 1) {
4651 env
->hflags
|= MIPS_HFLAG_M16
;
4654 #elif defined(TARGET_OPENRISC)
4658 for (i
= 0; i
< 32; i
++) {
4659 env
->gpr
[i
] = regs
->gpr
[i
];
4665 #elif defined(TARGET_SH4)
4669 for(i
= 0; i
< 16; i
++) {
4670 env
->gregs
[i
] = regs
->regs
[i
];
4674 #elif defined(TARGET_ALPHA)
4678 for(i
= 0; i
< 28; i
++) {
4679 env
->ir
[i
] = ((abi_ulong
*)regs
)[i
];
4681 env
->ir
[IR_SP
] = regs
->usp
;
4684 #elif defined(TARGET_CRIS)
4686 env
->regs
[0] = regs
->r0
;
4687 env
->regs
[1] = regs
->r1
;
4688 env
->regs
[2] = regs
->r2
;
4689 env
->regs
[3] = regs
->r3
;
4690 env
->regs
[4] = regs
->r4
;
4691 env
->regs
[5] = regs
->r5
;
4692 env
->regs
[6] = regs
->r6
;
4693 env
->regs
[7] = regs
->r7
;
4694 env
->regs
[8] = regs
->r8
;
4695 env
->regs
[9] = regs
->r9
;
4696 env
->regs
[10] = regs
->r10
;
4697 env
->regs
[11] = regs
->r11
;
4698 env
->regs
[12] = regs
->r12
;
4699 env
->regs
[13] = regs
->r13
;
4700 env
->regs
[14] = info
->start_stack
;
4701 env
->regs
[15] = regs
->acr
;
4702 env
->pc
= regs
->erp
;
4704 #elif defined(TARGET_S390X)
4707 for (i
= 0; i
< 16; i
++) {
4708 env
->regs
[i
] = regs
->gprs
[i
];
4710 env
->psw
.mask
= regs
->psw
.mask
;
4711 env
->psw
.addr
= regs
->psw
.addr
;
4713 #elif defined(TARGET_TILEGX)
4716 for (i
= 0; i
< TILEGX_R_COUNT
; i
++) {
4717 env
->regs
[i
] = regs
->regs
[i
];
4719 for (i
= 0; i
< TILEGX_SPR_COUNT
; i
++) {
4725 #error unsupported target CPU
4728 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4729 ts
->stack_base
= info
->start_stack
;
4730 ts
->heap_base
= info
->brk
;
4731 /* This will be filled in on the first SYS_HEAPINFO call. */
4736 if (gdbserver_start(gdbstub_port
) < 0) {
4737 fprintf(stderr
, "qemu: could not open gdbserver on port %d\n",
4741 gdb_handlesig(cpu
, 0);