/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-version.h"
#include <sys/syscall.h>
#include <sys/resource.h>

#include "qapi/error.h"
#include "qemu/path.h"
#include "qemu/config-file.h"
#include "qemu/cutils.h"
#include "qemu/help_option.h"
#include "exec/exec-all.h"
#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
#include "qemu/timer.h"
#include "qemu/envlist.h"
#include "trace/control.h"
#include "glib-compat.h"
static const char *filename;
static const char *argv0;
static int gdbstub_port;
static envlist_t *envlist;
static const char *cpu_model;
unsigned long mmap_min_addr;
#define EXCP_DUMP(env, fmt, ...)                                        \
do {                                                                    \
    CPUState *cs = ENV_GET_CPU(env);                                    \
    fprintf(stderr, fmt , ## __VA_ARGS__);                              \
    cpu_dump_state(cs, stderr, fprintf, 0);                             \
    if (qemu_log_separate()) {                                          \
        qemu_log(fmt, ## __VA_ARGS__);                                  \
        log_cpu_state(cs, 0);                                           \
    }                                                                   \
} while (0)
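/*
 * Example use of EXCP_DUMP(), as it appears in the cpu_loop() bodies later
 * in this file:
 *
 *     EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
 */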
#if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
/*
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
 *
 * This way we will never overlap with our own libraries or binaries or stack
 * or anything else that QEMU maps.
 */
# ifdef TARGET_MIPS
/* MIPS only supports 31 bits of virtual address space for user space */
uintptr_t reserved_va = 0x77000000;
# else
uintptr_t reserved_va = 0xf7000000;
# endif
#else
uintptr_t reserved_va;
#endif
static void usage(int exitcode);

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;

/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
   we allocate a bigger stack. Need a better solution, for example
   by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;
void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}
#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)
{
    return -1;
}
#endif
/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work.  */
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
static int pending_cpus;
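/*
 * Rough sketch of the handshake implemented below (added comment, not from
 * the original source): start_exclusive() takes exclusive_lock and waits on
 * exclusive_cond until no other vCPU is executing guest code (tracked via
 * pending_cpus); cpu_exec_end() signals exclusive_cond when the last running
 * vCPU stops, and end_exclusive() broadcasts exclusive_resume so threads
 * waiting in exclusive_idle() may continue.
 */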
/* Make sure everything is in a consistent state for calling fork().  */
void fork_start(void)
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    pthread_mutex_lock(&exclusive_lock);

void fork_end(int child)
    mmap_fork_end(child);
        CPUState *cpu, *next_cpu;
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads.  */
        CPU_FOREACH_SAFE(cpu, next_cpu) {
            if (cpu != thread_cpu) {
                QTAILQ_REMOVE(&cpus, cpu, node);
        pthread_mutex_init(&exclusive_lock, NULL);
        pthread_mutex_init(&cpu_list_mutex, NULL);
        pthread_cond_init(&exclusive_cond, NULL);
        pthread_cond_init(&exclusive_resume, NULL);
        qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
        gdbserver_fork(thread_cpu);
        pthread_mutex_unlock(&exclusive_lock);
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
/* Wait for pending exclusive operations to complete.  The exclusive lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
    }
}

/* Start an exclusive operation.
   Must only be called from outside cpu_exec.  */
static inline void start_exclusive(void)
    pthread_mutex_lock(&exclusive_lock);
    /* Make all other cpus stop executing.  */
    CPU_FOREACH(other_cpu) {
        if (other_cpu->running) {
    if (pending_cpus > 1) {
        pthread_cond_wait(&exclusive_cond, &exclusive_lock);

/* Finish an exclusive operation.  */
static inline void __attribute__((unused)) end_exclusive(void)
    pthread_cond_broadcast(&exclusive_resume);
    pthread_mutex_unlock(&exclusive_lock);

/* Wait for exclusive ops to finish, and begin cpu execution.  */
static inline void cpu_exec_start(CPUState *cpu)
    pthread_mutex_lock(&exclusive_lock);
    pthread_mutex_unlock(&exclusive_lock);

/* Mark cpu as not executing, and release pending exclusive ops.  */
static inline void cpu_exec_end(CPUState *cpu)
    pthread_mutex_lock(&exclusive_lock);
    cpu->running = false;
    if (pending_cpus > 1) {
        if (pending_cpus == 1) {
            pthread_cond_signal(&exclusive_cond);
    pthread_mutex_unlock(&exclusive_lock);

void cpu_list_lock(void)
{
    pthread_mutex_lock(&cpu_list_mutex);
}

void cpu_list_unlock(void)
{
    pthread_mutex_unlock(&cpu_list_mutex);
}
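/*
 * Orientation note (added, not part of the original source): each of the
 * per-target cpu_loop() implementations that follow has the same shape -
 * run the guest with cpu_exec(cs), switch on the returned trapnr to either
 * forward a guest syscall through do_syscall() or turn the exception into a
 * guest signal with queue_signal(), then call process_pending_signals(env)
 * before iterating again.
 */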
/***********************************************************/
/* CPUX86 core interface */

uint64_t cpu_get_tsc(CPUX86State *env)
    return cpu_get_host_ticks();

static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);

static uint64_t *idt_table;

static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p[2] = tswap32(addr >> 32);

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);

static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
    set_gate(idt_table + n, 0, dpl, 0, 0);
void cpu_loop(CPUX86State *env)
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);
            /* linux syscall from int $0x80 */
            ret = do_syscall(env,
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[R_EAX] = ret;
            /* linux syscall from syscall instruction */
            ret = do_syscall(env,
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[R_EAX] = ret;
            case TARGET_VSYSCALL_ADDR(__NR_vgettimeofday):
                syscall_num = __NR_gettimeofday;
            case TARGET_VSYSCALL_ADDR(__NR_vtime):
                syscall_num = __NR_time;
                /* XXX: not yet implemented (arm eabi host) */
                cpu_abort(cs, "Unimplemented vsyscall vtime");
            case TARGET_VSYSCALL_ADDR(__NR_vgetcpu):
                /* XXX: not yet implemented */
                cpu_abort(cs, "Unimplemented vsyscall vgetcpu");
                          "Invalid vsyscall to address " TARGET_FMT_lx "\n",
            env->regs[R_EAX] = do_syscall(env,
            get_user_u64(val, env->regs[R_ESP]);
            env->regs[R_ESP] += 8;
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            /* XXX: potential problem if ABI32 */
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_fault(env);
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGSEGV;
            if (!(env->error_code & 1))
                info.si_code = TARGET_SEGV_MAPERR;
                info.si_code = TARGET_SEGV_ACCERR;
            info._sifields._sigfault._addr = env->cr[2];
            queue_signal(env, info.si_signo, &info);
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            /* division by zero */
            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_INTDIV;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, &info);
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            info.si_signo = TARGET_SIGTRAP;
            if (trapnr == EXCP01_DB) {
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->eip;
                info.si_code = TARGET_SI_KERNEL;
                info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, &info);
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            pc = env->segs[R_CS].base + env->eip;
            EXCP_DUMP(env, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
        process_pending_signals(env);
#define get_user_code_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_code_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define put_user_data_u32(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap32(__x);                         \
        }                                               \
        put_user_u32(__x, (gaddr));                     \
    })

#define put_user_data_u16(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap16(__x);                         \
        }                                               \
        put_user_u16(__x, (gaddr));                     \
    })
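/*
 * Usage note (added, not from the original source): the *_code_* macros
 * follow the guest's instruction endianness via bswap_code(arm_sctlr_b(env)),
 * while the *_data_* macros follow the guest's data endianness via
 * arm_cpu_bswap_data(); the ARM cpu_loop() below uses the code variants when
 * it re-reads a trapped instruction, e.g.
 *     get_user_code_u32(insn, env->regs[15], env);
 */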
/* Commpage handling -- there is no commpage for AArch64 */

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 *
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
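/*
 * Illustrative note (added, not from the original source): a guest reaches
 * this helper by loading r0 = &oldval, r1 = &newval, r2 = &target and
 * branching to the fixed commpage address 0xffff0f60 (__kernel_cmpxchg64,
 * see do_kernel_trap() below); on return r0 is 0 and the C flag is set only
 * if the 64-bit compare-and-swap succeeded.
 */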
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
    uint64_t oldval, newval, val;
    target_siginfo_t info;

    /* Based on the 32 bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations. However things like ldrex/strex are much harder so
       there's not much point trying.  */

    cpsr = cpsr_read(env);

    if (get_user_u64(oldval, env->regs[0])) {
        env->exception.vaddress = env->regs[0];

    if (get_user_u64(newval, env->regs[1])) {
        env->exception.vaddress = env->regs[1];

    if (get_user_u64(val, addr)) {
        env->exception.vaddress = addr;

    if (put_user_u64(val, addr)) {
        env->exception.vaddress = addr;

    cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);

    /* We get the PC of the entry address - which is as good as anything,
       on a real kernel what you get depends on which mode it uses. */
    info.si_signo = TARGET_SIGSEGV;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->exception.vaddress;
    queue_signal(env, info.si_signo, &info);
/* Handle a jump to the kernel code page.  */
do_kernel_trap(CPUARMState *env)
    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        /* ??? No-op. Will need to do better for SMP.  */
    case 0xffff0fc0: /* __kernel_cmpxchg */
         /* XXX: This only works between threads, not between processes.
            It's probably possible to implement this with native host
            operations. However things like ldrex/strex are much harder so
            there's not much point trying.  */
        cpsr = cpsr_read(env);
        /* FIXME: This should SEGV if the access fails.  */
        if (get_user_u32(val, addr))
        if (val == env->regs[0]) {
            /* FIXME: Check for segfaults.  */
            put_user_u32(val, addr);
        cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = cpu_get_tls(env);
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);

    /* Jump back to the caller.  */
    addr = env->regs[14];
    env->regs[15] = addr;
684 /* Store exclusive handling for AArch32 */
685 static int do_strex(CPUARMState
*env
)
693 if (env
->exclusive_addr
!= env
->exclusive_test
) {
696 /* We know we're always AArch32 so the address is in uint32_t range
697 * unless it was the -1 exclusive-monitor-lost value (which won't
698 * match exclusive_test above).
700 assert(extract64(env
->exclusive_addr
, 32, 32) == 0);
701 addr
= env
->exclusive_addr
;
702 size
= env
->exclusive_info
& 0xf;
705 segv
= get_user_u8(val
, addr
);
708 segv
= get_user_data_u16(val
, addr
, env
);
712 segv
= get_user_data_u32(val
, addr
, env
);
718 env
->exception
.vaddress
= addr
;
723 segv
= get_user_data_u32(valhi
, addr
+ 4, env
);
725 env
->exception
.vaddress
= addr
+ 4;
728 if (arm_cpu_bswap_data(env
)) {
729 val
= deposit64((uint64_t)valhi
, 32, 32, val
);
731 val
= deposit64(val
, 32, 32, valhi
);
734 if (val
!= env
->exclusive_val
) {
738 val
= env
->regs
[(env
->exclusive_info
>> 8) & 0xf];
741 segv
= put_user_u8(val
, addr
);
744 segv
= put_user_data_u16(val
, addr
, env
);
748 segv
= put_user_data_u32(val
, addr
, env
);
752 env
->exception
.vaddress
= addr
;
756 val
= env
->regs
[(env
->exclusive_info
>> 12) & 0xf];
757 segv
= put_user_data_u32(val
, addr
+ 4, env
);
759 env
->exception
.vaddress
= addr
+ 4;
766 env
->regs
[(env
->exclusive_info
>> 4) & 0xf] = rc
;
772 void cpu_loop(CPUARMState
*env
)
774 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
776 unsigned int n
, insn
;
777 target_siginfo_t info
;
783 trapnr
= cpu_exec(cs
);
788 TaskState
*ts
= cs
->opaque
;
792 /* we handle the FPU emulation here, as Linux */
793 /* we get the opcode */
794 /* FIXME - what to do if get_user() fails? */
795 get_user_code_u32(opcode
, env
->regs
[15], env
);
797 rc
= EmulateAll(opcode
, &ts
->fpa
, env
);
798 if (rc
== 0) { /* illegal instruction */
799 info
.si_signo
= TARGET_SIGILL
;
801 info
.si_code
= TARGET_ILL_ILLOPN
;
802 info
._sifields
._sigfault
._addr
= env
->regs
[15];
803 queue_signal(env
, info
.si_signo
, &info
);
804 } else if (rc
< 0) { /* FP exception */
807 /* translate softfloat flags to FPSR flags */
808 if (-rc
& float_flag_invalid
)
810 if (-rc
& float_flag_divbyzero
)
812 if (-rc
& float_flag_overflow
)
814 if (-rc
& float_flag_underflow
)
816 if (-rc
& float_flag_inexact
)
819 FPSR fpsr
= ts
->fpa
.fpsr
;
820 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
822 if (fpsr
& (arm_fpe
<< 16)) { /* exception enabled? */
823 info
.si_signo
= TARGET_SIGFPE
;
826 /* ordered by priority, least first */
827 if (arm_fpe
& BIT_IXC
) info
.si_code
= TARGET_FPE_FLTRES
;
828 if (arm_fpe
& BIT_UFC
) info
.si_code
= TARGET_FPE_FLTUND
;
829 if (arm_fpe
& BIT_OFC
) info
.si_code
= TARGET_FPE_FLTOVF
;
830 if (arm_fpe
& BIT_DZC
) info
.si_code
= TARGET_FPE_FLTDIV
;
831 if (arm_fpe
& BIT_IOC
) info
.si_code
= TARGET_FPE_FLTINV
;
833 info
._sifields
._sigfault
._addr
= env
->regs
[15];
834 queue_signal(env
, info
.si_signo
, &info
);
839 /* accumulate unenabled exceptions */
840 if ((!(fpsr
& BIT_IXE
)) && (arm_fpe
& BIT_IXC
))
842 if ((!(fpsr
& BIT_UFE
)) && (arm_fpe
& BIT_UFC
))
844 if ((!(fpsr
& BIT_OFE
)) && (arm_fpe
& BIT_OFC
))
846 if ((!(fpsr
& BIT_DZE
)) && (arm_fpe
& BIT_DZC
))
848 if ((!(fpsr
& BIT_IOE
)) && (arm_fpe
& BIT_IOC
))
851 } else { /* everything OK */
862 if (trapnr
== EXCP_BKPT
) {
864 /* FIXME - what to do if get_user() fails? */
865 get_user_code_u16(insn
, env
->regs
[15], env
);
869 /* FIXME - what to do if get_user() fails? */
870 get_user_code_u32(insn
, env
->regs
[15], env
);
871 n
= (insn
& 0xf) | ((insn
>> 4) & 0xff0);
876 /* FIXME - what to do if get_user() fails? */
877 get_user_code_u16(insn
, env
->regs
[15] - 2, env
);
880 /* FIXME - what to do if get_user() fails? */
881 get_user_code_u32(insn
, env
->regs
[15] - 4, env
);
886 if (n
== ARM_NR_cacheflush
) {
888 } else if (n
== ARM_NR_semihosting
889 || n
== ARM_NR_thumb_semihosting
) {
890 env
->regs
[0] = do_arm_semihosting (env
);
891 } else if (n
== 0 || n
>= ARM_SYSCALL_BASE
|| env
->thumb
) {
893 if (env
->thumb
|| n
== 0) {
896 n
-= ARM_SYSCALL_BASE
;
899 if ( n
> ARM_NR_BASE
) {
901 case ARM_NR_cacheflush
:
905 cpu_set_tls(env
, env
->regs
[0]);
908 case ARM_NR_breakpoint
:
909 env
->regs
[15] -= env
->thumb
? 2 : 4;
912 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
914 env
->regs
[0] = -TARGET_ENOSYS
;
918 ret
= do_syscall(env
,
927 if (ret
== -TARGET_ERESTARTSYS
) {
928 env
->regs
[15] -= env
->thumb
? 2 : 4;
929 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
939 /* just indicate that signals should be handled asap */
942 if (!do_strex(env
)) {
945 /* fall through for segv */
946 case EXCP_PREFETCH_ABORT
:
947 case EXCP_DATA_ABORT
:
948 addr
= env
->exception
.vaddress
;
950 info
.si_signo
= TARGET_SIGSEGV
;
952 /* XXX: check env->error_code */
953 info
.si_code
= TARGET_SEGV_MAPERR
;
954 info
._sifields
._sigfault
._addr
= addr
;
955 queue_signal(env
, info
.si_signo
, &info
);
963 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
968 info
.si_code
= TARGET_TRAP_BRKPT
;
969 queue_signal(env
, info
.si_signo
, &info
);
973 case EXCP_KERNEL_TRAP
:
974 if (do_kernel_trap(env
))
978 /* nothing to do here for user-mode, just resume guest code */
982 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
985 process_pending_signals(env
);
992 * Handle AArch64 store-release exclusive
994 * rs = gets the status result of store exclusive
995 * rt = is the register that is stored
996 * rt2 = is the second register store (in STP)
999 static int do_strex_a64(CPUARMState
*env
)
1010 /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
1011 size
= extract32(env
->exclusive_info
, 0, 2);
1012 is_pair
= extract32(env
->exclusive_info
, 2, 1);
1013 rs
= extract32(env
->exclusive_info
, 4, 5);
1014 rt
= extract32(env
->exclusive_info
, 9, 5);
1015 rt2
= extract32(env
->exclusive_info
, 14, 5);
1017 addr
= env
->exclusive_addr
;
1019 if (addr
!= env
->exclusive_test
) {
1025 segv
= get_user_u8(val
, addr
);
1028 segv
= get_user_u16(val
, addr
);
1031 segv
= get_user_u32(val
, addr
);
1034 segv
= get_user_u64(val
, addr
);
1040 env
->exception
.vaddress
= addr
;
1043 if (val
!= env
->exclusive_val
) {
1048 segv
= get_user_u32(val
, addr
+ 4);
1050 segv
= get_user_u64(val
, addr
+ 8);
1053 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
1056 if (val
!= env
->exclusive_high
) {
1060 /* handle the zero register */
1061 val
= rt
== 31 ? 0 : env
->xregs
[rt
];
1064 segv
= put_user_u8(val
, addr
);
1067 segv
= put_user_u16(val
, addr
);
1070 segv
= put_user_u32(val
, addr
);
1073 segv
= put_user_u64(val
, addr
);
1080 /* handle the zero register */
1081 val
= rt2
== 31 ? 0 : env
->xregs
[rt2
];
1083 segv
= put_user_u32(val
, addr
+ 4);
1085 segv
= put_user_u64(val
, addr
+ 8);
1088 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
1095 /* rs == 31 encodes a write to the ZR, thus throwing away
1096 * the status return. This is rather silly but valid.
1099 env
->xregs
[rs
] = rc
;
1102 /* instruction faulted, PC does not advance */
1103 /* either way a strex releases any exclusive lock we have */
1104 env
->exclusive_addr
= -1;
1109 /* AArch64 main loop */
1110 void cpu_loop(CPUARMState
*env
)
1112 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
1115 target_siginfo_t info
;
1119 trapnr
= cpu_exec(cs
);
1124 ret
= do_syscall(env
,
1133 if (ret
== -TARGET_ERESTARTSYS
) {
1135 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
1136 env
->xregs
[0] = ret
;
1139 case EXCP_INTERRUPT
:
1140 /* just indicate that signals should be handled asap */
1143 info
.si_signo
= TARGET_SIGILL
;
1145 info
.si_code
= TARGET_ILL_ILLOPN
;
1146 info
._sifields
._sigfault
._addr
= env
->pc
;
1147 queue_signal(env
, info
.si_signo
, &info
);
1150 if (!do_strex_a64(env
)) {
1153 /* fall through for segv */
1154 case EXCP_PREFETCH_ABORT
:
1155 case EXCP_DATA_ABORT
:
1156 info
.si_signo
= TARGET_SIGSEGV
;
1158 /* XXX: check env->error_code */
1159 info
.si_code
= TARGET_SEGV_MAPERR
;
1160 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
1161 queue_signal(env
, info
.si_signo
, &info
);
1165 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1167 info
.si_signo
= sig
;
1169 info
.si_code
= TARGET_TRAP_BRKPT
;
1170 queue_signal(env
, info
.si_signo
, &info
);
1174 env
->xregs
[0] = do_arm_semihosting(env
);
1177 /* nothing to do here for user-mode, just resume guest code */
1180 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1183 process_pending_signals(env
);
1184 /* Exception return on AArch64 always clears the exclusive monitor,
1185 * so any return to running guest code implies this.
1186 * A strex (successful or otherwise) also clears the monitor, so
1187 * we don't need to specialcase EXCP_STREX.
1189 env
->exclusive_addr
= -1;
1192 #endif /* ndef TARGET_ABI32 */
1196 #ifdef TARGET_UNICORE32
1198 void cpu_loop(CPUUniCore32State
*env
)
1200 CPUState
*cs
= CPU(uc32_env_get_cpu(env
));
1202 unsigned int n
, insn
;
1203 target_siginfo_t info
;
1207 trapnr
= cpu_exec(cs
);
1210 case UC32_EXCP_PRIV
:
1213 get_user_u32(insn
, env
->regs
[31] - 4);
1214 n
= insn
& 0xffffff;
1216 if (n
>= UC32_SYSCALL_BASE
) {
1218 n
-= UC32_SYSCALL_BASE
;
1219 if (n
== UC32_SYSCALL_NR_set_tls
) {
1220 cpu_set_tls(env
, env
->regs
[0]);
1223 abi_long ret
= do_syscall(env
,
1232 if (ret
== -TARGET_ERESTARTSYS
) {
1234 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
1243 case UC32_EXCP_DTRAP
:
1244 case UC32_EXCP_ITRAP
:
1245 info
.si_signo
= TARGET_SIGSEGV
;
1247 /* XXX: check env->error_code */
1248 info
.si_code
= TARGET_SEGV_MAPERR
;
1249 info
._sifields
._sigfault
._addr
= env
->cp0
.c4_faultaddr
;
1250 queue_signal(env
, info
.si_signo
, &info
);
1252 case EXCP_INTERRUPT
:
1253 /* just indicate that signals should be handled asap */
1259 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1261 info
.si_signo
= sig
;
1263 info
.si_code
= TARGET_TRAP_BRKPT
;
1264 queue_signal(env
, info
.si_signo
, &info
);
1271 process_pending_signals(env
);
1275 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1281 #define SPARC64_STACK_BIAS 2047
1285 /* WARNING: dealing with register windows _is_ complicated. More info
1286 can be found at http://www.sics.se/~psm/sparcstack.html */
1287 static inline int get_reg_index(CPUSPARCState
*env
, int cwp
, int index
)
1289 index
= (index
+ cwp
* 16) % (16 * env
->nwindows
);
1290 /* wrap handling : if cwp is on the last window, then we use the
1291 registers 'after' the end */
1292 if (index
< 8 && env
->cwp
== env
->nwindows
- 1)
1293 index
+= 16 * env
->nwindows
;
1297 /* save the register window 'cwp1' */
1298 static inline void save_window_offset(CPUSPARCState
*env
, int cwp1
)
1303 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1304 #ifdef TARGET_SPARC64
1306 sp_ptr
+= SPARC64_STACK_BIAS
;
1308 #if defined(DEBUG_WIN)
1309 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" save_cwp=%d\n",
1312 for(i
= 0; i
< 16; i
++) {
1313 /* FIXME - what to do if put_user() fails? */
1314 put_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1315 sp_ptr
+= sizeof(abi_ulong
);
1319 static void save_window(CPUSPARCState
*env
)
1321 #ifndef TARGET_SPARC64
1322 unsigned int new_wim
;
1323 new_wim
= ((env
->wim
>> 1) | (env
->wim
<< (env
->nwindows
- 1))) &
1324 ((1LL << env
->nwindows
) - 1);
1325 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1328 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1334 static void restore_window(CPUSPARCState
*env
)
1336 #ifndef TARGET_SPARC64
1337 unsigned int new_wim
;
1339 unsigned int i
, cwp1
;
1342 #ifndef TARGET_SPARC64
1343 new_wim
= ((env
->wim
<< 1) | (env
->wim
>> (env
->nwindows
- 1))) &
1344 ((1LL << env
->nwindows
) - 1);
1347 /* restore the invalid window */
1348 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1349 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1350 #ifdef TARGET_SPARC64
1352 sp_ptr
+= SPARC64_STACK_BIAS
;
1354 #if defined(DEBUG_WIN)
1355 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" load_cwp=%d\n",
1358 for(i
= 0; i
< 16; i
++) {
1359 /* FIXME - what to do if get_user() fails? */
1360 get_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1361 sp_ptr
+= sizeof(abi_ulong
);
1363 #ifdef TARGET_SPARC64
1365 if (env
->cleanwin
< env
->nwindows
- 1)
1373 static void flush_windows(CPUSPARCState
*env
)
1379 /* if restore would invoke restore_window(), then we can stop */
1380 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ offset
);
1381 #ifndef TARGET_SPARC64
1382 if (env
->wim
& (1 << cwp1
))
1385 if (env
->canrestore
== 0)
1390 save_window_offset(env
, cwp1
);
1393 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1394 #ifndef TARGET_SPARC64
1395 /* set wim so that restore will reload the registers */
1396 env
->wim
= 1 << cwp1
;
1398 #if defined(DEBUG_WIN)
1399 printf("flush_windows: nb=%d\n", offset
- 1);
1403 void cpu_loop (CPUSPARCState
*env
)
1405 CPUState
*cs
= CPU(sparc_env_get_cpu(env
));
1408 target_siginfo_t info
;
1412 trapnr
= cpu_exec(cs
);
1415 /* Compute PSR before exposing state. */
1416 if (env
->cc_op
!= CC_OP_FLAGS
) {
1421 #ifndef TARGET_SPARC64
1428 ret
= do_syscall (env
, env
->gregs
[1],
1429 env
->regwptr
[0], env
->regwptr
[1],
1430 env
->regwptr
[2], env
->regwptr
[3],
1431 env
->regwptr
[4], env
->regwptr
[5],
1433 if (ret
== -TARGET_ERESTARTSYS
|| ret
== -TARGET_QEMU_ESIGRETURN
) {
1436 if ((abi_ulong
)ret
>= (abi_ulong
)(-515)) {
1437 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1438 env
->xcc
|= PSR_CARRY
;
1440 env
->psr
|= PSR_CARRY
;
1444 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1445 env
->xcc
&= ~PSR_CARRY
;
1447 env
->psr
&= ~PSR_CARRY
;
1450 env
->regwptr
[0] = ret
;
1451 /* next instruction */
1453 env
->npc
= env
->npc
+ 4;
1455 case 0x83: /* flush windows */
1460 /* next instruction */
1462 env
->npc
= env
->npc
+ 4;
1464 #ifndef TARGET_SPARC64
1465 case TT_WIN_OVF
: /* window overflow */
1468 case TT_WIN_UNF
: /* window underflow */
1469 restore_window(env
);
1474 info
.si_signo
= TARGET_SIGSEGV
;
1476 /* XXX: check env->error_code */
1477 info
.si_code
= TARGET_SEGV_MAPERR
;
1478 info
._sifields
._sigfault
._addr
= env
->mmuregs
[4];
1479 queue_signal(env
, info
.si_signo
, &info
);
1483 case TT_SPILL
: /* window overflow */
1486 case TT_FILL
: /* window underflow */
1487 restore_window(env
);
1492 info
.si_signo
= TARGET_SIGSEGV
;
1494 /* XXX: check env->error_code */
1495 info
.si_code
= TARGET_SEGV_MAPERR
;
1496 if (trapnr
== TT_DFAULT
)
1497 info
._sifields
._sigfault
._addr
= env
->dmmuregs
[4];
1499 info
._sifields
._sigfault
._addr
= cpu_tsptr(env
)->tpc
;
1500 queue_signal(env
, info
.si_signo
, &info
);
1503 #ifndef TARGET_ABI32
1506 sparc64_get_context(env
);
1510 sparc64_set_context(env
);
1514 case EXCP_INTERRUPT
:
1515 /* just indicate that signals should be handled asap */
1519 info
.si_signo
= TARGET_SIGILL
;
1521 info
.si_code
= TARGET_ILL_ILLOPC
;
1522 info
._sifields
._sigfault
._addr
= env
->pc
;
1523 queue_signal(env
, info
.si_signo
, &info
);
1530 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1533 info
.si_signo
= sig
;
1535 info
.si_code
= TARGET_TRAP_BRKPT
;
1536 queue_signal(env
, info
.si_signo
, &info
);
1541 printf ("Unhandled trap: 0x%x\n", trapnr
);
1542 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1545 process_pending_signals (env
);
1552 static inline uint64_t cpu_ppc_get_tb(CPUPPCState
*env
)
1554 return cpu_get_host_ticks();
1557 uint64_t cpu_ppc_load_tbl(CPUPPCState
*env
)
1559 return cpu_ppc_get_tb(env
);
1562 uint32_t cpu_ppc_load_tbu(CPUPPCState
*env
)
1564 return cpu_ppc_get_tb(env
) >> 32;
1567 uint64_t cpu_ppc_load_atbl(CPUPPCState
*env
)
1569 return cpu_ppc_get_tb(env
);
1572 uint32_t cpu_ppc_load_atbu(CPUPPCState
*env
)
1574 return cpu_ppc_get_tb(env
) >> 32;
1577 uint32_t cpu_ppc601_load_rtcu(CPUPPCState
*env
)
1578 __attribute__ (( alias ("cpu_ppc_load_tbu") ));
1580 uint32_t cpu_ppc601_load_rtcl(CPUPPCState
*env
)
1582 return cpu_ppc_load_tbl(env
) & 0x3FFFFF80;
1585 /* XXX: to be fixed */
1586 int ppc_dcr_read (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t *valp
)
1591 int ppc_dcr_write (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t val
)
1596 static int do_store_exclusive(CPUPPCState
*env
)
1599 target_ulong page_addr
;
1600 target_ulong val
, val2
__attribute__((unused
)) = 0;
1604 addr
= env
->reserve_ea
;
1605 page_addr
= addr
& TARGET_PAGE_MASK
;
1608 flags
= page_get_flags(page_addr
);
1609 if ((flags
& PAGE_READ
) == 0) {
1612 int reg
= env
->reserve_info
& 0x1f;
1613 int size
= env
->reserve_info
>> 5;
1616 if (addr
== env
->reserve_addr
) {
1618 case 1: segv
= get_user_u8(val
, addr
); break;
1619 case 2: segv
= get_user_u16(val
, addr
); break;
1620 case 4: segv
= get_user_u32(val
, addr
); break;
1621 #if defined(TARGET_PPC64)
1622 case 8: segv
= get_user_u64(val
, addr
); break;
1624 segv
= get_user_u64(val
, addr
);
1626 segv
= get_user_u64(val2
, addr
+ 8);
1633 if (!segv
&& val
== env
->reserve_val
) {
1634 val
= env
->gpr
[reg
];
1636 case 1: segv
= put_user_u8(val
, addr
); break;
1637 case 2: segv
= put_user_u16(val
, addr
); break;
1638 case 4: segv
= put_user_u32(val
, addr
); break;
1639 #if defined(TARGET_PPC64)
1640 case 8: segv
= put_user_u64(val
, addr
); break;
1642 if (val2
== env
->reserve_val2
) {
1645 val
= env
->gpr
[reg
+1];
1647 val2
= env
->gpr
[reg
+1];
1649 segv
= put_user_u64(val
, addr
);
1651 segv
= put_user_u64(val2
, addr
+ 8);
1664 env
->crf
[0] = (stored
<< 1) | xer_so
;
1665 env
->reserve_addr
= (target_ulong
)-1;
1675 void cpu_loop(CPUPPCState
*env
)
1677 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
1678 target_siginfo_t info
;
1684 trapnr
= cpu_exec(cs
);
1687 case POWERPC_EXCP_NONE
:
1690 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1691 cpu_abort(cs
, "Critical interrupt while in user mode. "
1694 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1695 cpu_abort(cs
, "Machine check exception while in user mode. "
1698 case POWERPC_EXCP_DSI
: /* Data storage exception */
1699 EXCP_DUMP(env
, "Invalid data memory access: 0x" TARGET_FMT_lx
"\n",
1701 /* XXX: check this. Seems bugged */
1702 switch (env
->error_code
& 0xFF000000) {
1704 info
.si_signo
= TARGET_SIGSEGV
;
1706 info
.si_code
= TARGET_SEGV_MAPERR
;
1709 info
.si_signo
= TARGET_SIGILL
;
1711 info
.si_code
= TARGET_ILL_ILLADR
;
1714 info
.si_signo
= TARGET_SIGSEGV
;
1716 info
.si_code
= TARGET_SEGV_ACCERR
;
1719 /* Let's send a regular segfault... */
1720 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1722 info
.si_signo
= TARGET_SIGSEGV
;
1724 info
.si_code
= TARGET_SEGV_MAPERR
;
1727 info
._sifields
._sigfault
._addr
= env
->nip
;
1728 queue_signal(env
, info
.si_signo
, &info
);
1730 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1731 EXCP_DUMP(env
, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1732 "\n", env
->spr
[SPR_SRR0
]);
1733 /* XXX: check this */
1734 switch (env
->error_code
& 0xFF000000) {
1736 info
.si_signo
= TARGET_SIGSEGV
;
1738 info
.si_code
= TARGET_SEGV_MAPERR
;
1742 info
.si_signo
= TARGET_SIGSEGV
;
1744 info
.si_code
= TARGET_SEGV_ACCERR
;
1747 /* Let's send a regular segfault... */
1748 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1750 info
.si_signo
= TARGET_SIGSEGV
;
1752 info
.si_code
= TARGET_SEGV_MAPERR
;
1755 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1756 queue_signal(env
, info
.si_signo
, &info
);
1758 case POWERPC_EXCP_EXTERNAL
: /* External input */
1759 cpu_abort(cs
, "External interrupt while in user mode. "
1762 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1763 EXCP_DUMP(env
, "Unaligned memory access\n");
1764 /* XXX: check this */
1765 info
.si_signo
= TARGET_SIGBUS
;
1767 info
.si_code
= TARGET_BUS_ADRALN
;
1768 info
._sifields
._sigfault
._addr
= env
->nip
;
1769 queue_signal(env
, info
.si_signo
, &info
);
1771 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1772 case POWERPC_EXCP_HV_EMU
: /* HV emulation */
1773 /* XXX: check this */
1774 switch (env
->error_code
& ~0xF) {
1775 case POWERPC_EXCP_FP
:
1776 EXCP_DUMP(env
, "Floating point program exception\n");
1777 info
.si_signo
= TARGET_SIGFPE
;
1779 switch (env
->error_code
& 0xF) {
1780 case POWERPC_EXCP_FP_OX
:
1781 info
.si_code
= TARGET_FPE_FLTOVF
;
1783 case POWERPC_EXCP_FP_UX
:
1784 info
.si_code
= TARGET_FPE_FLTUND
;
1786 case POWERPC_EXCP_FP_ZX
:
1787 case POWERPC_EXCP_FP_VXZDZ
:
1788 info
.si_code
= TARGET_FPE_FLTDIV
;
1790 case POWERPC_EXCP_FP_XX
:
1791 info
.si_code
= TARGET_FPE_FLTRES
;
1793 case POWERPC_EXCP_FP_VXSOFT
:
1794 info
.si_code
= TARGET_FPE_FLTINV
;
1796 case POWERPC_EXCP_FP_VXSNAN
:
1797 case POWERPC_EXCP_FP_VXISI
:
1798 case POWERPC_EXCP_FP_VXIDI
:
1799 case POWERPC_EXCP_FP_VXIMZ
:
1800 case POWERPC_EXCP_FP_VXVC
:
1801 case POWERPC_EXCP_FP_VXSQRT
:
1802 case POWERPC_EXCP_FP_VXCVI
:
1803 info
.si_code
= TARGET_FPE_FLTSUB
;
1806 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1811 case POWERPC_EXCP_INVAL
:
1812 EXCP_DUMP(env
, "Invalid instruction\n");
1813 info
.si_signo
= TARGET_SIGILL
;
1815 switch (env
->error_code
& 0xF) {
1816 case POWERPC_EXCP_INVAL_INVAL
:
1817 info
.si_code
= TARGET_ILL_ILLOPC
;
1819 case POWERPC_EXCP_INVAL_LSWX
:
1820 info
.si_code
= TARGET_ILL_ILLOPN
;
1822 case POWERPC_EXCP_INVAL_SPR
:
1823 info
.si_code
= TARGET_ILL_PRVREG
;
1825 case POWERPC_EXCP_INVAL_FP
:
1826 info
.si_code
= TARGET_ILL_COPROC
;
1829 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1830 env
->error_code
& 0xF);
1831 info
.si_code
= TARGET_ILL_ILLADR
;
1835 case POWERPC_EXCP_PRIV
:
1836 EXCP_DUMP(env
, "Privilege violation\n");
1837 info
.si_signo
= TARGET_SIGILL
;
1839 switch (env
->error_code
& 0xF) {
1840 case POWERPC_EXCP_PRIV_OPC
:
1841 info
.si_code
= TARGET_ILL_PRVOPC
;
1843 case POWERPC_EXCP_PRIV_REG
:
1844 info
.si_code
= TARGET_ILL_PRVREG
;
1847 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1848 env
->error_code
& 0xF);
1849 info
.si_code
= TARGET_ILL_PRVOPC
;
1853 case POWERPC_EXCP_TRAP
:
1854 cpu_abort(cs
, "Tried to call a TRAP\n");
1857 /* Should not happen ! */
1858 cpu_abort(cs
, "Unknown program exception (%02x)\n",
1862 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1863 queue_signal(env
, info
.si_signo
, &info
);
1865 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1866 EXCP_DUMP(env
, "No floating point allowed\n");
1867 info
.si_signo
= TARGET_SIGILL
;
1869 info
.si_code
= TARGET_ILL_COPROC
;
1870 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1871 queue_signal(env
, info
.si_signo
, &info
);
1873 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1874 cpu_abort(cs
, "Syscall exception while in user mode. "
1877 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1878 EXCP_DUMP(env
, "No APU instruction allowed\n");
1879 info
.si_signo
= TARGET_SIGILL
;
1881 info
.si_code
= TARGET_ILL_COPROC
;
1882 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1883 queue_signal(env
, info
.si_signo
, &info
);
1885 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1886 cpu_abort(cs
, "Decrementer interrupt while in user mode. "
1889 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1890 cpu_abort(cs
, "Fix interval timer interrupt while in user mode. "
1893 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1894 cpu_abort(cs
, "Watchdog timer interrupt while in user mode. "
1897 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1898 cpu_abort(cs
, "Data TLB exception while in user mode. "
1901 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1902 cpu_abort(cs
, "Instruction TLB exception while in user mode. "
1905 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1906 EXCP_DUMP(env
, "No SPE/floating-point instruction allowed\n");
1907 info
.si_signo
= TARGET_SIGILL
;
1909 info
.si_code
= TARGET_ILL_COPROC
;
1910 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1911 queue_signal(env
, info
.si_signo
, &info
);
1913 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1914 cpu_abort(cs
, "Embedded floating-point data IRQ not handled\n");
1916 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1917 cpu_abort(cs
, "Embedded floating-point round IRQ not handled\n");
1919 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1920 cpu_abort(cs
, "Performance monitor exception not handled\n");
1922 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1923 cpu_abort(cs
, "Doorbell interrupt while in user mode. "
1926 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1927 cpu_abort(cs
, "Doorbell critical interrupt while in user mode. "
1930 case POWERPC_EXCP_RESET
: /* System reset exception */
1931 cpu_abort(cs
, "Reset interrupt while in user mode. "
1934 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1935 cpu_abort(cs
, "Data segment exception while in user mode. "
1938 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1939 cpu_abort(cs
, "Instruction segment exception "
1940 "while in user mode. Aborting\n");
1942 /* PowerPC 64 with hypervisor mode support */
1943 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1944 cpu_abort(cs
, "Hypervisor decrementer interrupt "
1945 "while in user mode. Aborting\n");
1947 case POWERPC_EXCP_TRACE
: /* Trace exception */
1949 * we use this exception to emulate step-by-step execution mode.
1952 /* PowerPC 64 with hypervisor mode support */
1953 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1954 cpu_abort(cs
, "Hypervisor data storage exception "
1955 "while in user mode. Aborting\n");
1957 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1958 cpu_abort(cs
, "Hypervisor instruction storage exception "
1959 "while in user mode. Aborting\n");
1961 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1962 cpu_abort(cs
, "Hypervisor data segment exception "
1963 "while in user mode. Aborting\n");
1965 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1966 cpu_abort(cs
, "Hypervisor instruction segment exception "
1967 "while in user mode. Aborting\n");
1969 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1970 EXCP_DUMP(env
, "No Altivec instructions allowed\n");
1971 info
.si_signo
= TARGET_SIGILL
;
1973 info
.si_code
= TARGET_ILL_COPROC
;
1974 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1975 queue_signal(env
, info
.si_signo
, &info
);
1977 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1978 cpu_abort(cs
, "Programmable interval timer interrupt "
1979 "while in user mode. Aborting\n");
1981 case POWERPC_EXCP_IO
: /* IO error exception */
1982 cpu_abort(cs
, "IO error exception while in user mode. "
1985 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1986 cpu_abort(cs
, "Run mode exception while in user mode. "
1989 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1990 cpu_abort(cs
, "Emulation trap exception not handled\n");
1992 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1993 cpu_abort(cs
, "Instruction fetch TLB exception "
1994 "while in user-mode. Aborting");
1996 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1997 cpu_abort(cs
, "Data load TLB exception while in user-mode. "
2000 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
2001 cpu_abort(cs
, "Data store TLB exception while in user-mode. "
2004 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
2005 cpu_abort(cs
, "Floating-point assist exception not handled\n");
2007 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
2008 cpu_abort(cs
, "Instruction address breakpoint exception "
2011 case POWERPC_EXCP_SMI
: /* System management interrupt */
2012 cpu_abort(cs
, "System management interrupt while in user mode. "
2015 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
2016 cpu_abort(cs
, "Thermal interrupt interrupt while in user mode. "
2019 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
2020 cpu_abort(cs
, "Performance monitor exception not handled\n");
2022 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
2023 cpu_abort(cs
, "Vector assist exception not handled\n");
2025 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
2026 cpu_abort(cs
, "Soft patch exception not handled\n");
2028 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
2029 cpu_abort(cs
, "Maintenance exception while in user mode. "
2032 case POWERPC_EXCP_STOP
: /* stop translation */
2033 /* We did invalidate the instruction cache. Go on */
2035 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
2036 /* We just stopped because of a branch. Go on */
2038 case POWERPC_EXCP_SYSCALL_USER
:
2039 /* system call in user-mode emulation */
2041 * PPC ABI uses overflow flag in cr0 to signal an error
2044 env
->crf
[0] &= ~0x1;
2045 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
2046 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
2048 if (ret
== -TARGET_ERESTARTSYS
) {
2052 if (ret
== (target_ulong
)(-TARGET_QEMU_ESIGRETURN
)) {
2053 /* Returning from a successful sigreturn syscall.
2054 Avoid corrupting register state. */
2057 if (ret
> (target_ulong
)(-515)) {
2063 case POWERPC_EXCP_STCX
:
2064 if (do_store_exclusive(env
)) {
2065 info
.si_signo
= TARGET_SIGSEGV
;
2067 info
.si_code
= TARGET_SEGV_MAPERR
;
2068 info
._sifields
._sigfault
._addr
= env
->nip
;
2069 queue_signal(env
, info
.si_signo
, &info
);
2076 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2078 info
.si_signo
= sig
;
2080 info
.si_code
= TARGET_TRAP_BRKPT
;
2081 queue_signal(env
, info
.si_signo
, &info
);
2085 case EXCP_INTERRUPT
:
2086 /* just indicate that signals should be handled asap */
2089 cpu_abort(cs
, "Unknown exception 0x%d. Aborting\n", trapnr
);
2092 process_pending_signals(env
);
2099 # ifdef TARGET_ABI_MIPSO32
2100 # define MIPS_SYS(name, args) args,
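/*
 * Note (added, not from the original source): with MIPS_SYS defined this way
 * the table below stores only the argument count of each o32 syscall; the
 * o32 cpu_loop() later in this file indexes it with (syscall number - 4000)
 * to decide how many arguments to fetch from registers and from the stack.
 */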
2101 static const uint8_t mips_syscall_args
[] = {
2102 MIPS_SYS(sys_syscall
, 8) /* 4000 */
2103 MIPS_SYS(sys_exit
, 1)
2104 MIPS_SYS(sys_fork
, 0)
2105 MIPS_SYS(sys_read
, 3)
2106 MIPS_SYS(sys_write
, 3)
2107 MIPS_SYS(sys_open
, 3) /* 4005 */
2108 MIPS_SYS(sys_close
, 1)
2109 MIPS_SYS(sys_waitpid
, 3)
2110 MIPS_SYS(sys_creat
, 2)
2111 MIPS_SYS(sys_link
, 2)
2112 MIPS_SYS(sys_unlink
, 1) /* 4010 */
2113 MIPS_SYS(sys_execve
, 0)
2114 MIPS_SYS(sys_chdir
, 1)
2115 MIPS_SYS(sys_time
, 1)
2116 MIPS_SYS(sys_mknod
, 3)
2117 MIPS_SYS(sys_chmod
, 2) /* 4015 */
2118 MIPS_SYS(sys_lchown
, 3)
2119 MIPS_SYS(sys_ni_syscall
, 0)
2120 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_stat */
2121 MIPS_SYS(sys_lseek
, 3)
2122 MIPS_SYS(sys_getpid
, 0) /* 4020 */
2123 MIPS_SYS(sys_mount
, 5)
2124 MIPS_SYS(sys_umount
, 1)
2125 MIPS_SYS(sys_setuid
, 1)
2126 MIPS_SYS(sys_getuid
, 0)
2127 MIPS_SYS(sys_stime
, 1) /* 4025 */
2128 MIPS_SYS(sys_ptrace
, 4)
2129 MIPS_SYS(sys_alarm
, 1)
2130 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_fstat */
2131 MIPS_SYS(sys_pause
, 0)
2132 MIPS_SYS(sys_utime
, 2) /* 4030 */
2133 MIPS_SYS(sys_ni_syscall
, 0)
2134 MIPS_SYS(sys_ni_syscall
, 0)
2135 MIPS_SYS(sys_access
, 2)
2136 MIPS_SYS(sys_nice
, 1)
2137 MIPS_SYS(sys_ni_syscall
, 0) /* 4035 */
2138 MIPS_SYS(sys_sync
, 0)
2139 MIPS_SYS(sys_kill
, 2)
2140 MIPS_SYS(sys_rename
, 2)
2141 MIPS_SYS(sys_mkdir
, 2)
2142 MIPS_SYS(sys_rmdir
, 1) /* 4040 */
2143 MIPS_SYS(sys_dup
, 1)
2144 MIPS_SYS(sys_pipe
, 0)
2145 MIPS_SYS(sys_times
, 1)
2146 MIPS_SYS(sys_ni_syscall
, 0)
2147 MIPS_SYS(sys_brk
, 1) /* 4045 */
2148 MIPS_SYS(sys_setgid
, 1)
2149 MIPS_SYS(sys_getgid
, 0)
2150 MIPS_SYS(sys_ni_syscall
, 0) /* was signal(2) */
2151 MIPS_SYS(sys_geteuid
, 0)
2152 MIPS_SYS(sys_getegid
, 0) /* 4050 */
2153 MIPS_SYS(sys_acct
, 0)
2154 MIPS_SYS(sys_umount2
, 2)
2155 MIPS_SYS(sys_ni_syscall
, 0)
2156 MIPS_SYS(sys_ioctl
, 3)
2157 MIPS_SYS(sys_fcntl
, 3) /* 4055 */
2158 MIPS_SYS(sys_ni_syscall
, 2)
2159 MIPS_SYS(sys_setpgid
, 2)
2160 MIPS_SYS(sys_ni_syscall
, 0)
2161 MIPS_SYS(sys_olduname
, 1)
2162 MIPS_SYS(sys_umask
, 1) /* 4060 */
2163 MIPS_SYS(sys_chroot
, 1)
2164 MIPS_SYS(sys_ustat
, 2)
2165 MIPS_SYS(sys_dup2
, 2)
2166 MIPS_SYS(sys_getppid
, 0)
2167 MIPS_SYS(sys_getpgrp
, 0) /* 4065 */
2168 MIPS_SYS(sys_setsid
, 0)
2169 MIPS_SYS(sys_sigaction
, 3)
2170 MIPS_SYS(sys_sgetmask
, 0)
2171 MIPS_SYS(sys_ssetmask
, 1)
2172 MIPS_SYS(sys_setreuid
, 2) /* 4070 */
2173 MIPS_SYS(sys_setregid
, 2)
2174 MIPS_SYS(sys_sigsuspend
, 0)
2175 MIPS_SYS(sys_sigpending
, 1)
2176 MIPS_SYS(sys_sethostname
, 2)
2177 MIPS_SYS(sys_setrlimit
, 2) /* 4075 */
2178 MIPS_SYS(sys_getrlimit
, 2)
2179 MIPS_SYS(sys_getrusage
, 2)
2180 MIPS_SYS(sys_gettimeofday
, 2)
2181 MIPS_SYS(sys_settimeofday
, 2)
2182 MIPS_SYS(sys_getgroups
, 2) /* 4080 */
2183 MIPS_SYS(sys_setgroups
, 2)
2184 MIPS_SYS(sys_ni_syscall
, 0) /* old_select */
2185 MIPS_SYS(sys_symlink
, 2)
2186 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_lstat */
2187 MIPS_SYS(sys_readlink
, 3) /* 4085 */
2188 MIPS_SYS(sys_uselib
, 1)
2189 MIPS_SYS(sys_swapon
, 2)
2190 MIPS_SYS(sys_reboot
, 3)
2191 MIPS_SYS(old_readdir
, 3)
2192 MIPS_SYS(old_mmap
, 6) /* 4090 */
2193 MIPS_SYS(sys_munmap
, 2)
2194 MIPS_SYS(sys_truncate
, 2)
2195 MIPS_SYS(sys_ftruncate
, 2)
2196 MIPS_SYS(sys_fchmod
, 2)
2197 MIPS_SYS(sys_fchown
, 3) /* 4095 */
2198 MIPS_SYS(sys_getpriority
, 2)
2199 MIPS_SYS(sys_setpriority
, 3)
2200 MIPS_SYS(sys_ni_syscall
, 0)
2201 MIPS_SYS(sys_statfs
, 2)
2202 MIPS_SYS(sys_fstatfs
, 2) /* 4100 */
2203 MIPS_SYS(sys_ni_syscall
, 0) /* was ioperm(2) */
2204 MIPS_SYS(sys_socketcall
, 2)
2205 MIPS_SYS(sys_syslog
, 3)
2206 MIPS_SYS(sys_setitimer
, 3)
2207 MIPS_SYS(sys_getitimer
, 2) /* 4105 */
2208 MIPS_SYS(sys_newstat
, 2)
2209 MIPS_SYS(sys_newlstat
, 2)
2210 MIPS_SYS(sys_newfstat
, 2)
2211 MIPS_SYS(sys_uname
, 1)
2212 MIPS_SYS(sys_ni_syscall
, 0) /* 4110 was iopl(2) */
2213 MIPS_SYS(sys_vhangup
, 0)
2214 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_idle() */
2215 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_vm86 */
2216 MIPS_SYS(sys_wait4
, 4)
2217 MIPS_SYS(sys_swapoff
, 1) /* 4115 */
2218 MIPS_SYS(sys_sysinfo
, 1)
2219 MIPS_SYS(sys_ipc
, 6)
2220 MIPS_SYS(sys_fsync
, 1)
2221 MIPS_SYS(sys_sigreturn
, 0)
2222 MIPS_SYS(sys_clone
, 6) /* 4120 */
2223 MIPS_SYS(sys_setdomainname
, 2)
2224 MIPS_SYS(sys_newuname
, 1)
2225 MIPS_SYS(sys_ni_syscall
, 0) /* sys_modify_ldt */
2226 MIPS_SYS(sys_adjtimex
, 1)
2227 MIPS_SYS(sys_mprotect
, 3) /* 4125 */
2228 MIPS_SYS(sys_sigprocmask
, 3)
2229 MIPS_SYS(sys_ni_syscall
, 0) /* was create_module */
2230 MIPS_SYS(sys_init_module
, 5)
2231 MIPS_SYS(sys_delete_module
, 1)
2232 MIPS_SYS(sys_ni_syscall
, 0) /* 4130 was get_kernel_syms */
2233 MIPS_SYS(sys_quotactl
, 0)
2234 MIPS_SYS(sys_getpgid
, 1)
2235 MIPS_SYS(sys_fchdir
, 1)
2236 MIPS_SYS(sys_bdflush
, 2)
2237 MIPS_SYS(sys_sysfs
, 3) /* 4135 */
2238 MIPS_SYS(sys_personality
, 1)
2239 MIPS_SYS(sys_ni_syscall
, 0) /* for afs_syscall */
2240 MIPS_SYS(sys_setfsuid
, 1)
2241 MIPS_SYS(sys_setfsgid
, 1)
2242 MIPS_SYS(sys_llseek
, 5) /* 4140 */
2243 MIPS_SYS(sys_getdents
, 3)
2244 MIPS_SYS(sys_select
, 5)
2245 MIPS_SYS(sys_flock
, 2)
2246 MIPS_SYS(sys_msync
, 3)
2247 MIPS_SYS(sys_readv
, 3) /* 4145 */
2248 MIPS_SYS(sys_writev
, 3)
2249 MIPS_SYS(sys_cacheflush
, 3)
2250 MIPS_SYS(sys_cachectl
, 3)
2251 MIPS_SYS(sys_sysmips
, 4)
2252 MIPS_SYS(sys_ni_syscall
, 0) /* 4150 */
2253 MIPS_SYS(sys_getsid
, 1)
2254 MIPS_SYS(sys_fdatasync
, 0)
2255 MIPS_SYS(sys_sysctl
, 1)
2256 MIPS_SYS(sys_mlock
, 2)
2257 MIPS_SYS(sys_munlock
, 2) /* 4155 */
2258 MIPS_SYS(sys_mlockall
, 1)
2259 MIPS_SYS(sys_munlockall
, 0)
2260 MIPS_SYS(sys_sched_setparam
, 2)
2261 MIPS_SYS(sys_sched_getparam
, 2)
2262 MIPS_SYS(sys_sched_setscheduler
, 3) /* 4160 */
2263 MIPS_SYS(sys_sched_getscheduler
, 1)
2264 MIPS_SYS(sys_sched_yield
, 0)
2265 MIPS_SYS(sys_sched_get_priority_max
, 1)
2266 MIPS_SYS(sys_sched_get_priority_min
, 1)
2267 MIPS_SYS(sys_sched_rr_get_interval
, 2) /* 4165 */
2268 MIPS_SYS(sys_nanosleep
, 2)
2269 MIPS_SYS(sys_mremap
, 5)
2270 MIPS_SYS(sys_accept
, 3)
2271 MIPS_SYS(sys_bind
, 3)
2272 MIPS_SYS(sys_connect
, 3) /* 4170 */
2273 MIPS_SYS(sys_getpeername
, 3)
2274 MIPS_SYS(sys_getsockname
, 3)
2275 MIPS_SYS(sys_getsockopt
, 5)
2276 MIPS_SYS(sys_listen
, 2)
2277 MIPS_SYS(sys_recv
, 4) /* 4175 */
2278 MIPS_SYS(sys_recvfrom
, 6)
2279 MIPS_SYS(sys_recvmsg
, 3)
2280 MIPS_SYS(sys_send
, 4)
2281 MIPS_SYS(sys_sendmsg
, 3)
2282 MIPS_SYS(sys_sendto
, 6) /* 4180 */
2283 MIPS_SYS(sys_setsockopt
, 5)
2284 MIPS_SYS(sys_shutdown
, 2)
2285 MIPS_SYS(sys_socket
, 3)
2286 MIPS_SYS(sys_socketpair
, 4)
2287 MIPS_SYS(sys_setresuid
, 3) /* 4185 */
2288 MIPS_SYS(sys_getresuid
, 3)
2289 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_query_module */
2290 MIPS_SYS(sys_poll
, 3)
2291 MIPS_SYS(sys_nfsservctl
, 3)
2292 MIPS_SYS(sys_setresgid
, 3) /* 4190 */
2293 MIPS_SYS(sys_getresgid
, 3)
2294 MIPS_SYS(sys_prctl
, 5)
2295 MIPS_SYS(sys_rt_sigreturn
, 0)
2296 MIPS_SYS(sys_rt_sigaction
, 4)
2297 MIPS_SYS(sys_rt_sigprocmask
, 4) /* 4195 */
2298 MIPS_SYS(sys_rt_sigpending
, 2)
2299 MIPS_SYS(sys_rt_sigtimedwait
, 4)
2300 MIPS_SYS(sys_rt_sigqueueinfo
, 3)
2301 MIPS_SYS(sys_rt_sigsuspend
, 0)
2302 MIPS_SYS(sys_pread64
, 6) /* 4200 */
2303 MIPS_SYS(sys_pwrite64
, 6)
2304 MIPS_SYS(sys_chown
, 3)
2305 MIPS_SYS(sys_getcwd
, 2)
2306 MIPS_SYS(sys_capget
, 2)
2307 MIPS_SYS(sys_capset
, 2) /* 4205 */
2308 MIPS_SYS(sys_sigaltstack
, 2)
2309 MIPS_SYS(sys_sendfile
, 4)
2310 MIPS_SYS(sys_ni_syscall
, 0)
2311 MIPS_SYS(sys_ni_syscall
, 0)
2312 MIPS_SYS(sys_mmap2
, 6) /* 4210 */
2313 MIPS_SYS(sys_truncate64
, 4)
2314 MIPS_SYS(sys_ftruncate64
, 4)
2315 MIPS_SYS(sys_stat64
, 2)
2316 MIPS_SYS(sys_lstat64
, 2)
2317 MIPS_SYS(sys_fstat64
, 2) /* 4215 */
2318 MIPS_SYS(sys_pivot_root
, 2)
2319 MIPS_SYS(sys_mincore
, 3)
2320 MIPS_SYS(sys_madvise
, 3)
2321 MIPS_SYS(sys_getdents64
, 3)
2322 MIPS_SYS(sys_fcntl64
, 3) /* 4220 */
2323 MIPS_SYS(sys_ni_syscall
, 0)
2324 MIPS_SYS(sys_gettid
, 0)
2325 MIPS_SYS(sys_readahead
, 5)
2326 MIPS_SYS(sys_setxattr
, 5)
2327 MIPS_SYS(sys_lsetxattr
, 5) /* 4225 */
2328 MIPS_SYS(sys_fsetxattr
, 5)
2329 MIPS_SYS(sys_getxattr
, 4)
2330 MIPS_SYS(sys_lgetxattr
, 4)
2331 MIPS_SYS(sys_fgetxattr
, 4)
2332 MIPS_SYS(sys_listxattr
, 3) /* 4230 */
2333 MIPS_SYS(sys_llistxattr
, 3)
2334 MIPS_SYS(sys_flistxattr
, 3)
2335 MIPS_SYS(sys_removexattr
, 2)
2336 MIPS_SYS(sys_lremovexattr
, 2)
2337 MIPS_SYS(sys_fremovexattr
, 2) /* 4235 */
2338 MIPS_SYS(sys_tkill
, 2)
2339 MIPS_SYS(sys_sendfile64
, 5)
2340 MIPS_SYS(sys_futex
, 6)
2341 MIPS_SYS(sys_sched_setaffinity
, 3)
2342 MIPS_SYS(sys_sched_getaffinity
, 3) /* 4240 */
2343 MIPS_SYS(sys_io_setup
, 2)
2344 MIPS_SYS(sys_io_destroy
, 1)
2345 MIPS_SYS(sys_io_getevents
, 5)
2346 MIPS_SYS(sys_io_submit
, 3)
2347 MIPS_SYS(sys_io_cancel
, 3) /* 4245 */
2348 MIPS_SYS(sys_exit_group
, 1)
2349 MIPS_SYS(sys_lookup_dcookie
, 3)
2350 MIPS_SYS(sys_epoll_create
, 1)
2351 MIPS_SYS(sys_epoll_ctl
, 4)
2352 MIPS_SYS(sys_epoll_wait
, 3) /* 4250 */
2353 MIPS_SYS(sys_remap_file_pages
, 5)
2354 MIPS_SYS(sys_set_tid_address
, 1)
2355 MIPS_SYS(sys_restart_syscall
, 0)
2356 MIPS_SYS(sys_fadvise64_64
, 7)
2357 MIPS_SYS(sys_statfs64
, 3) /* 4255 */
2358 MIPS_SYS(sys_fstatfs64
, 2)
2359 MIPS_SYS(sys_timer_create
, 3)
2360 MIPS_SYS(sys_timer_settime
, 4)
2361 MIPS_SYS(sys_timer_gettime
, 2)
2362 MIPS_SYS(sys_timer_getoverrun
, 1) /* 4260 */
2363 MIPS_SYS(sys_timer_delete
, 1)
2364 MIPS_SYS(sys_clock_settime
, 2)
2365 MIPS_SYS(sys_clock_gettime
, 2)
2366 MIPS_SYS(sys_clock_getres
, 2)
2367 MIPS_SYS(sys_clock_nanosleep
, 4) /* 4265 */
2368 MIPS_SYS(sys_tgkill
, 3)
2369 MIPS_SYS(sys_utimes
, 2)
2370 MIPS_SYS(sys_mbind
, 4)
2371 MIPS_SYS(sys_ni_syscall
, 0) /* sys_get_mempolicy */
2372 MIPS_SYS(sys_ni_syscall
, 0) /* 4270 sys_set_mempolicy */
2373 MIPS_SYS(sys_mq_open
, 4)
2374 MIPS_SYS(sys_mq_unlink
, 1)
2375 MIPS_SYS(sys_mq_timedsend
, 5)
2376 MIPS_SYS(sys_mq_timedreceive
, 5)
2377 MIPS_SYS(sys_mq_notify
, 2) /* 4275 */
2378 MIPS_SYS(sys_mq_getsetattr
, 3)
2379 MIPS_SYS(sys_ni_syscall
, 0) /* sys_vserver */
2380 MIPS_SYS(sys_waitid
, 4)
2381 MIPS_SYS(sys_ni_syscall
, 0) /* available, was setaltroot */
2382 MIPS_SYS(sys_add_key
, 5)
2383 MIPS_SYS(sys_request_key
, 4)
2384 MIPS_SYS(sys_keyctl
, 5)
2385 MIPS_SYS(sys_set_thread_area
, 1)
2386 MIPS_SYS(sys_inotify_init
, 0)
2387 MIPS_SYS(sys_inotify_add_watch
, 3) /* 4285 */
2388 MIPS_SYS(sys_inotify_rm_watch
, 2)
2389 MIPS_SYS(sys_migrate_pages
, 4)
2390 MIPS_SYS(sys_openat
, 4)
2391 MIPS_SYS(sys_mkdirat
, 3)
2392 MIPS_SYS(sys_mknodat
, 4) /* 4290 */
2393 MIPS_SYS(sys_fchownat
, 5)
2394 MIPS_SYS(sys_futimesat
, 3)
2395 MIPS_SYS(sys_fstatat64
, 4)
2396 MIPS_SYS(sys_unlinkat
, 3)
2397 MIPS_SYS(sys_renameat
, 4) /* 4295 */
2398 MIPS_SYS(sys_linkat
, 5)
2399 MIPS_SYS(sys_symlinkat
, 3)
2400 MIPS_SYS(sys_readlinkat
, 4)
2401 MIPS_SYS(sys_fchmodat
, 3)
2402 MIPS_SYS(sys_faccessat
, 3) /* 4300 */
2403 MIPS_SYS(sys_pselect6
, 6)
2404 MIPS_SYS(sys_ppoll
, 5)
2405 MIPS_SYS(sys_unshare
, 1)
2406 MIPS_SYS(sys_splice
, 6)
2407 MIPS_SYS(sys_sync_file_range
, 7) /* 4305 */
2408 MIPS_SYS(sys_tee
, 4)
2409 MIPS_SYS(sys_vmsplice
, 4)
2410 MIPS_SYS(sys_move_pages
, 6)
2411 MIPS_SYS(sys_set_robust_list
, 2)
2412 MIPS_SYS(sys_get_robust_list
, 3) /* 4310 */
2413 MIPS_SYS(sys_kexec_load
, 4)
2414 MIPS_SYS(sys_getcpu
, 3)
2415 MIPS_SYS(sys_epoll_pwait
, 6)
2416 MIPS_SYS(sys_ioprio_set
, 3)
2417 MIPS_SYS(sys_ioprio_get
, 2)
2418 MIPS_SYS(sys_utimensat
, 4)
2419 MIPS_SYS(sys_signalfd
, 3)
2420 MIPS_SYS(sys_ni_syscall
, 0) /* was timerfd */
2421 MIPS_SYS(sys_eventfd
, 1)
2422 MIPS_SYS(sys_fallocate
, 6) /* 4320 */
2423 MIPS_SYS(sys_timerfd_create
, 2)
2424 MIPS_SYS(sys_timerfd_gettime
, 2)
2425 MIPS_SYS(sys_timerfd_settime
, 4)
2426 MIPS_SYS(sys_signalfd4
, 4)
2427 MIPS_SYS(sys_eventfd2
, 2) /* 4325 */
2428 MIPS_SYS(sys_epoll_create1
, 1)
2429 MIPS_SYS(sys_dup3
, 3)
2430 MIPS_SYS(sys_pipe2
, 2)
2431 MIPS_SYS(sys_inotify_init1
, 1)
2432 MIPS_SYS(sys_preadv
, 6) /* 4330 */
2433 MIPS_SYS(sys_pwritev
, 6)
2434 MIPS_SYS(sys_rt_tgsigqueueinfo
, 4)
2435 MIPS_SYS(sys_perf_event_open
, 5)
2436 MIPS_SYS(sys_accept4
, 4)
2437 MIPS_SYS(sys_recvmmsg
, 5) /* 4335 */
2438 MIPS_SYS(sys_fanotify_init
, 2)
2439 MIPS_SYS(sys_fanotify_mark
, 6)
2440 MIPS_SYS(sys_prlimit64
, 4)
2441 MIPS_SYS(sys_name_to_handle_at
, 5)
2442 MIPS_SYS(sys_open_by_handle_at
, 3) /* 4340 */
2443 MIPS_SYS(sys_clock_adjtime
, 2)
2444 MIPS_SYS(sys_syncfs
, 1)
2449 static int do_store_exclusive(CPUMIPSState
*env
)
2452 target_ulong page_addr
;
2460 page_addr
= addr
& TARGET_PAGE_MASK
;
2463 flags
= page_get_flags(page_addr
);
2464 if ((flags
& PAGE_READ
) == 0) {
2467 reg
= env
->llreg
& 0x1f;
2468 d
= (env
->llreg
& 0x20) != 0;
2470 segv
= get_user_s64(val
, addr
);
2472 segv
= get_user_s32(val
, addr
);
2475 if (val
!= env
->llval
) {
2476 env
->active_tc
.gpr
[reg
] = 0;
2479 segv
= put_user_u64(env
->llnewval
, addr
);
2481 segv
= put_user_u32(env
->llnewval
, addr
);
2484 env
->active_tc
.gpr
[reg
] = 1;
2491 env
->active_tc
.PC
+= 4;
static int do_break(CPUMIPSState *env, target_siginfo_t *info,
        info->si_signo = TARGET_SIGFPE;
        info->si_code = (code == BRK_OVERFLOW) ? FPE_INTOVF : FPE_INTDIV;
        queue_signal(env, info->si_signo, &*info);
        info->si_signo = TARGET_SIGTRAP;
        queue_signal(env, info->si_signo, &*info);
void cpu_loop(CPUMIPSState *env)
    CPUState *cs = CPU(mips_env_get_cpu(env));
    target_siginfo_t info;
# ifdef TARGET_ABI_MIPSO32
    unsigned int syscall_num;
        trapnr = cpu_exec(cs);
            env->active_tc.PC += 4;
# ifdef TARGET_ABI_MIPSO32
            syscall_num = env->active_tc.gpr[2] - 4000;
            if (syscall_num >= sizeof(mips_syscall_args)) {
                ret = -TARGET_ENOSYS;
                abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;
                nb_args = mips_syscall_args[syscall_num];
                sp_reg = env->active_tc.gpr[29];
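                /*
                 * o32 syscall argument convention: the first four arguments
                 * arrive in $a0-$a3 (gpr[4]..gpr[7]); arguments five to
                 * eight, when mips_syscall_args says they exist, are read
                 * from the guest stack at $sp+16 .. $sp+28 below.
                 */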
                /* these arguments are taken from the stack */
                if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) {
                if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) {
                if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) {
                if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) {
                ret = do_syscall(env, env->active_tc.gpr[2],
                                 env->active_tc.gpr[4],
                                 env->active_tc.gpr[5],
                                 env->active_tc.gpr[6],
                                 env->active_tc.gpr[7],
                                 arg5, arg6, arg7, arg8);
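            /*
             * On the n32/n64 ABIs (the call below) nothing is spilled to the
             * stack: up to eight syscall arguments are passed in registers
             * $a0-$a7, i.e. gpr[4] through gpr[11].
             */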
            ret = do_syscall(env, env->active_tc.gpr[2],
                             env->active_tc.gpr[4], env->active_tc.gpr[5],
                             env->active_tc.gpr[6], env->active_tc.gpr[7],
                             env->active_tc.gpr[8], env->active_tc.gpr[9],
                             env->active_tc.gpr[10], env->active_tc.gpr[11]);
            if (ret == -TARGET_ERESTARTSYS) {
                env->active_tc.PC -= 4;
            if (ret == -TARGET_QEMU_ESIGRETURN) {
                /* Returning from a successful sigreturn syscall.
                   Avoid clobbering register state.  */
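            /*
             * Linux/MIPS reports syscall failure out of band: $a3 (gpr[7])
             * is set to 1 and $v0 (gpr[2]) carries the errno, so return
             * values in the range [-1133, -1] are treated as failed
             * syscalls here.
             */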
            if ((abi_ulong)ret >= (abi_ulong)-1133) {
                env->active_tc.gpr[7] = 1; /* error flag */
                env->active_tc.gpr[7] = 0; /* error flag */
            env->active_tc.gpr[2] = ret;
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->CP0_BadVAddr;
            queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGILL;
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, &info);
            if (do_store_exclusive(env)) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->active_tc.PC;
                queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            queue_signal(env, info.si_signo, &info);
        /* The code below was inspired by the MIPS Linux kernel trap
         * handling code in arch/mips/kernel/traps.c.
            abi_ulong trap_instr;
            if (env->hflags & MIPS_HFLAG_M16) {
                if (env->insn_flags & ASE_MICROMIPS) {
                    /* microMIPS mode */
                    ret = get_user_u16(trap_instr, env->active_tc.PC);
                    if ((trap_instr >> 10) == 0x11) {
                        /* 16-bit instruction */
                        code = trap_instr & 0xf;
                        /* 32-bit instruction */
                        ret = get_user_u16(instr_lo,
                                           env->active_tc.PC + 2);
                        trap_instr = (trap_instr << 16) | instr_lo;
                        code = ((trap_instr >> 6) & ((1 << 20) - 1));
                        /* Unfortunately, microMIPS also suffers from
                           the old assembler bug...  */
                        if (code >= (1 << 10)) {
                ret = get_user_u16(trap_instr, env->active_tc.PC);
                code = (trap_instr >> 6) & 0x3f;
                ret = get_user_u32(trap_instr, env->active_tc.PC);
            /* As described in the original Linux kernel code, the
             * below checks on 'code' are to work around an old
            code = ((trap_instr >> 6) & ((1 << 20) - 1));
            if (code >= (1 << 10)) {
            if (do_break(env, &info, code) != 0) {
            abi_ulong trap_instr;
            unsigned int code = 0;
            if (env->hflags & MIPS_HFLAG_M16) {
                /* microMIPS mode */
                ret = get_user_u16(instr[0], env->active_tc.PC) ||
                      get_user_u16(instr[1], env->active_tc.PC + 2);
                trap_instr = (instr[0] << 16) | instr[1];
                ret = get_user_u32(trap_instr, env->active_tc.PC);
            /* The immediate versions don't provide a code.  */
            if (!(trap_instr & 0xFC000000)) {
                if (env->hflags & MIPS_HFLAG_M16) {
                    /* microMIPS mode */
                    code = ((trap_instr >> 12) & ((1 << 4) - 1));
                    code = ((trap_instr >> 6) & ((1 << 10) - 1));
            if (do_break(env, &info, code) != 0) {
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
            process_pending_signals(env);
2771 #ifdef TARGET_OPENRISC
2773 void cpu_loop(CPUOpenRISCState
*env
)
2775 CPUState
*cs
= CPU(openrisc_env_get_cpu(env
));
2781 trapnr
= cpu_exec(cs
);
2787 qemu_log_mask(CPU_LOG_INT
, "\nReset request, exit, pc is %#x\n", env
->pc
);
2791 qemu_log_mask(CPU_LOG_INT
, "\nBus error, exit, pc is %#x\n", env
->pc
);
2792 gdbsig
= TARGET_SIGBUS
;
2796 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2797 gdbsig
= TARGET_SIGSEGV
;
2800 qemu_log_mask(CPU_LOG_INT
, "\nTick time interrupt pc is %#x\n", env
->pc
);
2803 qemu_log_mask(CPU_LOG_INT
, "\nAlignment pc is %#x\n", env
->pc
);
2804 gdbsig
= TARGET_SIGBUS
;
            qemu_log_mask(CPU_LOG_INT, "\nIllegal instruction, pc is %#x\n",
                          env->pc);
            gdbsig = TARGET_SIGILL;
            qemu_log_mask(CPU_LOG_INT, "\nExternal interrupt, pc is %#x\n",
                          env->pc);
2815 qemu_log_mask(CPU_LOG_INT
, "\nTLB miss\n");
2818 qemu_log_mask(CPU_LOG_INT
, "\nRange\n");
2819 gdbsig
= TARGET_SIGSEGV
;
2822 env
->pc
+= 4; /* 0xc00; */
2823 ret
= do_syscall(env
,
2824 env
->gpr
[11], /* return value */
2825 env
->gpr
[3], /* r3 - r7 are params */
2831 if (ret
== -TARGET_ERESTARTSYS
) {
2833 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
2838 qemu_log_mask(CPU_LOG_INT
, "\nFloating point error\n");
2841 qemu_log_mask(CPU_LOG_INT
, "\nTrap\n");
2842 gdbsig
= TARGET_SIGTRAP
;
2845 qemu_log_mask(CPU_LOG_INT
, "\nNR\n");
2848 EXCP_DUMP(env
, "\nqemu: unhandled CPU exception %#x - aborting\n",
2850 gdbsig
= TARGET_SIGILL
;
2854 gdb_handlesig(cs
, gdbsig
);
2855 if (gdbsig
!= TARGET_SIGTRAP
) {
2860 process_pending_signals(env
);
2864 #endif /* TARGET_OPENRISC */
2867 void cpu_loop(CPUSH4State
*env
)
2869 CPUState
*cs
= CPU(sh_env_get_cpu(env
));
2871 target_siginfo_t info
;
2875 trapnr
= cpu_exec(cs
);
2881 ret
= do_syscall(env
,
2890 if (ret
== -TARGET_ERESTARTSYS
) {
2892 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
2893 env
->gregs
[0] = ret
;
2896 case EXCP_INTERRUPT
:
2897 /* just indicate that signals should be handled asap */
2903 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2906 info
.si_signo
= sig
;
2908 info
.si_code
= TARGET_TRAP_BRKPT
;
2909 queue_signal(env
, info
.si_signo
, &info
);
2915 info
.si_signo
= TARGET_SIGSEGV
;
2917 info
.si_code
= TARGET_SEGV_MAPERR
;
2918 info
._sifields
._sigfault
._addr
= env
->tea
;
2919 queue_signal(env
, info
.si_signo
, &info
);
2923 printf ("Unhandled trap: 0x%x\n", trapnr
);
2924 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2927 process_pending_signals (env
);
2933 void cpu_loop(CPUCRISState
*env
)
2935 CPUState
*cs
= CPU(cris_env_get_cpu(env
));
2937 target_siginfo_t info
;
2941 trapnr
= cpu_exec(cs
);
2946 info
.si_signo
= TARGET_SIGSEGV
;
2948 /* XXX: check env->error_code */
2949 info
.si_code
= TARGET_SEGV_MAPERR
;
2950 info
._sifields
._sigfault
._addr
= env
->pregs
[PR_EDA
];
2951 queue_signal(env
, info
.si_signo
, &info
);
2954 case EXCP_INTERRUPT
:
2955 /* just indicate that signals should be handled asap */
2958 ret
= do_syscall(env
,
2967 if (ret
== -TARGET_ERESTARTSYS
) {
2969 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
2970 env
->regs
[10] = ret
;
2977 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2980 info
.si_signo
= sig
;
2982 info
.si_code
= TARGET_TRAP_BRKPT
;
2983 queue_signal(env
, info
.si_signo
, &info
);
2988 printf ("Unhandled trap: 0x%x\n", trapnr
);
2989 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2992 process_pending_signals (env
);
2997 #ifdef TARGET_MICROBLAZE
2998 void cpu_loop(CPUMBState
*env
)
3000 CPUState
*cs
= CPU(mb_env_get_cpu(env
));
3002 target_siginfo_t info
;
3006 trapnr
= cpu_exec(cs
);
3011 info
.si_signo
= TARGET_SIGSEGV
;
3013 /* XXX: check env->error_code */
3014 info
.si_code
= TARGET_SEGV_MAPERR
;
3015 info
._sifields
._sigfault
._addr
= 0;
3016 queue_signal(env
, info
.si_signo
, &info
);
3019 case EXCP_INTERRUPT
:
3020 /* just indicate that signals should be handled asap */
3023 /* Return address is 4 bytes after the call. */
3025 env
->sregs
[SR_PC
] = env
->regs
[14];
3026 ret
= do_syscall(env
,
3035 if (ret
== -TARGET_ERESTARTSYS
) {
3036 /* Wind back to before the syscall. */
3037 env
->sregs
[SR_PC
] -= 4;
3038 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
3041 /* All syscall exits result in guest r14 being equal to the
3042 * PC we return to, because the kernel syscall exit "rtbd" does
3043 * this. (This is true even for sigreturn(); note that r14 is
3044 * not a userspace-usable register, as the kernel may clobber it
3047 env
->regs
[14] = env
->sregs
[SR_PC
];
3050 env
->regs
[17] = env
->sregs
[SR_PC
] + 4;
3051 if (env
->iflags
& D_FLAG
) {
3052 env
->sregs
[SR_ESR
] |= 1 << 12;
3053 env
->sregs
[SR_PC
] -= 4;
3054 /* FIXME: if branch was immed, replay the imm as well. */
3057 env
->iflags
&= ~(IMM_FLAG
| D_FLAG
);
3059 switch (env
->sregs
[SR_ESR
] & 31) {
3060 case ESR_EC_DIVZERO
:
3061 info
.si_signo
= TARGET_SIGFPE
;
3063 info
.si_code
= TARGET_FPE_FLTDIV
;
3064 info
._sifields
._sigfault
._addr
= 0;
3065 queue_signal(env
, info
.si_signo
, &info
);
3068 info
.si_signo
= TARGET_SIGFPE
;
3070 if (env
->sregs
[SR_FSR
] & FSR_IO
) {
3071 info
.si_code
= TARGET_FPE_FLTINV
;
3073 if (env
->sregs
[SR_FSR
] & FSR_DZ
) {
3074 info
.si_code
= TARGET_FPE_FLTDIV
;
3076 info
._sifields
._sigfault
._addr
= 0;
3077 queue_signal(env
, info
.si_signo
, &info
);
3080 printf ("Unhandled hw-exception: 0x%x\n",
3081 env
->sregs
[SR_ESR
] & ESR_EC_MASK
);
3082 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3091 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3094 info
.si_signo
= sig
;
3096 info
.si_code
= TARGET_TRAP_BRKPT
;
3097 queue_signal(env
, info
.si_signo
, &info
);
3102 printf ("Unhandled trap: 0x%x\n", trapnr
);
3103 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3106 process_pending_signals (env
);
3113 void cpu_loop(CPUM68KState
*env
)
3115 CPUState
*cs
= CPU(m68k_env_get_cpu(env
));
3118 target_siginfo_t info
;
3119 TaskState
*ts
= cs
->opaque
;
3123 trapnr
= cpu_exec(cs
);
3128 if (ts
->sim_syscalls
) {
3130 get_user_u16(nr
, env
->pc
+ 2);
3132 do_m68k_simcall(env
, nr
);
        case EXCP_HALT_INSN:
            /* Semihosting syscall.  */
            do_m68k_semihosting(env, env->dregs[0]);
3145 case EXCP_UNSUPPORTED
:
3147 info
.si_signo
= TARGET_SIGILL
;
3149 info
.si_code
= TARGET_ILL_ILLOPN
;
3150 info
._sifields
._sigfault
._addr
= env
->pc
;
3151 queue_signal(env
, info
.si_signo
, &info
);
3156 ts
->sim_syscalls
= 0;
3159 ret
= do_syscall(env
,
3168 if (ret
== -TARGET_ERESTARTSYS
) {
3170 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
3171 env
->dregs
[0] = ret
;
3175 case EXCP_INTERRUPT
:
3176 /* just indicate that signals should be handled asap */
3180 info
.si_signo
= TARGET_SIGSEGV
;
3182 /* XXX: check env->error_code */
3183 info
.si_code
= TARGET_SEGV_MAPERR
;
3184 info
._sifields
._sigfault
._addr
= env
->mmu
.ar
;
3185 queue_signal(env
, info
.si_signo
, &info
);
3192 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3195 info
.si_signo
= sig
;
3197 info
.si_code
= TARGET_TRAP_BRKPT
;
3198 queue_signal(env
, info
.si_signo
, &info
);
3203 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
3206 process_pending_signals(env
);
3209 #endif /* TARGET_M68K */
3212 static void do_store_exclusive(CPUAlphaState
*env
, int reg
, int quad
)
3214 target_ulong addr
, val
, tmp
;
3215 target_siginfo_t info
;
3218 addr
= env
->lock_addr
;
3219 tmp
= env
->lock_st_addr
;
3220 env
->lock_addr
= -1;
3221 env
->lock_st_addr
= 0;
3227 if (quad
? get_user_s64(val
, addr
) : get_user_s32(val
, addr
)) {
3231 if (val
== env
->lock_value
) {
3233 if (quad
? put_user_u64(tmp
, addr
) : put_user_u32(tmp
, addr
)) {
3250 info
.si_signo
= TARGET_SIGSEGV
;
3252 info
.si_code
= TARGET_SEGV_MAPERR
;
3253 info
._sifields
._sigfault
._addr
= addr
;
3254 queue_signal(env
, TARGET_SIGSEGV
, &info
);
3257 void cpu_loop(CPUAlphaState
*env
)
3259 CPUState
*cs
= CPU(alpha_env_get_cpu(env
));
3261 target_siginfo_t info
;
3266 trapnr
= cpu_exec(cs
);
3269 /* All of the traps imply a transition through PALcode, which
3270 implies an REI instruction has been executed. Which means
3271 that the intr_flag should be cleared. */
3276 fprintf(stderr
, "Reset requested. Exit\n");
3280 fprintf(stderr
, "Machine check exception. Exit\n");
3283 case EXCP_SMP_INTERRUPT
:
3284 case EXCP_CLK_INTERRUPT
:
3285 case EXCP_DEV_INTERRUPT
:
3286 fprintf(stderr
, "External interrupt. Exit\n");
3290 env
->lock_addr
= -1;
3291 info
.si_signo
= TARGET_SIGSEGV
;
3293 info
.si_code
= (page_get_flags(env
->trap_arg0
) & PAGE_VALID
3294 ? TARGET_SEGV_ACCERR
: TARGET_SEGV_MAPERR
);
3295 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
3296 queue_signal(env
, info
.si_signo
, &info
);
3299 env
->lock_addr
= -1;
3300 info
.si_signo
= TARGET_SIGBUS
;
3302 info
.si_code
= TARGET_BUS_ADRALN
;
3303 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
3304 queue_signal(env
, info
.si_signo
, &info
);
3308 env
->lock_addr
= -1;
3309 info
.si_signo
= TARGET_SIGILL
;
3311 info
.si_code
= TARGET_ILL_ILLOPC
;
3312 info
._sifields
._sigfault
._addr
= env
->pc
;
3313 queue_signal(env
, info
.si_signo
, &info
);
3316 env
->lock_addr
= -1;
3317 info
.si_signo
= TARGET_SIGFPE
;
3319 info
.si_code
= TARGET_FPE_FLTINV
;
3320 info
._sifields
._sigfault
._addr
= env
->pc
;
3321 queue_signal(env
, info
.si_signo
, &info
);
3324 /* No-op. Linux simply re-enables the FPU. */
3327 env
->lock_addr
= -1;
3328 switch (env
->error_code
) {
3331 info
.si_signo
= TARGET_SIGTRAP
;
3333 info
.si_code
= TARGET_TRAP_BRKPT
;
3334 info
._sifields
._sigfault
._addr
= env
->pc
;
3335 queue_signal(env
, info
.si_signo
, &info
);
3339 info
.si_signo
= TARGET_SIGTRAP
;
3342 info
._sifields
._sigfault
._addr
= env
->pc
;
3343 queue_signal(env
, info
.si_signo
, &info
);
3347 trapnr
= env
->ir
[IR_V0
];
3348 sysret
= do_syscall(env
, trapnr
,
3349 env
->ir
[IR_A0
], env
->ir
[IR_A1
],
3350 env
->ir
[IR_A2
], env
->ir
[IR_A3
],
3351 env
->ir
[IR_A4
], env
->ir
[IR_A5
],
3353 if (sysret
== -TARGET_ERESTARTSYS
) {
3357 if (sysret
== -TARGET_QEMU_ESIGRETURN
) {
3360 /* Syscall writes 0 to V0 to bypass error check, similar
3361 to how this is handled internal to Linux kernel.
3362 (Ab)use trapnr temporarily as boolean indicating error. */
3363 trapnr
= (env
->ir
[IR_V0
] != 0 && sysret
< 0);
3364 env
->ir
[IR_V0
] = (trapnr
? -sysret
: sysret
);
3365 env
->ir
[IR_A3
] = trapnr
;
3369 /* ??? We can probably elide the code using page_unprotect
3370 that is checking for self-modifying code. Instead we
3371 could simply call tb_flush here. Until we work out the
3372 changes required to turn off the extra write protection,
3373 this can be a no-op. */
3377 /* Handled in the translator for usermode. */
3381 /* Handled in the translator for usermode. */
3385 info
.si_signo
= TARGET_SIGFPE
;
3386 switch (env
->ir
[IR_A0
]) {
3387 case TARGET_GEN_INTOVF
:
3388 info
.si_code
= TARGET_FPE_INTOVF
;
3390 case TARGET_GEN_INTDIV
:
3391 info
.si_code
= TARGET_FPE_INTDIV
;
3393 case TARGET_GEN_FLTOVF
:
3394 info
.si_code
= TARGET_FPE_FLTOVF
;
3396 case TARGET_GEN_FLTUND
:
3397 info
.si_code
= TARGET_FPE_FLTUND
;
3399 case TARGET_GEN_FLTINV
:
3400 info
.si_code
= TARGET_FPE_FLTINV
;
3402 case TARGET_GEN_FLTINE
:
3403 info
.si_code
= TARGET_FPE_FLTRES
;
3405 case TARGET_GEN_ROPRAND
:
3409 info
.si_signo
= TARGET_SIGTRAP
;
3414 info
._sifields
._sigfault
._addr
= env
->pc
;
3415 queue_signal(env
, info
.si_signo
, &info
);
3422 info
.si_signo
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3423 if (info
.si_signo
) {
3424 env
->lock_addr
= -1;
3426 info
.si_code
= TARGET_TRAP_BRKPT
;
3427 queue_signal(env
, info
.si_signo
, &info
);
3432 do_store_exclusive(env
, env
->error_code
, trapnr
- EXCP_STL_C
);
3434 case EXCP_INTERRUPT
:
3435 /* Just indicate that signals should be handled asap. */
3438 printf ("Unhandled trap: 0x%x\n", trapnr
);
3439 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3442 process_pending_signals (env
);
3445 #endif /* TARGET_ALPHA */
3448 void cpu_loop(CPUS390XState
*env
)
3450 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
3452 target_siginfo_t info
;
3458 trapnr
= cpu_exec(cs
);
3461 case EXCP_INTERRUPT
:
3462 /* Just indicate that signals should be handled asap. */
3466 n
= env
->int_svc_code
;
3468 /* syscalls > 255 */
3471 env
->psw
.addr
+= env
->int_svc_ilen
;
3472 ret
= do_syscall(env
, n
, env
->regs
[2], env
->regs
[3],
3473 env
->regs
[4], env
->regs
[5],
3474 env
->regs
[6], env
->regs
[7], 0, 0);
3475 if (ret
== -TARGET_ERESTARTSYS
) {
3476 env
->psw
.addr
-= env
->int_svc_ilen
;
3477 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
3483 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3485 n
= TARGET_TRAP_BRKPT
;
3490 n
= env
->int_pgm_code
;
3493 case PGM_PRIVILEGED
:
3494 sig
= TARGET_SIGILL
;
3495 n
= TARGET_ILL_ILLOPC
;
3497 case PGM_PROTECTION
:
3498 case PGM_ADDRESSING
:
3499 sig
= TARGET_SIGSEGV
;
3500 /* XXX: check env->error_code */
3501 n
= TARGET_SEGV_MAPERR
;
3502 addr
= env
->__excp_addr
;
3505 case PGM_SPECIFICATION
:
3506 case PGM_SPECIAL_OP
:
3509 sig
= TARGET_SIGILL
;
3510 n
= TARGET_ILL_ILLOPN
;
3513 case PGM_FIXPT_OVERFLOW
:
3514 sig
= TARGET_SIGFPE
;
3515 n
= TARGET_FPE_INTOVF
;
3517 case PGM_FIXPT_DIVIDE
:
3518 sig
= TARGET_SIGFPE
;
3519 n
= TARGET_FPE_INTDIV
;
3523 n
= (env
->fpc
>> 8) & 0xff;
3525 /* compare-and-trap */
3528 /* An IEEE exception, simulated or otherwise. */
3530 n
= TARGET_FPE_FLTINV
;
3531 } else if (n
& 0x40) {
3532 n
= TARGET_FPE_FLTDIV
;
3533 } else if (n
& 0x20) {
3534 n
= TARGET_FPE_FLTOVF
;
3535 } else if (n
& 0x10) {
3536 n
= TARGET_FPE_FLTUND
;
3537 } else if (n
& 0x08) {
3538 n
= TARGET_FPE_FLTRES
;
3540 /* ??? Quantum exception; BFP, DFP error. */
3543 sig
= TARGET_SIGFPE
;
3548 fprintf(stderr
, "Unhandled program exception: %#x\n", n
);
3549 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3555 addr
= env
->psw
.addr
;
3557 info
.si_signo
= sig
;
3560 info
._sifields
._sigfault
._addr
= addr
;
3561 queue_signal(env
, info
.si_signo
, &info
);
3565 fprintf(stderr
, "Unhandled trap: 0x%x\n", trapnr
);
3566 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3569 process_pending_signals (env
);
3573 #endif /* TARGET_S390X */
3575 #ifdef TARGET_TILEGX
3577 static void gen_sigill_reg(CPUTLGState
*env
)
3579 target_siginfo_t info
;
3581 info
.si_signo
= TARGET_SIGILL
;
3583 info
.si_code
= TARGET_ILL_PRVREG
;
3584 info
._sifields
._sigfault
._addr
= env
->pc
;
3585 queue_signal(env
, info
.si_signo
, &info
);
3588 static void do_signal(CPUTLGState
*env
, int signo
, int sigcode
)
3590 target_siginfo_t info
;
3592 info
.si_signo
= signo
;
3594 info
._sifields
._sigfault
._addr
= env
->pc
;
3596 if (signo
== TARGET_SIGSEGV
) {
3597 /* The passed in sigcode is a dummy; check for a page mapping
3598 and pass either MAPERR or ACCERR. */
3599 target_ulong addr
= env
->excaddr
;
3600 info
._sifields
._sigfault
._addr
= addr
;
3601 if (page_check_range(addr
, 1, PAGE_VALID
) < 0) {
3602 sigcode
= TARGET_SEGV_MAPERR
;
3604 sigcode
= TARGET_SEGV_ACCERR
;
3607 info
.si_code
= sigcode
;
3609 queue_signal(env
, info
.si_signo
, &info
);
3612 static void gen_sigsegv_maperr(CPUTLGState
*env
, target_ulong addr
)
3614 env
->excaddr
= addr
;
3615 do_signal(env
, TARGET_SIGSEGV
, 0);
3618 static void set_regval(CPUTLGState
*env
, uint8_t reg
, uint64_t val
)
3620 if (unlikely(reg
>= TILEGX_R_COUNT
)) {
3631 gen_sigill_reg(env
);
3634 g_assert_not_reached();
3637 env
->regs
[reg
] = val
;
 * Compare the 8-byte contents of the CmpValue SPR with the 8-byte value in
 * memory at the address held in the first source register. If the values are
 * not equal, then no memory operation is performed. If the values are equal,
 * the 8-byte quantity from the second source register is written into memory
 * at the address held in the first source register. In either case, the result
 * of the instruction is the value read from memory. The compare and write to
 * memory are atomic and thus can be used for synchronization purposes. This
 * instruction only operates for addresses aligned to an 8-byte boundary.
 * Unaligned memory access causes an Unaligned Data Reference interrupt.
 *
 * Functional Description (64-bit)
 *       uint64_t memVal = memoryReadDoubleWord (rf[SrcA]);
 *       rf[Dest] = memVal;
 *       if (memVal == SPR[CmpValueSPR])
 *           memoryWriteDoubleWord (rf[SrcA], rf[SrcB]);
 *
 * Functional Description (32-bit)
 *       uint64_t memVal = signExtend32 (memoryReadWord (rf[SrcA]));
 *       rf[Dest] = memVal;
 *       if (memVal == signExtend32 (SPR[CmpValueSPR]))
 *           memoryWriteWord (rf[SrcA], rf[SrcB]);
 *
 * This function also processes exch and exch4 which need not process SPR.
 */
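/*
 * Hedged sketch (kept under "#if 0", not part of the original file): the
 * 32-bit cmpexch4 path from the functional description above, written out in
 * C.  "guest_addr" is just a placeholder name; the real handling, including
 * SIGSEGV delivery on a faulting access, is in do_exch() below.
 */
#if 0
    target_ulong guest_addr = env->atomic_srca;        /* rf[SrcA] */
    int32_t mem_val;

    get_user_s32(mem_val, guest_addr);                 /* rf[Dest] = memVal */
    if (mem_val == (int32_t)env->spregs[TILEGX_SPR_CMPEXCH]) {
        put_user_u32(env->atomic_srcb, guest_addr);    /* conditional store */
    }
#endif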
3666 static void do_exch(CPUTLGState
*env
, bool quad
, bool cmp
)
3669 target_long val
, sprval
;
3673 addr
= env
->atomic_srca
;
3674 if (quad
? get_user_s64(val
, addr
) : get_user_s32(val
, addr
)) {
3675 goto sigsegv_maperr
;
3680 sprval
= env
->spregs
[TILEGX_SPR_CMPEXCH
];
3682 sprval
= sextract64(env
->spregs
[TILEGX_SPR_CMPEXCH
], 0, 32);
3686 if (!cmp
|| val
== sprval
) {
3687 target_long valb
= env
->atomic_srcb
;
3688 if (quad
? put_user_u64(valb
, addr
) : put_user_u32(valb
, addr
)) {
3689 goto sigsegv_maperr
;
3693 set_regval(env
, env
->atomic_dstr
, val
);
3699 gen_sigsegv_maperr(env
, addr
);
3702 static void do_fetch(CPUTLGState
*env
, int trapnr
, bool quad
)
3706 target_long val
, valb
;
3710 addr
= env
->atomic_srca
;
3711 valb
= env
->atomic_srcb
;
3712 if (quad
? get_user_s64(val
, addr
) : get_user_s32(val
, addr
)) {
3713 goto sigsegv_maperr
;
3717 case TILEGX_EXCP_OPCODE_FETCHADD
:
3718 case TILEGX_EXCP_OPCODE_FETCHADD4
:
3721 case TILEGX_EXCP_OPCODE_FETCHADDGEZ
:
3727 case TILEGX_EXCP_OPCODE_FETCHADDGEZ4
:
3729 if ((int32_t)valb
< 0) {
3733 case TILEGX_EXCP_OPCODE_FETCHAND
:
3734 case TILEGX_EXCP_OPCODE_FETCHAND4
:
3737 case TILEGX_EXCP_OPCODE_FETCHOR
:
3738 case TILEGX_EXCP_OPCODE_FETCHOR4
:
3742 g_assert_not_reached();
3746 if (quad
? put_user_u64(valb
, addr
) : put_user_u32(valb
, addr
)) {
3747 goto sigsegv_maperr
;
3751 set_regval(env
, env
->atomic_dstr
, val
);
3757 gen_sigsegv_maperr(env
, addr
);
3760 void cpu_loop(CPUTLGState
*env
)
3762 CPUState
*cs
= CPU(tilegx_env_get_cpu(env
));
3767 trapnr
= cpu_exec(cs
);
3770 case TILEGX_EXCP_SYSCALL
:
3772 abi_ulong ret
= do_syscall(env
, env
->regs
[TILEGX_R_NR
],
3773 env
->regs
[0], env
->regs
[1],
3774 env
->regs
[2], env
->regs
[3],
3775 env
->regs
[4], env
->regs
[5],
3776 env
->regs
[6], env
->regs
[7]);
3777 if (ret
== -TARGET_ERESTARTSYS
) {
3779 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
3780 env
->regs
[TILEGX_R_RE
] = ret
;
3781 env
->regs
[TILEGX_R_ERR
] = TILEGX_IS_ERRNO(ret
) ? -ret
: 0;
3785 case TILEGX_EXCP_OPCODE_EXCH
:
3786 do_exch(env
, true, false);
3788 case TILEGX_EXCP_OPCODE_EXCH4
:
3789 do_exch(env
, false, false);
3791 case TILEGX_EXCP_OPCODE_CMPEXCH
:
3792 do_exch(env
, true, true);
3794 case TILEGX_EXCP_OPCODE_CMPEXCH4
:
3795 do_exch(env
, false, true);
3797 case TILEGX_EXCP_OPCODE_FETCHADD
:
3798 case TILEGX_EXCP_OPCODE_FETCHADDGEZ
:
3799 case TILEGX_EXCP_OPCODE_FETCHAND
:
3800 case TILEGX_EXCP_OPCODE_FETCHOR
:
3801 do_fetch(env
, trapnr
, true);
3803 case TILEGX_EXCP_OPCODE_FETCHADD4
:
3804 case TILEGX_EXCP_OPCODE_FETCHADDGEZ4
:
3805 case TILEGX_EXCP_OPCODE_FETCHAND4
:
3806 case TILEGX_EXCP_OPCODE_FETCHOR4
:
3807 do_fetch(env
, trapnr
, false);
3809 case TILEGX_EXCP_SIGNAL
:
3810 do_signal(env
, env
->signo
, env
->sigcode
);
3812 case TILEGX_EXCP_REG_IDN_ACCESS
:
3813 case TILEGX_EXCP_REG_UDN_ACCESS
:
3814 gen_sigill_reg(env
);
3817 fprintf(stderr
, "trapnr is %d[0x%x].\n", trapnr
, trapnr
);
3818 g_assert_not_reached();
3820 process_pending_signals(env
);
3826 THREAD CPUState
*thread_cpu
;
3828 void task_settid(TaskState
*ts
)
3830 if (ts
->ts_tid
== 0) {
3831 ts
->ts_tid
= (pid_t
)syscall(SYS_gettid
);
3835 void stop_all_tasks(void)
3838 * We trust that when using NPTL, start_exclusive()
3839 * handles thread stopping correctly.
3844 /* Assumes contents are already zeroed. */
3845 void init_task_state(TaskState
*ts
)
3850 CPUArchState
*cpu_copy(CPUArchState
*env
)
3852 CPUState
*cpu
= ENV_GET_CPU(env
);
3853 CPUState
*new_cpu
= cpu_init(cpu_model
);
3854 CPUArchState
*new_env
= new_cpu
->env_ptr
;
3858 /* Reset non arch specific state */
3861 memcpy(new_env
, env
, sizeof(CPUArchState
));
3863 /* Clone all break/watchpoints.
3864 Note: Once we support ptrace with hw-debug register access, make sure
3865 BP_CPU break/watchpoints are handled correctly on clone. */
3866 QTAILQ_INIT(&new_cpu
->breakpoints
);
3867 QTAILQ_INIT(&new_cpu
->watchpoints
);
3868 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
3869 cpu_breakpoint_insert(new_cpu
, bp
->pc
, bp
->flags
, NULL
);
3871 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
3872 cpu_watchpoint_insert(new_cpu
, wp
->vaddr
, wp
->len
, wp
->flags
, NULL
);
3878 static void handle_arg_help(const char *arg
)
3880 usage(EXIT_SUCCESS
);
3883 static void handle_arg_log(const char *arg
)
3887 mask
= qemu_str_to_log_mask(arg
);
3889 qemu_print_log_usage(stdout
);
3892 qemu_log_needs_buffers();
3896 static void handle_arg_log_filename(const char *arg
)
3898 qemu_set_log_filename(arg
, &error_fatal
);
3901 static void handle_arg_set_env(const char *arg
)
3903 char *r
, *p
, *token
;
3904 r
= p
= strdup(arg
);
3905 while ((token
= strsep(&p
, ",")) != NULL
) {
3906 if (envlist_setenv(envlist
, token
) != 0) {
3907 usage(EXIT_FAILURE
);
3913 static void handle_arg_unset_env(const char *arg
)
3915 char *r
, *p
, *token
;
3916 r
= p
= strdup(arg
);
3917 while ((token
= strsep(&p
, ",")) != NULL
) {
3918 if (envlist_unsetenv(envlist
, token
) != 0) {
3919 usage(EXIT_FAILURE
);
3925 static void handle_arg_argv0(const char *arg
)
3927 argv0
= strdup(arg
);
static void handle_arg_stack_size(const char *arg)
    guest_stack_size = strtoul(arg, &p, 0);
    if (guest_stack_size == 0) {
        usage(EXIT_FAILURE);
        guest_stack_size *= 1024 * 1024;
    } else if (*p == 'k' || *p == 'K') {
        guest_stack_size *= 1024;
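/*
 * Usage note: -s / QEMU_STACK_SIZE takes a byte count with an optional
 * k/K suffix (kibibytes) and, judging from the 1024 * 1024 branch above,
 * an elided mebibyte suffix as well, e.g. "-s 16M" or "-s 16384k".
 */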
3945 static void handle_arg_ld_prefix(const char *arg
)
3947 interp_prefix
= strdup(arg
);
3950 static void handle_arg_pagesize(const char *arg
)
3952 qemu_host_page_size
= atoi(arg
);
3953 if (qemu_host_page_size
== 0 ||
3954 (qemu_host_page_size
& (qemu_host_page_size
- 1)) != 0) {
3955 fprintf(stderr
, "page size must be a power of two\n");
3960 static void handle_arg_randseed(const char *arg
)
3962 unsigned long long seed
;
3964 if (parse_uint_full(arg
, &seed
, 0) != 0 || seed
> UINT_MAX
) {
3965 fprintf(stderr
, "Invalid seed number: %s\n", arg
);
3971 static void handle_arg_gdb(const char *arg
)
3973 gdbstub_port
= atoi(arg
);
3976 static void handle_arg_uname(const char *arg
)
3978 qemu_uname_release
= strdup(arg
);
3981 static void handle_arg_cpu(const char *arg
)
3983 cpu_model
= strdup(arg
);
3984 if (cpu_model
== NULL
|| is_help_option(cpu_model
)) {
3985 /* XXX: implement xxx_cpu_list for targets that still miss it */
3986 #if defined(cpu_list_id)
3987 cpu_list_id(stdout
, &fprintf
, "");
3988 #elif defined(cpu_list)
3989 cpu_list(stdout
, &fprintf
); /* deprecated */
3991 /* TODO: add cpu selection for alpha, microblaze, unicore32, s390x. */
3992 printf("Target ignores cpu selection\n");
3998 static void handle_arg_guest_base(const char *arg
)
4000 guest_base
= strtol(arg
, NULL
, 0);
4001 have_guest_base
= 1;
static void handle_arg_reserved_va(const char *arg)
    reserved_va = strtoul(arg, &p, 0);
        unsigned long unshifted = reserved_va;
        reserved_va <<= shift;
        if (((reserved_va >> shift) != unshifted)
#if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
            || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
            fprintf(stderr, "Reserved virtual address too big\n");
        fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
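/*
 * Usage note: -R / QEMU_RESERVED_VA takes a size, e.g. "-R 0x20000000" to
 * reserve 512 MiB of guest virtual address space up front; the elided code
 * around 'shift' above presumably translates a size suffix into that shift
 * before the overflow check.
 */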
4040 static void handle_arg_singlestep(const char *arg
)
4045 static void handle_arg_strace(const char *arg
)
4050 static void handle_arg_version(const char *arg
)
4052 printf("qemu-" TARGET_NAME
" version " QEMU_VERSION QEMU_PKGVERSION
4053 ", Copyright (c) 2003-2008 Fabrice Bellard\n");
4057 static char *trace_file
;
4058 static void handle_arg_trace(const char *arg
)
4061 trace_file
= trace_opt_parse(arg
);
4064 struct qemu_argument
{
4068 void (*handle_opt
)(const char *arg
);
4069 const char *example
;
4073 static const struct qemu_argument arg_table
[] = {
4074 {"h", "", false, handle_arg_help
,
4075 "", "print this help"},
4076 {"help", "", false, handle_arg_help
,
4078 {"g", "QEMU_GDB", true, handle_arg_gdb
,
4079 "port", "wait gdb connection to 'port'"},
4080 {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix
,
4081 "path", "set the elf interpreter prefix to 'path'"},
4082 {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size
,
4083 "size", "set the stack size to 'size' bytes"},
4084 {"cpu", "QEMU_CPU", true, handle_arg_cpu
,
4085 "model", "select CPU (-cpu help for list)"},
4086 {"E", "QEMU_SET_ENV", true, handle_arg_set_env
,
4087 "var=value", "sets targets environment variable (see below)"},
4088 {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env
,
4089 "var", "unsets targets environment variable (see below)"},
4090 {"0", "QEMU_ARGV0", true, handle_arg_argv0
,
4091 "argv0", "forces target process argv[0] to be 'argv0'"},
4092 {"r", "QEMU_UNAME", true, handle_arg_uname
,
4093 "uname", "set qemu uname release string to 'uname'"},
4094 {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base
,
4095 "address", "set guest_base address to 'address'"},
4096 {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va
,
4097 "size", "reserve 'size' bytes for guest virtual address space"},
4098 {"d", "QEMU_LOG", true, handle_arg_log
,
4099 "item[,...]", "enable logging of specified items "
4100 "(use '-d help' for a list of items)"},
4101 {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename
,
4102 "logfile", "write logs to 'logfile' (default stderr)"},
4103 {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize
,
4104 "pagesize", "set the host page size to 'pagesize'"},
4105 {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep
,
4106 "", "run in singlestep mode"},
4107 {"strace", "QEMU_STRACE", false, handle_arg_strace
,
4108 "", "log system calls"},
4109 {"seed", "QEMU_RAND_SEED", true, handle_arg_randseed
,
4110 "", "Seed for pseudo-random number generator"},
4111 {"trace", "QEMU_TRACE", true, handle_arg_trace
,
4112 "", "[[enable=]<pattern>][,events=<file>][,file=<file>]"},
4113 {"version", "QEMU_VERSION", false, handle_arg_version
,
4114 "", "display version information and exit"},
4115 {NULL
, NULL
, false, NULL
, NULL
, NULL
}
4118 static void QEMU_NORETURN
usage(int exitcode
)
4120 const struct qemu_argument
*arginfo
;
4124 printf("usage: qemu-" TARGET_NAME
" [options] program [arguments...]\n"
4125 "Linux CPU emulator (compiled for " TARGET_NAME
" emulation)\n"
4127 "Options and associated environment variables:\n"
4130 /* Calculate column widths. We must always have at least enough space
4131 * for the column header.
4133 maxarglen
= strlen("Argument");
4134 maxenvlen
= strlen("Env-variable");
4136 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
4137 int arglen
= strlen(arginfo
->argv
);
4138 if (arginfo
->has_arg
) {
4139 arglen
+= strlen(arginfo
->example
) + 1;
4141 if (strlen(arginfo
->env
) > maxenvlen
) {
4142 maxenvlen
= strlen(arginfo
->env
);
4144 if (arglen
> maxarglen
) {
4149 printf("%-*s %-*s Description\n", maxarglen
+1, "Argument",
4150 maxenvlen
, "Env-variable");
4152 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
4153 if (arginfo
->has_arg
) {
4154 printf("-%s %-*s %-*s %s\n", arginfo
->argv
,
4155 (int)(maxarglen
- strlen(arginfo
->argv
) - 1),
4156 arginfo
->example
, maxenvlen
, arginfo
->env
, arginfo
->help
);
4158 printf("-%-*s %-*s %s\n", maxarglen
, arginfo
->argv
,
4159 maxenvlen
, arginfo
->env
,
4166 "QEMU_LD_PREFIX = %s\n"
4167 "QEMU_STACK_SIZE = %ld byte\n",
4172 "You can use -E and -U options or the QEMU_SET_ENV and\n"
4173 "QEMU_UNSET_ENV environment variables to set and unset\n"
4174 "environment variables for the target process.\n"
4175 "It is possible to provide several variables by separating them\n"
4176 "by commas in getsubopt(3) style. Additionally it is possible to\n"
4177 "provide the -E and -U options multiple times.\n"
4178 "The following lines are equivalent:\n"
4179 " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
4180 " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
4181 " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
4182 "Note that if you provide several changes to a single variable\n"
4183 "the last change will stay in effect.\n");
4188 static int parse_args(int argc
, char **argv
)
4192 const struct qemu_argument
*arginfo
;
4194 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
4195 if (arginfo
->env
== NULL
) {
4199 r
= getenv(arginfo
->env
);
4201 arginfo
->handle_opt(r
);
4207 if (optind
>= argc
) {
4216 if (!strcmp(r
, "-")) {
4219 /* Treat --foo the same as -foo. */
4224 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
4225 if (!strcmp(r
, arginfo
->argv
)) {
4226 if (arginfo
->has_arg
) {
4227 if (optind
>= argc
) {
4228 (void) fprintf(stderr
,
4229 "qemu: missing argument for option '%s'\n", r
);
4232 arginfo
->handle_opt(argv
[optind
]);
4235 arginfo
->handle_opt(NULL
);
4241 /* no option matched the current argv */
4242 if (arginfo
->handle_opt
== NULL
) {
4243 (void) fprintf(stderr
, "qemu: unknown option '%s'\n", r
);
4248 if (optind
>= argc
) {
4249 (void) fprintf(stderr
, "qemu: no user program specified\n");
4253 filename
= argv
[optind
];
4254 exec_path
= argv
[optind
];
4259 int main(int argc
, char **argv
)
4261 struct target_pt_regs regs1
, *regs
= ®s1
;
4262 struct image_info info1
, *info
= &info1
;
4263 struct linux_binprm bprm
;
4268 char **target_environ
, **wrk
;
4275 module_call_init(MODULE_INIT_QOM
);
4277 if ((envlist
= envlist_create()) == NULL
) {
4278 (void) fprintf(stderr
, "Unable to allocate envlist\n");
4282 /* add current environment into the list */
4283 for (wrk
= environ
; *wrk
!= NULL
; wrk
++) {
4284 (void) envlist_setenv(envlist
, *wrk
);
    /* Read the stack limit from the kernel.  If it's "unlimited",
       then we can do little else besides use the default.  */
    if (getrlimit(RLIMIT_STACK, &lim) == 0
        && lim.rlim_cur != RLIM_INFINITY
        && lim.rlim_cur == (target_long)lim.rlim_cur) {
        guest_stack_size = lim.rlim_cur;
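        /*
         * In other words, a finite host RLIMIT_STACK that fits in a
         * target_long is adopted as the guest stack size; e.g. a host
         * "ulimit -s 16384" (16 MiB) carries over to the guest, while
         * "ulimit -s unlimited" keeps the built-in default.
         */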
4302 qemu_add_opts(&qemu_trace_opts
);
4304 optind
= parse_args(argc
, argv
);
4306 if (!trace_init_backends()) {
4309 trace_init_file(trace_file
);
4312 memset(regs
, 0, sizeof(struct target_pt_regs
));
4314 /* Zero out image_info */
4315 memset(info
, 0, sizeof(struct image_info
));
4317 memset(&bprm
, 0, sizeof (bprm
));
4319 /* Scan interp_prefix dir for replacement files. */
4320 init_paths(interp_prefix
);
4322 init_qemu_uname_release();
4324 if (cpu_model
== NULL
) {
4325 #if defined(TARGET_I386)
4326 #ifdef TARGET_X86_64
4327 cpu_model
= "qemu64";
4329 cpu_model
= "qemu32";
4331 #elif defined(TARGET_ARM)
4333 #elif defined(TARGET_UNICORE32)
4335 #elif defined(TARGET_M68K)
4337 #elif defined(TARGET_SPARC)
4338 #ifdef TARGET_SPARC64
4339 cpu_model
= "TI UltraSparc II";
4341 cpu_model
= "Fujitsu MB86904";
4343 #elif defined(TARGET_MIPS)
4344 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
4349 #elif defined TARGET_OPENRISC
4350 cpu_model
= "or1200";
4351 #elif defined(TARGET_PPC)
4352 # ifdef TARGET_PPC64
4353 cpu_model
= "POWER8";
4357 #elif defined TARGET_SH4
4358 cpu_model
= TYPE_SH7785_CPU
;
4364 /* NOTE: we need to init the CPU at this stage to get
4365 qemu_host_page_size */
4366 cpu
= cpu_init(cpu_model
);
4368 fprintf(stderr
, "Unable to find CPU definition\n");
4376 if (getenv("QEMU_STRACE")) {
4380 if (getenv("QEMU_RAND_SEED")) {
4381 handle_arg_randseed(getenv("QEMU_RAND_SEED"));
4384 target_environ
= envlist_to_environ(envlist
, NULL
);
4385 envlist_free(envlist
);
4388 * Now that page sizes are configured in cpu_init() we can do
4389 * proper page alignment for guest_base.
4391 guest_base
= HOST_PAGE_ALIGN(guest_base
);
4393 if (reserved_va
|| have_guest_base
) {
4394 guest_base
= init_guest_space(guest_base
, reserved_va
, 0,
4396 if (guest_base
== (unsigned long)-1) {
4397 fprintf(stderr
, "Unable to reserve 0x%lx bytes of virtual address "
4398 "space for use as guest address space (check your virtual "
4399 "memory ulimit setting or reserve less using -R option)\n",
4405 mmap_next_start
= reserved_va
;
     * Read in mmap_min_addr kernel parameter.  This value is used
     * when loading the ELF image to determine whether guest_base
     * is needed.  It is also used in mmap_find_vma.
     */
    if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
        if (fscanf(fp, "%lu", &tmp) == 1) {
            mmap_min_addr = tmp;
            qemu_log_mask(CPU_LOG_PAGE, "host mmap_min_addr=0x%lx\n",
                          mmap_min_addr);
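            /*
             * On typical Linux hosts mmap_min_addr is non-zero (often 64 KiB),
             * in which case guest binaries that expect mappings below that
             * address must be relocated by picking a suitable guest_base.
             */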
4428 * Prepare copy of argv vector for target.
4430 target_argc
= argc
- optind
;
4431 target_argv
= calloc(target_argc
+ 1, sizeof (char *));
4432 if (target_argv
== NULL
) {
4433 (void) fprintf(stderr
, "Unable to allocate memory for target_argv\n");
4438 * If argv0 is specified (using '-0' switch) we replace
4439 * argv[0] pointer with the given one.
4442 if (argv0
!= NULL
) {
4443 target_argv
[i
++] = strdup(argv0
);
4445 for (; i
< target_argc
; i
++) {
4446 target_argv
[i
] = strdup(argv
[optind
+ i
]);
4448 target_argv
[target_argc
] = NULL
;
4450 ts
= g_new0(TaskState
, 1);
4451 init_task_state(ts
);
4452 /* build Task State */
4458 execfd
= qemu_getauxval(AT_EXECFD
);
4460 execfd
= open(filename
, O_RDONLY
);
4462 printf("Error while loading %s: %s\n", filename
, strerror(errno
));
4463 _exit(EXIT_FAILURE
);
4467 ret
= loader_exec(execfd
, filename
, target_argv
, target_environ
, regs
,
4470 printf("Error while loading %s: %s\n", filename
, strerror(-ret
));
4471 _exit(EXIT_FAILURE
);
4474 for (wrk
= target_environ
; *wrk
; wrk
++) {
4478 free(target_environ
);
4480 if (qemu_loglevel_mask(CPU_LOG_PAGE
)) {
4481 qemu_log("guest_base 0x%" PRIxPTR
"\n", guest_base
);
4484 qemu_log("start_brk 0x" TARGET_ABI_FMT_lx
"\n", info
->start_brk
);
4485 qemu_log("end_code 0x" TARGET_ABI_FMT_lx
"\n", info
->end_code
);
4486 qemu_log("start_code 0x" TARGET_ABI_FMT_lx
"\n",
4488 qemu_log("start_data 0x" TARGET_ABI_FMT_lx
"\n",
4490 qemu_log("end_data 0x" TARGET_ABI_FMT_lx
"\n", info
->end_data
);
4491 qemu_log("start_stack 0x" TARGET_ABI_FMT_lx
"\n",
4493 qemu_log("brk 0x" TARGET_ABI_FMT_lx
"\n", info
->brk
);
4494 qemu_log("entry 0x" TARGET_ABI_FMT_lx
"\n", info
->entry
);
4497 target_set_brk(info
->brk
);
4501 /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
4502 generating the prologue until now so that the prologue can take
4503 the real value of GUEST_BASE into account. */
4504 tcg_prologue_init(&tcg_ctx
);
4506 #if defined(TARGET_I386)
4507 env
->cr
[0] = CR0_PG_MASK
| CR0_WP_MASK
| CR0_PE_MASK
;
4508 env
->hflags
|= HF_PE_MASK
| HF_CPL_MASK
;
4509 if (env
->features
[FEAT_1_EDX
] & CPUID_SSE
) {
4510 env
->cr
[4] |= CR4_OSFXSR_MASK
;
4511 env
->hflags
|= HF_OSFXSR_MASK
;
4513 #ifndef TARGET_ABI32
4514 /* enable 64 bit mode if possible */
4515 if (!(env
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_LM
)) {
4516 fprintf(stderr
, "The selected x86 CPU does not support 64 bit mode\n");
4519 env
->cr
[4] |= CR4_PAE_MASK
;
4520 env
->efer
|= MSR_EFER_LMA
| MSR_EFER_LME
;
4521 env
->hflags
|= HF_LMA_MASK
;
4524 /* flags setup : we activate the IRQs by default as in user mode */
4525 env
->eflags
|= IF_MASK
;
4527 /* linux register setup */
4528 #ifndef TARGET_ABI32
4529 env
->regs
[R_EAX
] = regs
->rax
;
4530 env
->regs
[R_EBX
] = regs
->rbx
;
4531 env
->regs
[R_ECX
] = regs
->rcx
;
4532 env
->regs
[R_EDX
] = regs
->rdx
;
4533 env
->regs
[R_ESI
] = regs
->rsi
;
4534 env
->regs
[R_EDI
] = regs
->rdi
;
4535 env
->regs
[R_EBP
] = regs
->rbp
;
4536 env
->regs
[R_ESP
] = regs
->rsp
;
4537 env
->eip
= regs
->rip
;
4539 env
->regs
[R_EAX
] = regs
->eax
;
4540 env
->regs
[R_EBX
] = regs
->ebx
;
4541 env
->regs
[R_ECX
] = regs
->ecx
;
4542 env
->regs
[R_EDX
] = regs
->edx
;
4543 env
->regs
[R_ESI
] = regs
->esi
;
4544 env
->regs
[R_EDI
] = regs
->edi
;
4545 env
->regs
[R_EBP
] = regs
->ebp
;
4546 env
->regs
[R_ESP
] = regs
->esp
;
4547 env
->eip
= regs
->eip
;
4550 /* linux interrupt setup */
4551 #ifndef TARGET_ABI32
4552 env
->idt
.limit
= 511;
4554 env
->idt
.limit
= 255;
4556 env
->idt
.base
= target_mmap(0, sizeof(uint64_t) * (env
->idt
.limit
+ 1),
4557 PROT_READ
|PROT_WRITE
,
4558 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4559 idt_table
= g2h(env
->idt
.base
);
4582 /* linux segment setup */
4584 uint64_t *gdt_table
;
4585 env
->gdt
.base
= target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES
,
4586 PROT_READ
|PROT_WRITE
,
4587 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4588 env
->gdt
.limit
= sizeof(uint64_t) * TARGET_GDT_ENTRIES
- 1;
4589 gdt_table
= g2h(env
->gdt
.base
);
4591 write_dt(&gdt_table
[__USER_CS
>> 3], 0, 0xfffff,
4592 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4593 (3 << DESC_DPL_SHIFT
) | (0xa << DESC_TYPE_SHIFT
));
4595 /* 64 bit code segment */
4596 write_dt(&gdt_table
[__USER_CS
>> 3], 0, 0xfffff,
4597 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4599 (3 << DESC_DPL_SHIFT
) | (0xa << DESC_TYPE_SHIFT
));
4601 write_dt(&gdt_table
[__USER_DS
>> 3], 0, 0xfffff,
4602 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4603 (3 << DESC_DPL_SHIFT
) | (0x2 << DESC_TYPE_SHIFT
));
4605 cpu_x86_load_seg(env
, R_CS
, __USER_CS
);
4606 cpu_x86_load_seg(env
, R_SS
, __USER_DS
);
4608 cpu_x86_load_seg(env
, R_DS
, __USER_DS
);
4609 cpu_x86_load_seg(env
, R_ES
, __USER_DS
);
4610 cpu_x86_load_seg(env
, R_FS
, __USER_DS
);
4611 cpu_x86_load_seg(env
, R_GS
, __USER_DS
);
4612 /* This hack makes Wine work... */
4613 env
->segs
[R_FS
].selector
= 0;
4615 cpu_x86_load_seg(env
, R_DS
, 0);
4616 cpu_x86_load_seg(env
, R_ES
, 0);
4617 cpu_x86_load_seg(env
, R_FS
, 0);
4618 cpu_x86_load_seg(env
, R_GS
, 0);
4620 #elif defined(TARGET_AARCH64)
4624 if (!(arm_feature(env
, ARM_FEATURE_AARCH64
))) {
4626 "The selected ARM CPU does not support 64 bit mode\n");
4630 for (i
= 0; i
< 31; i
++) {
4631 env
->xregs
[i
] = regs
->regs
[i
];
4634 env
->xregs
[31] = regs
->sp
;
4636 #elif defined(TARGET_ARM)
4639 cpsr_write(env
, regs
->uregs
[16], CPSR_USER
| CPSR_EXEC
,
4641 for(i
= 0; i
< 16; i
++) {
4642 env
->regs
[i
] = regs
->uregs
[i
];
4644 #ifdef TARGET_WORDS_BIGENDIAN
4646 if (EF_ARM_EABI_VERSION(info
->elf_flags
) >= EF_ARM_EABI_VER4
4647 && (info
->elf_flags
& EF_ARM_BE8
)) {
4648 env
->uncached_cpsr
|= CPSR_E
;
4649 env
->cp15
.sctlr_el
[1] |= SCTLR_E0E
;
4651 env
->cp15
.sctlr_el
[1] |= SCTLR_B
;
4655 #elif defined(TARGET_UNICORE32)
4658 cpu_asr_write(env
, regs
->uregs
[32], 0xffffffff);
4659 for (i
= 0; i
< 32; i
++) {
4660 env
->regs
[i
] = regs
->uregs
[i
];
4663 #elif defined(TARGET_SPARC)
4667 env
->npc
= regs
->npc
;
4669 for(i
= 0; i
< 8; i
++)
4670 env
->gregs
[i
] = regs
->u_regs
[i
];
4671 for(i
= 0; i
< 8; i
++)
4672 env
->regwptr
[i
] = regs
->u_regs
[i
+ 8];
4674 #elif defined(TARGET_PPC)
4678 #if defined(TARGET_PPC64)
4679 #if defined(TARGET_ABI32)
4680 env
->msr
&= ~((target_ulong
)1 << MSR_SF
);
4682 env
->msr
|= (target_ulong
)1 << MSR_SF
;
4685 env
->nip
= regs
->nip
;
4686 for(i
= 0; i
< 32; i
++) {
4687 env
->gpr
[i
] = regs
->gpr
[i
];
4690 #elif defined(TARGET_M68K)
4693 env
->dregs
[0] = regs
->d0
;
4694 env
->dregs
[1] = regs
->d1
;
4695 env
->dregs
[2] = regs
->d2
;
4696 env
->dregs
[3] = regs
->d3
;
4697 env
->dregs
[4] = regs
->d4
;
4698 env
->dregs
[5] = regs
->d5
;
4699 env
->dregs
[6] = regs
->d6
;
4700 env
->dregs
[7] = regs
->d7
;
4701 env
->aregs
[0] = regs
->a0
;
4702 env
->aregs
[1] = regs
->a1
;
4703 env
->aregs
[2] = regs
->a2
;
4704 env
->aregs
[3] = regs
->a3
;
4705 env
->aregs
[4] = regs
->a4
;
4706 env
->aregs
[5] = regs
->a5
;
4707 env
->aregs
[6] = regs
->a6
;
4708 env
->aregs
[7] = regs
->usp
;
4710 ts
->sim_syscalls
= 1;
4712 #elif defined(TARGET_MICROBLAZE)
4714 env
->regs
[0] = regs
->r0
;
4715 env
->regs
[1] = regs
->r1
;
4716 env
->regs
[2] = regs
->r2
;
4717 env
->regs
[3] = regs
->r3
;
4718 env
->regs
[4] = regs
->r4
;
4719 env
->regs
[5] = regs
->r5
;
4720 env
->regs
[6] = regs
->r6
;
4721 env
->regs
[7] = regs
->r7
;
4722 env
->regs
[8] = regs
->r8
;
4723 env
->regs
[9] = regs
->r9
;
4724 env
->regs
[10] = regs
->r10
;
4725 env
->regs
[11] = regs
->r11
;
4726 env
->regs
[12] = regs
->r12
;
4727 env
->regs
[13] = regs
->r13
;
4728 env
->regs
[14] = regs
->r14
;
4729 env
->regs
[15] = regs
->r15
;
4730 env
->regs
[16] = regs
->r16
;
4731 env
->regs
[17] = regs
->r17
;
4732 env
->regs
[18] = regs
->r18
;
4733 env
->regs
[19] = regs
->r19
;
4734 env
->regs
[20] = regs
->r20
;
4735 env
->regs
[21] = regs
->r21
;
4736 env
->regs
[22] = regs
->r22
;
4737 env
->regs
[23] = regs
->r23
;
4738 env
->regs
[24] = regs
->r24
;
4739 env
->regs
[25] = regs
->r25
;
4740 env
->regs
[26] = regs
->r26
;
4741 env
->regs
[27] = regs
->r27
;
4742 env
->regs
[28] = regs
->r28
;
4743 env
->regs
[29] = regs
->r29
;
4744 env
->regs
[30] = regs
->r30
;
4745 env
->regs
[31] = regs
->r31
;
4746 env
->sregs
[SR_PC
] = regs
->pc
;
4748 #elif defined(TARGET_MIPS)
4752 for(i
= 0; i
< 32; i
++) {
4753 env
->active_tc
.gpr
[i
] = regs
->regs
[i
];
4755 env
->active_tc
.PC
= regs
->cp0_epc
& ~(target_ulong
)1;
4756 if (regs
->cp0_epc
& 1) {
4757 env
->hflags
|= MIPS_HFLAG_M16
;
4759 if (((info
->elf_flags
& EF_MIPS_NAN2008
) != 0) !=
4760 ((env
->active_fpu
.fcr31
& (1 << FCR31_NAN2008
)) != 0)) {
4761 if ((env
->active_fpu
.fcr31_rw_bitmask
&
4762 (1 << FCR31_NAN2008
)) == 0) {
4763 fprintf(stderr
, "ELF binary's NaN mode not supported by CPU\n");
4766 if ((info
->elf_flags
& EF_MIPS_NAN2008
) != 0) {
4767 env
->active_fpu
.fcr31
|= (1 << FCR31_NAN2008
);
4769 env
->active_fpu
.fcr31
&= ~(1 << FCR31_NAN2008
);
4771 restore_snan_bit_mode(env
);
4774 #elif defined(TARGET_OPENRISC)
4778 for (i
= 0; i
< 32; i
++) {
4779 env
->gpr
[i
] = regs
->gpr
[i
];
4785 #elif defined(TARGET_SH4)
4789 for(i
= 0; i
< 16; i
++) {
4790 env
->gregs
[i
] = regs
->regs
[i
];
4794 #elif defined(TARGET_ALPHA)
4798 for(i
= 0; i
< 28; i
++) {
4799 env
->ir
[i
] = ((abi_ulong
*)regs
)[i
];
4801 env
->ir
[IR_SP
] = regs
->usp
;
4804 #elif defined(TARGET_CRIS)
4806 env
->regs
[0] = regs
->r0
;
4807 env
->regs
[1] = regs
->r1
;
4808 env
->regs
[2] = regs
->r2
;
4809 env
->regs
[3] = regs
->r3
;
4810 env
->regs
[4] = regs
->r4
;
4811 env
->regs
[5] = regs
->r5
;
4812 env
->regs
[6] = regs
->r6
;
4813 env
->regs
[7] = regs
->r7
;
4814 env
->regs
[8] = regs
->r8
;
4815 env
->regs
[9] = regs
->r9
;
4816 env
->regs
[10] = regs
->r10
;
4817 env
->regs
[11] = regs
->r11
;
4818 env
->regs
[12] = regs
->r12
;
4819 env
->regs
[13] = regs
->r13
;
4820 env
->regs
[14] = info
->start_stack
;
4821 env
->regs
[15] = regs
->acr
;
4822 env
->pc
= regs
->erp
;
4824 #elif defined(TARGET_S390X)
4827 for (i
= 0; i
< 16; i
++) {
4828 env
->regs
[i
] = regs
->gprs
[i
];
4830 env
->psw
.mask
= regs
->psw
.mask
;
4831 env
->psw
.addr
= regs
->psw
.addr
;
4833 #elif defined(TARGET_TILEGX)
4836 for (i
= 0; i
< TILEGX_R_COUNT
; i
++) {
4837 env
->regs
[i
] = regs
->regs
[i
];
4839 for (i
= 0; i
< TILEGX_SPR_COUNT
; i
++) {
4845 #error unsupported target CPU
4848 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4849 ts
->stack_base
= info
->start_stack
;
4850 ts
->heap_base
= info
->brk
;
4851 /* This will be filled in on the first SYS_HEAPINFO call. */
4856 if (gdbserver_start(gdbstub_port
) < 0) {
4857 fprintf(stderr
, "qemu: could not open gdbserver on port %d\n",
4861 gdb_handlesig(cpu
, 0);
4863 trace_init_vcpu_events();