/*
 *  Copyright (c) 2003-2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-version.h"
#include <sys/syscall.h>
#include <sys/resource.h>

#include "qapi/error.h"
#include "qemu/path.h"
#include "qemu/config-file.h"
#include "qemu/cutils.h"
#include "qemu/help_option.h"
#include "exec/exec-all.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
#include "trace/control.h"
#include "glib-compat.h"
static const char *filename;
static const char *argv0;
static int gdbstub_port;
static envlist_t *envlist;
static const char *cpu_model;
unsigned long mmap_min_addr;
unsigned long guest_base;
#define EXCP_DUMP(env, fmt, ...)                                        \
do {                                                                    \
    CPUState *cs = ENV_GET_CPU(env);                                    \
    fprintf(stderr, fmt , ## __VA_ARGS__);                              \
    cpu_dump_state(cs, stderr, fprintf, 0);                             \
    if (qemu_log_separate()) {                                          \
        qemu_log(fmt, ## __VA_ARGS__);                                  \
        log_cpu_state(cs, 0);                                           \
    }                                                                   \
} while (0)
#if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
/*
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
 *
 * This way we will never overlap with our own libraries or binaries or stack
 * or anything else that QEMU maps.
 */
# ifdef TARGET_MIPS
/* MIPS only supports 31 bits of virtual address space for user space */
unsigned long reserved_va = 0x77000000;
# else
unsigned long reserved_va = 0xf7000000;
# endif
#else
unsigned long reserved_va;
#endif
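/*
 * A non-zero reserved_va makes QEMU reserve that much contiguous host
 * virtual address space up front and keep every guest mapping inside it,
 * so guest addresses translate to host addresses by the fixed guest_base
 * offset and can never collide with QEMU's own mappings.
 */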
static void usage(int exitcode);

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;
/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
   we allocate a bigger stack. Need a better solution, for example
   by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;
void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}
#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)
{
    /* user-mode emulation has no PIC, so never report a pending irq */
    return -1;
}
#endif
/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work. */
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
static int pending_cpus;
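/*
 * pending_cpus drives the exclusive-operation handshake: 0 means no
 * exclusive operation is in progress, 1 means the exclusive section may
 * proceed (no other cpu is still in guest code), and values above 1 count
 * the cpus that still have to leave guest code before the operation starts.
 */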
/* Make sure everything is in a consistent state for calling fork(). */
void fork_start(void)
{
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    pthread_mutex_lock(&exclusive_lock);
    mmap_fork_start();
}
void fork_end(int child)
{
    mmap_fork_end(child);
    if (child) {
        CPUState *cpu, *next_cpu;
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads. */
        CPU_FOREACH_SAFE(cpu, next_cpu) {
            if (cpu != thread_cpu) {
                QTAILQ_REMOVE(&cpus, cpu, node);
            }
        }
        pending_cpus = 0;
        pthread_mutex_init(&exclusive_lock, NULL);
        pthread_mutex_init(&cpu_list_mutex, NULL);
        pthread_cond_init(&exclusive_cond, NULL);
        pthread_cond_init(&exclusive_resume, NULL);
        qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
        gdbserver_fork(thread_cpu);
    } else {
        pthread_mutex_unlock(&exclusive_lock);
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
    }
}
/* Wait for pending exclusive operations to complete.  The exclusive lock
   must be held. */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
    }
}
/* Start an exclusive operation.
   Must only be called from outside cpu_exec. */
static inline void start_exclusive(void)
{
    CPUState *other_cpu;

    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();

    pending_cpus = 1;
    /* Make all other cpus stop executing. */
    CPU_FOREACH(other_cpu) {
        if (other_cpu->running) {
            pending_cpus++;
            cpu_exit(other_cpu);
        }
    }
    if (pending_cpus > 1) {
        pthread_cond_wait(&exclusive_cond, &exclusive_lock);
    }
}
/* Finish an exclusive operation. */
static inline void __attribute__((unused)) end_exclusive(void)
{
    pending_cpus = 0;
    pthread_cond_broadcast(&exclusive_resume);
    pthread_mutex_unlock(&exclusive_lock);
}
/* Wait for exclusive ops to finish, and begin cpu execution. */
static inline void cpu_exec_start(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();
    cpu->running = true;
    pthread_mutex_unlock(&exclusive_lock);
}
/* Mark cpu as not executing, and release pending exclusive ops. */
static inline void cpu_exec_end(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    cpu->running = false;
    if (pending_cpus > 1) {
        pending_cpus--;
        if (pending_cpus == 1) {
            pthread_cond_signal(&exclusive_cond);
        }
    }
    exclusive_idle();
    pthread_mutex_unlock(&exclusive_lock);
}
void cpu_list_lock(void)
{
    pthread_mutex_lock(&cpu_list_mutex);
}

void cpu_list_unlock(void)
{
    pthread_mutex_unlock(&cpu_list_mutex);
}
/***********************************************************/
/* CPUX86 core interface */

uint64_t cpu_get_tsc(CPUX86State *env)
{
    return cpu_get_host_ticks();
}
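/*
 * write_dt() below encodes an x86 segment descriptor: e1 and e2 are the
 * low and high 32-bit descriptor words, with the base and limit fields
 * scattered across them as the architecture requires and the caller's
 * flags OR-ed into the high word.
 */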
static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
                     int flags)
{
    unsigned int e1, e2;
    uint32_t *p;
    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
    e2 |= flags;
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}
static uint64_t *idt_table;
#ifdef TARGET_X86_64
static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
    p[2] = tswap32(addr >> 32);
    p[3] = 0;
}
/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
}
#else
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}
/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate(idt_table + n, 0, dpl, 0, 0);
}
#endif
287 void cpu_loop(CPUX86State
*env
)
289 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
293 target_siginfo_t info
;
297 trapnr
= cpu_exec(cs
);
301 /* linux syscall from int $0x80 */
302 ret
= do_syscall(env
,
311 if (ret
== -TARGET_ERESTARTSYS
) {
313 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
314 env
->regs
[R_EAX
] = ret
;
319 /* linux syscall from syscall instruction */
320 ret
= do_syscall(env
,
329 if (ret
== -TARGET_ERESTARTSYS
) {
331 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
332 env
->regs
[R_EAX
] = ret
;
338 info
.si_signo
= TARGET_SIGBUS
;
340 info
.si_code
= TARGET_SI_KERNEL
;
341 info
._sifields
._sigfault
._addr
= 0;
342 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
345 /* XXX: potential problem if ABI32 */
346 #ifndef TARGET_X86_64
347 if (env
->eflags
& VM_MASK
) {
348 handle_vm86_fault(env
);
352 info
.si_signo
= TARGET_SIGSEGV
;
354 info
.si_code
= TARGET_SI_KERNEL
;
355 info
._sifields
._sigfault
._addr
= 0;
356 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
360 info
.si_signo
= TARGET_SIGSEGV
;
362 if (!(env
->error_code
& 1))
363 info
.si_code
= TARGET_SEGV_MAPERR
;
365 info
.si_code
= TARGET_SEGV_ACCERR
;
366 info
._sifields
._sigfault
._addr
= env
->cr
[2];
367 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
370 #ifndef TARGET_X86_64
371 if (env
->eflags
& VM_MASK
) {
372 handle_vm86_trap(env
, trapnr
);
376 /* division by zero */
377 info
.si_signo
= TARGET_SIGFPE
;
379 info
.si_code
= TARGET_FPE_INTDIV
;
380 info
._sifields
._sigfault
._addr
= env
->eip
;
381 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
386 #ifndef TARGET_X86_64
387 if (env
->eflags
& VM_MASK
) {
388 handle_vm86_trap(env
, trapnr
);
392 info
.si_signo
= TARGET_SIGTRAP
;
394 if (trapnr
== EXCP01_DB
) {
395 info
.si_code
= TARGET_TRAP_BRKPT
;
396 info
._sifields
._sigfault
._addr
= env
->eip
;
398 info
.si_code
= TARGET_SI_KERNEL
;
399 info
._sifields
._sigfault
._addr
= 0;
401 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
406 #ifndef TARGET_X86_64
407 if (env
->eflags
& VM_MASK
) {
408 handle_vm86_trap(env
, trapnr
);
412 info
.si_signo
= TARGET_SIGSEGV
;
414 info
.si_code
= TARGET_SI_KERNEL
;
415 info
._sifields
._sigfault
._addr
= 0;
416 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
420 info
.si_signo
= TARGET_SIGILL
;
422 info
.si_code
= TARGET_ILL_ILLOPN
;
423 info
._sifields
._sigfault
._addr
= env
->eip
;
424 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
427 /* just indicate that signals should be handled asap */
433 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
438 info
.si_code
= TARGET_TRAP_BRKPT
;
439 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
444 pc
= env
->segs
[R_CS
].base
+ env
->eip
;
445 EXCP_DUMP(env
, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
449 process_pending_signals(env
);
#define get_user_code_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })
#define get_user_code_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })
#define get_user_data_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })
#define get_user_data_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })
#define put_user_data_u32(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap32(__x);                         \
        }                                               \
        put_user_u32(__x, (gaddr));                     \
    })
#define put_user_data_u16(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap16(__x);                         \
        }                                               \
        put_user_u16(__x, (gaddr));                     \
    })
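/*
 * The *_code_* accessors byte-swap instruction fetches when the guest's
 * SCTLR.B (BE32) setting requires it, while the *_data_* accessors swap
 * data accesses when the CPU is configured for big-endian data, so the
 * cpu_loop code below does not have to care about the guest's endianness
 * configuration.
 */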
/* Commpage handling -- there is no commpage for AArch64 */

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 * Input:
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * Output:
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
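/*
 * Illustrative guest-side use of the helper (not code generated by QEMU):
 *
 *     adr  r0, oldval        @ pointer to the expected value
 *     adr  r1, newval        @ pointer to the replacement value
 *     adr  r2, target        @ pointer to the 64-bit variable
 *     mov  lr, pc
 *     ldr  pc, =0xffff0f60   @ __kernel_cmpxchg64 commpage entry
 *
 * Jumps into the commpage raise EXCP_KERNEL_TRAP, and do_kernel_trap()
 * below recognises the fixed entry addresses and emulates the helpers
 * instead of executing guest code there.
 */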
521 static void arm_kernel_cmpxchg64_helper(CPUARMState
*env
)
523 uint64_t oldval
, newval
, val
;
525 target_siginfo_t info
;
527 /* Based on the 32 bit code in do_kernel_trap */
529 /* XXX: This only works between threads, not between processes.
530 It's probably possible to implement this with native host
531 operations. However things like ldrex/strex are much harder so
532 there's not much point trying. */
534 cpsr
= cpsr_read(env
);
537 if (get_user_u64(oldval
, env
->regs
[0])) {
538 env
->exception
.vaddress
= env
->regs
[0];
542 if (get_user_u64(newval
, env
->regs
[1])) {
543 env
->exception
.vaddress
= env
->regs
[1];
547 if (get_user_u64(val
, addr
)) {
548 env
->exception
.vaddress
= addr
;
555 if (put_user_u64(val
, addr
)) {
556 env
->exception
.vaddress
= addr
;
566 cpsr_write(env
, cpsr
, CPSR_C
, CPSRWriteByInstr
);
572 /* We get the PC of the entry address - which is as good as anything,
573 on a real kernel what you get depends on which mode it uses. */
574 info
.si_signo
= TARGET_SIGSEGV
;
576 /* XXX: check env->error_code */
577 info
.si_code
= TARGET_SEGV_MAPERR
;
578 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
579 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
582 /* Handle a jump to the kernel code page. */
584 do_kernel_trap(CPUARMState
*env
)
590 switch (env
->regs
[15]) {
591 case 0xffff0fa0: /* __kernel_memory_barrier */
592 /* ??? No-op. Will need to do better for SMP. */
594 case 0xffff0fc0: /* __kernel_cmpxchg */
595 /* XXX: This only works between threads, not between processes.
596 It's probably possible to implement this with native host
597 operations. However things like ldrex/strex are much harder so
598 there's not much point trying. */
600 cpsr
= cpsr_read(env
);
602 /* FIXME: This should SEGV if the access fails. */
603 if (get_user_u32(val
, addr
))
605 if (val
== env
->regs
[0]) {
607 /* FIXME: Check for segfaults. */
608 put_user_u32(val
, addr
);
615 cpsr_write(env
, cpsr
, CPSR_C
, CPSRWriteByInstr
);
618 case 0xffff0fe0: /* __kernel_get_tls */
619 env
->regs
[0] = cpu_get_tls(env
);
621 case 0xffff0f60: /* __kernel_cmpxchg64 */
622 arm_kernel_cmpxchg64_helper(env
);
628 /* Jump back to the caller. */
629 addr
= env
->regs
[14];
634 env
->regs
[15] = addr
;
639 /* Store exclusive handling for AArch32 */
640 static int do_strex(CPUARMState
*env
)
648 if (env
->exclusive_addr
!= env
->exclusive_test
) {
651 /* We know we're always AArch32 so the address is in uint32_t range
652 * unless it was the -1 exclusive-monitor-lost value (which won't
653 * match exclusive_test above).
655 assert(extract64(env
->exclusive_addr
, 32, 32) == 0);
656 addr
= env
->exclusive_addr
;
657 size
= env
->exclusive_info
& 0xf;
660 segv
= get_user_u8(val
, addr
);
663 segv
= get_user_data_u16(val
, addr
, env
);
667 segv
= get_user_data_u32(val
, addr
, env
);
673 env
->exception
.vaddress
= addr
;
678 segv
= get_user_data_u32(valhi
, addr
+ 4, env
);
680 env
->exception
.vaddress
= addr
+ 4;
683 if (arm_cpu_bswap_data(env
)) {
684 val
= deposit64((uint64_t)valhi
, 32, 32, val
);
686 val
= deposit64(val
, 32, 32, valhi
);
689 if (val
!= env
->exclusive_val
) {
693 val
= env
->regs
[(env
->exclusive_info
>> 8) & 0xf];
696 segv
= put_user_u8(val
, addr
);
699 segv
= put_user_data_u16(val
, addr
, env
);
703 segv
= put_user_data_u32(val
, addr
, env
);
707 env
->exception
.vaddress
= addr
;
711 val
= env
->regs
[(env
->exclusive_info
>> 12) & 0xf];
712 segv
= put_user_data_u32(val
, addr
+ 4, env
);
714 env
->exception
.vaddress
= addr
+ 4;
721 env
->regs
[(env
->exclusive_info
>> 4) & 0xf] = rc
;
727 void cpu_loop(CPUARMState
*env
)
729 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
731 unsigned int n
, insn
;
732 target_siginfo_t info
;
738 trapnr
= cpu_exec(cs
);
743 TaskState
*ts
= cs
->opaque
;
747 /* we handle the FPU emulation here, as Linux */
748 /* we get the opcode */
749 /* FIXME - what to do if get_user() fails? */
750 get_user_code_u32(opcode
, env
->regs
[15], env
);
752 rc
= EmulateAll(opcode
, &ts
->fpa
, env
);
753 if (rc
== 0) { /* illegal instruction */
754 info
.si_signo
= TARGET_SIGILL
;
756 info
.si_code
= TARGET_ILL_ILLOPN
;
757 info
._sifields
._sigfault
._addr
= env
->regs
[15];
758 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
759 } else if (rc
< 0) { /* FP exception */
762 /* translate softfloat flags to FPSR flags */
763 if (-rc
& float_flag_invalid
)
765 if (-rc
& float_flag_divbyzero
)
767 if (-rc
& float_flag_overflow
)
769 if (-rc
& float_flag_underflow
)
771 if (-rc
& float_flag_inexact
)
774 FPSR fpsr
= ts
->fpa
.fpsr
;
775 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
777 if (fpsr
& (arm_fpe
<< 16)) { /* exception enabled? */
778 info
.si_signo
= TARGET_SIGFPE
;
781 /* ordered by priority, least first */
782 if (arm_fpe
& BIT_IXC
) info
.si_code
= TARGET_FPE_FLTRES
;
783 if (arm_fpe
& BIT_UFC
) info
.si_code
= TARGET_FPE_FLTUND
;
784 if (arm_fpe
& BIT_OFC
) info
.si_code
= TARGET_FPE_FLTOVF
;
785 if (arm_fpe
& BIT_DZC
) info
.si_code
= TARGET_FPE_FLTDIV
;
786 if (arm_fpe
& BIT_IOC
) info
.si_code
= TARGET_FPE_FLTINV
;
788 info
._sifields
._sigfault
._addr
= env
->regs
[15];
789 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
794 /* accumulate unenabled exceptions */
795 if ((!(fpsr
& BIT_IXE
)) && (arm_fpe
& BIT_IXC
))
797 if ((!(fpsr
& BIT_UFE
)) && (arm_fpe
& BIT_UFC
))
799 if ((!(fpsr
& BIT_OFE
)) && (arm_fpe
& BIT_OFC
))
801 if ((!(fpsr
& BIT_DZE
)) && (arm_fpe
& BIT_DZC
))
803 if ((!(fpsr
& BIT_IOE
)) && (arm_fpe
& BIT_IOC
))
806 } else { /* everything OK */
817 if (trapnr
== EXCP_BKPT
) {
819 /* FIXME - what to do if get_user() fails? */
820 get_user_code_u16(insn
, env
->regs
[15], env
);
824 /* FIXME - what to do if get_user() fails? */
825 get_user_code_u32(insn
, env
->regs
[15], env
);
826 n
= (insn
& 0xf) | ((insn
>> 4) & 0xff0);
831 /* FIXME - what to do if get_user() fails? */
832 get_user_code_u16(insn
, env
->regs
[15] - 2, env
);
835 /* FIXME - what to do if get_user() fails? */
836 get_user_code_u32(insn
, env
->regs
[15] - 4, env
);
841 if (n
== ARM_NR_cacheflush
) {
843 } else if (n
== ARM_NR_semihosting
844 || n
== ARM_NR_thumb_semihosting
) {
845 env
->regs
[0] = do_arm_semihosting (env
);
846 } else if (n
== 0 || n
>= ARM_SYSCALL_BASE
|| env
->thumb
) {
848 if (env
->thumb
|| n
== 0) {
851 n
-= ARM_SYSCALL_BASE
;
854 if ( n
> ARM_NR_BASE
) {
856 case ARM_NR_cacheflush
:
860 cpu_set_tls(env
, env
->regs
[0]);
863 case ARM_NR_breakpoint
:
864 env
->regs
[15] -= env
->thumb
? 2 : 4;
867 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
869 env
->regs
[0] = -TARGET_ENOSYS
;
873 ret
= do_syscall(env
,
882 if (ret
== -TARGET_ERESTARTSYS
) {
883 env
->regs
[15] -= env
->thumb
? 2 : 4;
884 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
894 /* just indicate that signals should be handled asap */
897 if (!do_strex(env
)) {
900 /* fall through for segv */
901 case EXCP_PREFETCH_ABORT
:
902 case EXCP_DATA_ABORT
:
903 addr
= env
->exception
.vaddress
;
905 info
.si_signo
= TARGET_SIGSEGV
;
907 /* XXX: check env->error_code */
908 info
.si_code
= TARGET_SEGV_MAPERR
;
909 info
._sifields
._sigfault
._addr
= addr
;
910 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
918 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
923 info
.si_code
= TARGET_TRAP_BRKPT
;
924 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
928 case EXCP_KERNEL_TRAP
:
929 if (do_kernel_trap(env
))
933 /* nothing to do here for user-mode, just resume guest code */
937 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
940 process_pending_signals(env
);
/*
 * Handle AArch64 store-release exclusive
 *
 * rs = register that receives the status result of the store exclusive
 * rt = register that is stored
 * rt2 = second register stored (for STP)
 */
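/*
 * The AArch64 translation code packs these fields into env->exclusive_info
 * as (size | is_pair << 2 | rs << 4 | rt << 9 | rt2 << 14); they are
 * unpacked again with extract32() at the top of do_strex_a64().
 */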
954 static int do_strex_a64(CPUARMState
*env
)
965 /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
966 size
= extract32(env
->exclusive_info
, 0, 2);
967 is_pair
= extract32(env
->exclusive_info
, 2, 1);
968 rs
= extract32(env
->exclusive_info
, 4, 5);
969 rt
= extract32(env
->exclusive_info
, 9, 5);
970 rt2
= extract32(env
->exclusive_info
, 14, 5);
972 addr
= env
->exclusive_addr
;
974 if (addr
!= env
->exclusive_test
) {
980 segv
= get_user_u8(val
, addr
);
983 segv
= get_user_u16(val
, addr
);
986 segv
= get_user_u32(val
, addr
);
989 segv
= get_user_u64(val
, addr
);
995 env
->exception
.vaddress
= addr
;
998 if (val
!= env
->exclusive_val
) {
1003 segv
= get_user_u32(val
, addr
+ 4);
1005 segv
= get_user_u64(val
, addr
+ 8);
1008 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
1011 if (val
!= env
->exclusive_high
) {
1015 /* handle the zero register */
1016 val
= rt
== 31 ? 0 : env
->xregs
[rt
];
1019 segv
= put_user_u8(val
, addr
);
1022 segv
= put_user_u16(val
, addr
);
1025 segv
= put_user_u32(val
, addr
);
1028 segv
= put_user_u64(val
, addr
);
1035 /* handle the zero register */
1036 val
= rt2
== 31 ? 0 : env
->xregs
[rt2
];
1038 segv
= put_user_u32(val
, addr
+ 4);
1040 segv
= put_user_u64(val
, addr
+ 8);
1043 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
1050 /* rs == 31 encodes a write to the ZR, thus throwing away
1051 * the status return. This is rather silly but valid.
1054 env
->xregs
[rs
] = rc
;
1057 /* instruction faulted, PC does not advance */
1058 /* either way a strex releases any exclusive lock we have */
1059 env
->exclusive_addr
= -1;
1064 /* AArch64 main loop */
1065 void cpu_loop(CPUARMState
*env
)
1067 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
1070 target_siginfo_t info
;
1074 trapnr
= cpu_exec(cs
);
1079 ret
= do_syscall(env
,
1088 if (ret
== -TARGET_ERESTARTSYS
) {
1090 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
1091 env
->xregs
[0] = ret
;
1094 case EXCP_INTERRUPT
:
1095 /* just indicate that signals should be handled asap */
1098 info
.si_signo
= TARGET_SIGILL
;
1100 info
.si_code
= TARGET_ILL_ILLOPN
;
1101 info
._sifields
._sigfault
._addr
= env
->pc
;
1102 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
1105 if (!do_strex_a64(env
)) {
1108 /* fall through for segv */
1109 case EXCP_PREFETCH_ABORT
:
1110 case EXCP_DATA_ABORT
:
1111 info
.si_signo
= TARGET_SIGSEGV
;
1113 /* XXX: check env->error_code */
1114 info
.si_code
= TARGET_SEGV_MAPERR
;
1115 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
1116 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
1120 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1122 info
.si_signo
= sig
;
1124 info
.si_code
= TARGET_TRAP_BRKPT
;
1125 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
1129 env
->xregs
[0] = do_arm_semihosting(env
);
1132 /* nothing to do here for user-mode, just resume guest code */
1135 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1138 process_pending_signals(env
);
1139 /* Exception return on AArch64 always clears the exclusive monitor,
1140 * so any return to running guest code implies this.
1141 * A strex (successful or otherwise) also clears the monitor, so
1142 * we don't need to specialcase EXCP_STREX.
1144 env
->exclusive_addr
= -1;
1147 #endif /* ndef TARGET_ABI32 */
1151 #ifdef TARGET_UNICORE32
1153 void cpu_loop(CPUUniCore32State
*env
)
1155 CPUState
*cs
= CPU(uc32_env_get_cpu(env
));
1157 unsigned int n
, insn
;
1158 target_siginfo_t info
;
1162 trapnr
= cpu_exec(cs
);
1165 case UC32_EXCP_PRIV
:
1168 get_user_u32(insn
, env
->regs
[31] - 4);
1169 n
= insn
& 0xffffff;
1171 if (n
>= UC32_SYSCALL_BASE
) {
1173 n
-= UC32_SYSCALL_BASE
;
1174 if (n
== UC32_SYSCALL_NR_set_tls
) {
1175 cpu_set_tls(env
, env
->regs
[0]);
1178 abi_long ret
= do_syscall(env
,
1187 if (ret
== -TARGET_ERESTARTSYS
) {
1189 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
1198 case UC32_EXCP_DTRAP
:
1199 case UC32_EXCP_ITRAP
:
1200 info
.si_signo
= TARGET_SIGSEGV
;
1202 /* XXX: check env->error_code */
1203 info
.si_code
= TARGET_SEGV_MAPERR
;
1204 info
._sifields
._sigfault
._addr
= env
->cp0
.c4_faultaddr
;
1205 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
1207 case EXCP_INTERRUPT
:
1208 /* just indicate that signals should be handled asap */
1214 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1216 info
.si_signo
= sig
;
1218 info
.si_code
= TARGET_TRAP_BRKPT
;
1219 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
1226 process_pending_signals(env
);
1230 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
#define SPARC64_STACK_BIAS 2047

/* WARNING: dealing with register windows _is_ complicated. More info
   can be found at http://www.sics.se/~psm/sparcstack.html */
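/*
 * get_reg_index() flattens a (window, register) pair into an index into
 * env->regbase[]: each window contributes 16 registers and the index wraps
 * modulo 16 * nwindows, so the windowed register file behaves as a ring.
 */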
1242 static inline int get_reg_index(CPUSPARCState
*env
, int cwp
, int index
)
1244 index
= (index
+ cwp
* 16) % (16 * env
->nwindows
);
1245 /* wrap handling : if cwp is on the last window, then we use the
1246 registers 'after' the end */
1247 if (index
< 8 && env
->cwp
== env
->nwindows
- 1)
1248 index
+= 16 * env
->nwindows
;
1252 /* save the register window 'cwp1' */
1253 static inline void save_window_offset(CPUSPARCState
*env
, int cwp1
)
1258 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1259 #ifdef TARGET_SPARC64
1261 sp_ptr
+= SPARC64_STACK_BIAS
;
1263 #if defined(DEBUG_WIN)
1264 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" save_cwp=%d\n",
1267 for(i
= 0; i
< 16; i
++) {
1268 /* FIXME - what to do if put_user() fails? */
1269 put_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1270 sp_ptr
+= sizeof(abi_ulong
);
1274 static void save_window(CPUSPARCState
*env
)
1276 #ifndef TARGET_SPARC64
1277 unsigned int new_wim
;
1278 new_wim
= ((env
->wim
>> 1) | (env
->wim
<< (env
->nwindows
- 1))) &
1279 ((1LL << env
->nwindows
) - 1);
1280 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1283 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1289 static void restore_window(CPUSPARCState
*env
)
1291 #ifndef TARGET_SPARC64
1292 unsigned int new_wim
;
1294 unsigned int i
, cwp1
;
1297 #ifndef TARGET_SPARC64
1298 new_wim
= ((env
->wim
<< 1) | (env
->wim
>> (env
->nwindows
- 1))) &
1299 ((1LL << env
->nwindows
) - 1);
1302 /* restore the invalid window */
1303 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1304 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1305 #ifdef TARGET_SPARC64
1307 sp_ptr
+= SPARC64_STACK_BIAS
;
1309 #if defined(DEBUG_WIN)
1310 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" load_cwp=%d\n",
1313 for(i
= 0; i
< 16; i
++) {
1314 /* FIXME - what to do if get_user() fails? */
1315 get_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1316 sp_ptr
+= sizeof(abi_ulong
);
1318 #ifdef TARGET_SPARC64
1320 if (env
->cleanwin
< env
->nwindows
- 1)
1328 static void flush_windows(CPUSPARCState
*env
)
1334 /* if restore would invoke restore_window(), then we can stop */
1335 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ offset
);
1336 #ifndef TARGET_SPARC64
1337 if (env
->wim
& (1 << cwp1
))
1340 if (env
->canrestore
== 0)
1345 save_window_offset(env
, cwp1
);
1348 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1349 #ifndef TARGET_SPARC64
1350 /* set wim so that restore will reload the registers */
1351 env
->wim
= 1 << cwp1
;
1353 #if defined(DEBUG_WIN)
1354 printf("flush_windows: nb=%d\n", offset
- 1);
1358 void cpu_loop (CPUSPARCState
*env
)
1360 CPUState
*cs
= CPU(sparc_env_get_cpu(env
));
1363 target_siginfo_t info
;
1367 trapnr
= cpu_exec(cs
);
1370 /* Compute PSR before exposing state. */
1371 if (env
->cc_op
!= CC_OP_FLAGS
) {
1376 #ifndef TARGET_SPARC64
1383 ret
= do_syscall (env
, env
->gregs
[1],
1384 env
->regwptr
[0], env
->regwptr
[1],
1385 env
->regwptr
[2], env
->regwptr
[3],
1386 env
->regwptr
[4], env
->regwptr
[5],
1388 if (ret
== -TARGET_ERESTARTSYS
|| ret
== -TARGET_QEMU_ESIGRETURN
) {
1391 if ((abi_ulong
)ret
>= (abi_ulong
)(-515)) {
1392 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1393 env
->xcc
|= PSR_CARRY
;
1395 env
->psr
|= PSR_CARRY
;
1399 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1400 env
->xcc
&= ~PSR_CARRY
;
1402 env
->psr
&= ~PSR_CARRY
;
1405 env
->regwptr
[0] = ret
;
1406 /* next instruction */
1408 env
->npc
= env
->npc
+ 4;
1410 case 0x83: /* flush windows */
1415 /* next instruction */
1417 env
->npc
= env
->npc
+ 4;
1419 #ifndef TARGET_SPARC64
1420 case TT_WIN_OVF
: /* window overflow */
1423 case TT_WIN_UNF
: /* window underflow */
1424 restore_window(env
);
1429 info
.si_signo
= TARGET_SIGSEGV
;
1431 /* XXX: check env->error_code */
1432 info
.si_code
= TARGET_SEGV_MAPERR
;
1433 info
._sifields
._sigfault
._addr
= env
->mmuregs
[4];
1434 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
1438 case TT_SPILL
: /* window overflow */
1441 case TT_FILL
: /* window underflow */
1442 restore_window(env
);
1447 info
.si_signo
= TARGET_SIGSEGV
;
1449 /* XXX: check env->error_code */
1450 info
.si_code
= TARGET_SEGV_MAPERR
;
1451 if (trapnr
== TT_DFAULT
)
1452 info
._sifields
._sigfault
._addr
= env
->dmmuregs
[4];
1454 info
._sifields
._sigfault
._addr
= cpu_tsptr(env
)->tpc
;
1455 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
1458 #ifndef TARGET_ABI32
1461 sparc64_get_context(env
);
1465 sparc64_set_context(env
);
1469 case EXCP_INTERRUPT
:
1470 /* just indicate that signals should be handled asap */
1474 info
.si_signo
= TARGET_SIGILL
;
1476 info
.si_code
= TARGET_ILL_ILLOPC
;
1477 info
._sifields
._sigfault
._addr
= env
->pc
;
1478 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
1485 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1488 info
.si_signo
= sig
;
1490 info
.si_code
= TARGET_TRAP_BRKPT
;
1491 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
1496 printf ("Unhandled trap: 0x%x\n", trapnr
);
1497 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1500 process_pending_signals (env
);
1507 static inline uint64_t cpu_ppc_get_tb(CPUPPCState
*env
)
1509 return cpu_get_host_ticks();
1512 uint64_t cpu_ppc_load_tbl(CPUPPCState
*env
)
1514 return cpu_ppc_get_tb(env
);
1517 uint32_t cpu_ppc_load_tbu(CPUPPCState
*env
)
1519 return cpu_ppc_get_tb(env
) >> 32;
1522 uint64_t cpu_ppc_load_atbl(CPUPPCState
*env
)
1524 return cpu_ppc_get_tb(env
);
1527 uint32_t cpu_ppc_load_atbu(CPUPPCState
*env
)
1529 return cpu_ppc_get_tb(env
) >> 32;
1532 uint32_t cpu_ppc601_load_rtcu(CPUPPCState
*env
)
1533 __attribute__ (( alias ("cpu_ppc_load_tbu") ));
1535 uint32_t cpu_ppc601_load_rtcl(CPUPPCState
*env
)
1537 return cpu_ppc_load_tbl(env
) & 0x3FFFFF80;
1540 /* XXX: to be fixed */
1541 int ppc_dcr_read (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t *valp
)
1546 int ppc_dcr_write (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t val
)
1551 static int do_store_exclusive(CPUPPCState
*env
)
1554 target_ulong page_addr
;
1555 target_ulong val
, val2
__attribute__((unused
)) = 0;
1559 addr
= env
->reserve_ea
;
1560 page_addr
= addr
& TARGET_PAGE_MASK
;
1563 flags
= page_get_flags(page_addr
);
1564 if ((flags
& PAGE_READ
) == 0) {
1567 int reg
= env
->reserve_info
& 0x1f;
1568 int size
= env
->reserve_info
>> 5;
1571 if (addr
== env
->reserve_addr
) {
1573 case 1: segv
= get_user_u8(val
, addr
); break;
1574 case 2: segv
= get_user_u16(val
, addr
); break;
1575 case 4: segv
= get_user_u32(val
, addr
); break;
1576 #if defined(TARGET_PPC64)
1577 case 8: segv
= get_user_u64(val
, addr
); break;
1579 segv
= get_user_u64(val
, addr
);
1581 segv
= get_user_u64(val2
, addr
+ 8);
1588 if (!segv
&& val
== env
->reserve_val
) {
1589 val
= env
->gpr
[reg
];
1591 case 1: segv
= put_user_u8(val
, addr
); break;
1592 case 2: segv
= put_user_u16(val
, addr
); break;
1593 case 4: segv
= put_user_u32(val
, addr
); break;
1594 #if defined(TARGET_PPC64)
1595 case 8: segv
= put_user_u64(val
, addr
); break;
1597 if (val2
== env
->reserve_val2
) {
1600 val
= env
->gpr
[reg
+1];
1602 val2
= env
->gpr
[reg
+1];
1604 segv
= put_user_u64(val
, addr
);
1606 segv
= put_user_u64(val2
, addr
+ 8);
1619 env
->crf
[0] = (stored
<< 1) | xer_so
;
1620 env
->reserve_addr
= (target_ulong
)-1;
1630 void cpu_loop(CPUPPCState
*env
)
1632 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
1633 target_siginfo_t info
;
1639 trapnr
= cpu_exec(cs
);
1642 case POWERPC_EXCP_NONE
:
1645 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1646 cpu_abort(cs
, "Critical interrupt while in user mode. "
1649 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1650 cpu_abort(cs
, "Machine check exception while in user mode. "
1653 case POWERPC_EXCP_DSI
: /* Data storage exception */
1654 /* XXX: check this. Seems bugged */
1655 switch (env
->error_code
& 0xFF000000) {
1658 info
.si_signo
= TARGET_SIGSEGV
;
1660 info
.si_code
= TARGET_SEGV_MAPERR
;
1663 info
.si_signo
= TARGET_SIGILL
;
1665 info
.si_code
= TARGET_ILL_ILLADR
;
1668 info
.si_signo
= TARGET_SIGSEGV
;
1670 info
.si_code
= TARGET_SEGV_ACCERR
;
1673 /* Let's send a regular segfault... */
1674 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1676 info
.si_signo
= TARGET_SIGSEGV
;
1678 info
.si_code
= TARGET_SEGV_MAPERR
;
1681 info
._sifields
._sigfault
._addr
= env
->nip
;
1682 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
1684 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1685 /* XXX: check this */
1686 switch (env
->error_code
& 0xFF000000) {
1688 info
.si_signo
= TARGET_SIGSEGV
;
1690 info
.si_code
= TARGET_SEGV_MAPERR
;
1694 info
.si_signo
= TARGET_SIGSEGV
;
1696 info
.si_code
= TARGET_SEGV_ACCERR
;
1699 /* Let's send a regular segfault... */
1700 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1702 info
.si_signo
= TARGET_SIGSEGV
;
1704 info
.si_code
= TARGET_SEGV_MAPERR
;
1707 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1708 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
1710 case POWERPC_EXCP_EXTERNAL
: /* External input */
1711 cpu_abort(cs
, "External interrupt while in user mode. "
1714 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1715 /* XXX: check this */
1716 info
.si_signo
= TARGET_SIGBUS
;
1718 info
.si_code
= TARGET_BUS_ADRALN
;
1719 info
._sifields
._sigfault
._addr
= env
->nip
;
1720 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
1722 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1723 case POWERPC_EXCP_HV_EMU
: /* HV emulation */
1724 /* XXX: check this */
1725 switch (env
->error_code
& ~0xF) {
1726 case POWERPC_EXCP_FP
:
1727 info
.si_signo
= TARGET_SIGFPE
;
1729 switch (env
->error_code
& 0xF) {
1730 case POWERPC_EXCP_FP_OX
:
1731 info
.si_code
= TARGET_FPE_FLTOVF
;
1733 case POWERPC_EXCP_FP_UX
:
1734 info
.si_code
= TARGET_FPE_FLTUND
;
1736 case POWERPC_EXCP_FP_ZX
:
1737 case POWERPC_EXCP_FP_VXZDZ
:
1738 info
.si_code
= TARGET_FPE_FLTDIV
;
1740 case POWERPC_EXCP_FP_XX
:
1741 info
.si_code
= TARGET_FPE_FLTRES
;
1743 case POWERPC_EXCP_FP_VXSOFT
:
1744 info
.si_code
= TARGET_FPE_FLTINV
;
1746 case POWERPC_EXCP_FP_VXSNAN
:
1747 case POWERPC_EXCP_FP_VXISI
:
1748 case POWERPC_EXCP_FP_VXIDI
:
1749 case POWERPC_EXCP_FP_VXIMZ
:
1750 case POWERPC_EXCP_FP_VXVC
:
1751 case POWERPC_EXCP_FP_VXSQRT
:
1752 case POWERPC_EXCP_FP_VXCVI
:
1753 info
.si_code
= TARGET_FPE_FLTSUB
;
1756 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1761 case POWERPC_EXCP_INVAL
:
1762 info
.si_signo
= TARGET_SIGILL
;
1764 switch (env
->error_code
& 0xF) {
1765 case POWERPC_EXCP_INVAL_INVAL
:
1766 info
.si_code
= TARGET_ILL_ILLOPC
;
1768 case POWERPC_EXCP_INVAL_LSWX
:
1769 info
.si_code
= TARGET_ILL_ILLOPN
;
1771 case POWERPC_EXCP_INVAL_SPR
:
1772 info
.si_code
= TARGET_ILL_PRVREG
;
1774 case POWERPC_EXCP_INVAL_FP
:
1775 info
.si_code
= TARGET_ILL_COPROC
;
1778 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1779 env
->error_code
& 0xF);
1780 info
.si_code
= TARGET_ILL_ILLADR
;
1784 case POWERPC_EXCP_PRIV
:
1785 info
.si_signo
= TARGET_SIGILL
;
1787 switch (env
->error_code
& 0xF) {
1788 case POWERPC_EXCP_PRIV_OPC
:
1789 info
.si_code
= TARGET_ILL_PRVOPC
;
1791 case POWERPC_EXCP_PRIV_REG
:
1792 info
.si_code
= TARGET_ILL_PRVREG
;
1795 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1796 env
->error_code
& 0xF);
1797 info
.si_code
= TARGET_ILL_PRVOPC
;
1801 case POWERPC_EXCP_TRAP
:
1802 cpu_abort(cs
, "Tried to call a TRAP\n");
1805 /* Should not happen ! */
1806 cpu_abort(cs
, "Unknown program exception (%02x)\n",
1810 info
._sifields
._sigfault
._addr
= env
->nip
;
1811 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
1813 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1814 info
.si_signo
= TARGET_SIGILL
;
1816 info
.si_code
= TARGET_ILL_COPROC
;
1817 info
._sifields
._sigfault
._addr
= env
->nip
;
1818 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
1820 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1821 cpu_abort(cs
, "Syscall exception while in user mode. "
1824 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1825 info
.si_signo
= TARGET_SIGILL
;
1827 info
.si_code
= TARGET_ILL_COPROC
;
1828 info
._sifields
._sigfault
._addr
= env
->nip
;
1829 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
1831 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1832 cpu_abort(cs
, "Decrementer interrupt while in user mode. "
1835 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
            cpu_abort(cs, "Fixed-interval timer interrupt while in user mode. "
1839 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1840 cpu_abort(cs
, "Watchdog timer interrupt while in user mode. "
1843 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1844 cpu_abort(cs
, "Data TLB exception while in user mode. "
1847 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1848 cpu_abort(cs
, "Instruction TLB exception while in user mode. "
1851 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1852 info
.si_signo
= TARGET_SIGILL
;
1854 info
.si_code
= TARGET_ILL_COPROC
;
1855 info
._sifields
._sigfault
._addr
= env
->nip
;
1856 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
1858 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1859 cpu_abort(cs
, "Embedded floating-point data IRQ not handled\n");
1861 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1862 cpu_abort(cs
, "Embedded floating-point round IRQ not handled\n");
1864 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1865 cpu_abort(cs
, "Performance monitor exception not handled\n");
1867 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1868 cpu_abort(cs
, "Doorbell interrupt while in user mode. "
1871 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1872 cpu_abort(cs
, "Doorbell critical interrupt while in user mode. "
1875 case POWERPC_EXCP_RESET
: /* System reset exception */
1876 cpu_abort(cs
, "Reset interrupt while in user mode. "
1879 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1880 cpu_abort(cs
, "Data segment exception while in user mode. "
1883 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1884 cpu_abort(cs
, "Instruction segment exception "
1885 "while in user mode. Aborting\n");
1887 /* PowerPC 64 with hypervisor mode support */
1888 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1889 cpu_abort(cs
, "Hypervisor decrementer interrupt "
1890 "while in user mode. Aborting\n");
1892 case POWERPC_EXCP_TRACE
: /* Trace exception */
1894 * we use this exception to emulate step-by-step execution mode.
1897 /* PowerPC 64 with hypervisor mode support */
1898 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1899 cpu_abort(cs
, "Hypervisor data storage exception "
1900 "while in user mode. Aborting\n");
1902 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1903 cpu_abort(cs
, "Hypervisor instruction storage exception "
1904 "while in user mode. Aborting\n");
1906 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1907 cpu_abort(cs
, "Hypervisor data segment exception "
1908 "while in user mode. Aborting\n");
1910 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1911 cpu_abort(cs
, "Hypervisor instruction segment exception "
1912 "while in user mode. Aborting\n");
1914 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1915 info
.si_signo
= TARGET_SIGILL
;
1917 info
.si_code
= TARGET_ILL_COPROC
;
1918 info
._sifields
._sigfault
._addr
= env
->nip
;
1919 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
1921 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1922 cpu_abort(cs
, "Programmable interval timer interrupt "
1923 "while in user mode. Aborting\n");
1925 case POWERPC_EXCP_IO
: /* IO error exception */
1926 cpu_abort(cs
, "IO error exception while in user mode. "
1929 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1930 cpu_abort(cs
, "Run mode exception while in user mode. "
1933 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1934 cpu_abort(cs
, "Emulation trap exception not handled\n");
1936 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1937 cpu_abort(cs
, "Instruction fetch TLB exception "
1938 "while in user-mode. Aborting");
1940 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1941 cpu_abort(cs
, "Data load TLB exception while in user-mode. "
1944 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
1945 cpu_abort(cs
, "Data store TLB exception while in user-mode. "
1948 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
1949 cpu_abort(cs
, "Floating-point assist exception not handled\n");
1951 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1952 cpu_abort(cs
, "Instruction address breakpoint exception "
1955 case POWERPC_EXCP_SMI
: /* System management interrupt */
1956 cpu_abort(cs
, "System management interrupt while in user mode. "
1959 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1960 cpu_abort(cs
, "Thermal interrupt interrupt while in user mode. "
1963 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
1964 cpu_abort(cs
, "Performance monitor exception not handled\n");
1966 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1967 cpu_abort(cs
, "Vector assist exception not handled\n");
1969 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
1970 cpu_abort(cs
, "Soft patch exception not handled\n");
1972 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1973 cpu_abort(cs
, "Maintenance exception while in user mode. "
1976 case POWERPC_EXCP_STOP
: /* stop translation */
1977 /* We did invalidate the instruction cache. Go on */
1979 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
1980 /* We just stopped because of a branch. Go on */
1982 case POWERPC_EXCP_SYSCALL_USER
:
1983 /* system call in user-mode emulation */
1985 * PPC ABI uses overflow flag in cr0 to signal an error
1988 env
->crf
[0] &= ~0x1;
1989 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
1990 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
1992 if (ret
== -TARGET_ERESTARTSYS
) {
1995 if (ret
== (target_ulong
)(-TARGET_QEMU_ESIGRETURN
)) {
1996 /* Returning from a successful sigreturn syscall.
1997 Avoid corrupting register state. */
2001 if (ret
> (target_ulong
)(-515)) {
2007 case POWERPC_EXCP_STCX
:
2008 if (do_store_exclusive(env
)) {
2009 info
.si_signo
= TARGET_SIGSEGV
;
2011 info
.si_code
= TARGET_SEGV_MAPERR
;
2012 info
._sifields
._sigfault
._addr
= env
->nip
;
2013 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
2020 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2022 info
.si_signo
= sig
;
2024 info
.si_code
= TARGET_TRAP_BRKPT
;
2025 queue_signal(env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
2029 case EXCP_INTERRUPT
:
2030 /* just indicate that signals should be handled asap */
2033 cpu_abort(cs
, "Unknown exception 0x%x. Aborting\n", trapnr
);
2036 process_pending_signals(env
);
2043 # ifdef TARGET_ABI_MIPSO32
2044 # define MIPS_SYS(name, args) args,
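/*
 * With this definition of MIPS_SYS the table below reduces to an array of
 * argument counts: entry [i] is the number of arguments taken by o32
 * syscall number 4000 + i.  The o32 cpu_loop uses it to decide how many
 * arguments to fetch, reading anything beyond the fourth from the guest
 * stack.
 */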
2045 static const uint8_t mips_syscall_args
[] = {
2046 MIPS_SYS(sys_syscall
, 8) /* 4000 */
2047 MIPS_SYS(sys_exit
, 1)
2048 MIPS_SYS(sys_fork
, 0)
2049 MIPS_SYS(sys_read
, 3)
2050 MIPS_SYS(sys_write
, 3)
2051 MIPS_SYS(sys_open
, 3) /* 4005 */
2052 MIPS_SYS(sys_close
, 1)
2053 MIPS_SYS(sys_waitpid
, 3)
2054 MIPS_SYS(sys_creat
, 2)
2055 MIPS_SYS(sys_link
, 2)
2056 MIPS_SYS(sys_unlink
, 1) /* 4010 */
2057 MIPS_SYS(sys_execve
, 0)
2058 MIPS_SYS(sys_chdir
, 1)
2059 MIPS_SYS(sys_time
, 1)
2060 MIPS_SYS(sys_mknod
, 3)
2061 MIPS_SYS(sys_chmod
, 2) /* 4015 */
2062 MIPS_SYS(sys_lchown
, 3)
2063 MIPS_SYS(sys_ni_syscall
, 0)
2064 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_stat */
2065 MIPS_SYS(sys_lseek
, 3)
2066 MIPS_SYS(sys_getpid
, 0) /* 4020 */
2067 MIPS_SYS(sys_mount
, 5)
2068 MIPS_SYS(sys_umount
, 1)
2069 MIPS_SYS(sys_setuid
, 1)
2070 MIPS_SYS(sys_getuid
, 0)
2071 MIPS_SYS(sys_stime
, 1) /* 4025 */
2072 MIPS_SYS(sys_ptrace
, 4)
2073 MIPS_SYS(sys_alarm
, 1)
2074 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_fstat */
2075 MIPS_SYS(sys_pause
, 0)
2076 MIPS_SYS(sys_utime
, 2) /* 4030 */
2077 MIPS_SYS(sys_ni_syscall
, 0)
2078 MIPS_SYS(sys_ni_syscall
, 0)
2079 MIPS_SYS(sys_access
, 2)
2080 MIPS_SYS(sys_nice
, 1)
2081 MIPS_SYS(sys_ni_syscall
, 0) /* 4035 */
2082 MIPS_SYS(sys_sync
, 0)
2083 MIPS_SYS(sys_kill
, 2)
2084 MIPS_SYS(sys_rename
, 2)
2085 MIPS_SYS(sys_mkdir
, 2)
2086 MIPS_SYS(sys_rmdir
, 1) /* 4040 */
2087 MIPS_SYS(sys_dup
, 1)
2088 MIPS_SYS(sys_pipe
, 0)
2089 MIPS_SYS(sys_times
, 1)
2090 MIPS_SYS(sys_ni_syscall
, 0)
2091 MIPS_SYS(sys_brk
, 1) /* 4045 */
2092 MIPS_SYS(sys_setgid
, 1)
2093 MIPS_SYS(sys_getgid
, 0)
2094 MIPS_SYS(sys_ni_syscall
, 0) /* was signal(2) */
2095 MIPS_SYS(sys_geteuid
, 0)
2096 MIPS_SYS(sys_getegid
, 0) /* 4050 */
2097 MIPS_SYS(sys_acct
, 0)
2098 MIPS_SYS(sys_umount2
, 2)
2099 MIPS_SYS(sys_ni_syscall
, 0)
2100 MIPS_SYS(sys_ioctl
, 3)
2101 MIPS_SYS(sys_fcntl
, 3) /* 4055 */
2102 MIPS_SYS(sys_ni_syscall
, 2)
2103 MIPS_SYS(sys_setpgid
, 2)
2104 MIPS_SYS(sys_ni_syscall
, 0)
2105 MIPS_SYS(sys_olduname
, 1)
2106 MIPS_SYS(sys_umask
, 1) /* 4060 */
2107 MIPS_SYS(sys_chroot
, 1)
2108 MIPS_SYS(sys_ustat
, 2)
2109 MIPS_SYS(sys_dup2
, 2)
2110 MIPS_SYS(sys_getppid
, 0)
2111 MIPS_SYS(sys_getpgrp
, 0) /* 4065 */
2112 MIPS_SYS(sys_setsid
, 0)
2113 MIPS_SYS(sys_sigaction
, 3)
2114 MIPS_SYS(sys_sgetmask
, 0)
2115 MIPS_SYS(sys_ssetmask
, 1)
2116 MIPS_SYS(sys_setreuid
, 2) /* 4070 */
2117 MIPS_SYS(sys_setregid
, 2)
2118 MIPS_SYS(sys_sigsuspend
, 0)
2119 MIPS_SYS(sys_sigpending
, 1)
2120 MIPS_SYS(sys_sethostname
, 2)
2121 MIPS_SYS(sys_setrlimit
, 2) /* 4075 */
2122 MIPS_SYS(sys_getrlimit
, 2)
2123 MIPS_SYS(sys_getrusage
, 2)
2124 MIPS_SYS(sys_gettimeofday
, 2)
2125 MIPS_SYS(sys_settimeofday
, 2)
2126 MIPS_SYS(sys_getgroups
, 2) /* 4080 */
2127 MIPS_SYS(sys_setgroups
, 2)
2128 MIPS_SYS(sys_ni_syscall
, 0) /* old_select */
2129 MIPS_SYS(sys_symlink
, 2)
2130 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_lstat */
2131 MIPS_SYS(sys_readlink
, 3) /* 4085 */
2132 MIPS_SYS(sys_uselib
, 1)
2133 MIPS_SYS(sys_swapon
, 2)
2134 MIPS_SYS(sys_reboot
, 3)
2135 MIPS_SYS(old_readdir
, 3)
2136 MIPS_SYS(old_mmap
, 6) /* 4090 */
2137 MIPS_SYS(sys_munmap
, 2)
2138 MIPS_SYS(sys_truncate
, 2)
2139 MIPS_SYS(sys_ftruncate
, 2)
2140 MIPS_SYS(sys_fchmod
, 2)
2141 MIPS_SYS(sys_fchown
, 3) /* 4095 */
2142 MIPS_SYS(sys_getpriority
, 2)
2143 MIPS_SYS(sys_setpriority
, 3)
2144 MIPS_SYS(sys_ni_syscall
, 0)
2145 MIPS_SYS(sys_statfs
, 2)
2146 MIPS_SYS(sys_fstatfs
, 2) /* 4100 */
2147 MIPS_SYS(sys_ni_syscall
, 0) /* was ioperm(2) */
2148 MIPS_SYS(sys_socketcall
, 2)
2149 MIPS_SYS(sys_syslog
, 3)
2150 MIPS_SYS(sys_setitimer
, 3)
2151 MIPS_SYS(sys_getitimer
, 2) /* 4105 */
2152 MIPS_SYS(sys_newstat
, 2)
2153 MIPS_SYS(sys_newlstat
, 2)
2154 MIPS_SYS(sys_newfstat
, 2)
2155 MIPS_SYS(sys_uname
, 1)
2156 MIPS_SYS(sys_ni_syscall
, 0) /* 4110 was iopl(2) */
2157 MIPS_SYS(sys_vhangup
, 0)
2158 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_idle() */
2159 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_vm86 */
2160 MIPS_SYS(sys_wait4
, 4)
2161 MIPS_SYS(sys_swapoff
, 1) /* 4115 */
2162 MIPS_SYS(sys_sysinfo
, 1)
2163 MIPS_SYS(sys_ipc
, 6)
2164 MIPS_SYS(sys_fsync
, 1)
2165 MIPS_SYS(sys_sigreturn
, 0)
2166 MIPS_SYS(sys_clone
, 6) /* 4120 */
2167 MIPS_SYS(sys_setdomainname
, 2)
2168 MIPS_SYS(sys_newuname
, 1)
2169 MIPS_SYS(sys_ni_syscall
, 0) /* sys_modify_ldt */
2170 MIPS_SYS(sys_adjtimex
, 1)
2171 MIPS_SYS(sys_mprotect
, 3) /* 4125 */
2172 MIPS_SYS(sys_sigprocmask
, 3)
2173 MIPS_SYS(sys_ni_syscall
, 0) /* was create_module */
2174 MIPS_SYS(sys_init_module
, 5)
2175 MIPS_SYS(sys_delete_module
, 1)
2176 MIPS_SYS(sys_ni_syscall
, 0) /* 4130 was get_kernel_syms */
2177 MIPS_SYS(sys_quotactl
, 0)
2178 MIPS_SYS(sys_getpgid
, 1)
2179 MIPS_SYS(sys_fchdir
, 1)
2180 MIPS_SYS(sys_bdflush
, 2)
2181 MIPS_SYS(sys_sysfs
, 3) /* 4135 */
2182 MIPS_SYS(sys_personality
, 1)
2183 MIPS_SYS(sys_ni_syscall
, 0) /* for afs_syscall */
2184 MIPS_SYS(sys_setfsuid
, 1)
2185 MIPS_SYS(sys_setfsgid
, 1)
2186 MIPS_SYS(sys_llseek
, 5) /* 4140 */
2187 MIPS_SYS(sys_getdents
, 3)
2188 MIPS_SYS(sys_select
, 5)
2189 MIPS_SYS(sys_flock
, 2)
2190 MIPS_SYS(sys_msync
, 3)
2191 MIPS_SYS(sys_readv
, 3) /* 4145 */
2192 MIPS_SYS(sys_writev
, 3)
2193 MIPS_SYS(sys_cacheflush
, 3)
2194 MIPS_SYS(sys_cachectl
, 3)
2195 MIPS_SYS(sys_sysmips
, 4)
2196 MIPS_SYS(sys_ni_syscall
, 0) /* 4150 */
2197 MIPS_SYS(sys_getsid
, 1)
2198 MIPS_SYS(sys_fdatasync
, 0)
2199 MIPS_SYS(sys_sysctl
, 1)
2200 MIPS_SYS(sys_mlock
, 2)
2201 MIPS_SYS(sys_munlock
, 2) /* 4155 */
2202 MIPS_SYS(sys_mlockall
, 1)
2203 MIPS_SYS(sys_munlockall
, 0)
2204 MIPS_SYS(sys_sched_setparam
, 2)
2205 MIPS_SYS(sys_sched_getparam
, 2)
2206 MIPS_SYS(sys_sched_setscheduler
, 3) /* 4160 */
2207 MIPS_SYS(sys_sched_getscheduler
, 1)
2208 MIPS_SYS(sys_sched_yield
, 0)
2209 MIPS_SYS(sys_sched_get_priority_max
, 1)
2210 MIPS_SYS(sys_sched_get_priority_min
, 1)
2211 MIPS_SYS(sys_sched_rr_get_interval
, 2) /* 4165 */
2212 MIPS_SYS(sys_nanosleep
, 2)
2213 MIPS_SYS(sys_mremap
, 5)
2214 MIPS_SYS(sys_accept
, 3)
2215 MIPS_SYS(sys_bind
, 3)
2216 MIPS_SYS(sys_connect
, 3) /* 4170 */
2217 MIPS_SYS(sys_getpeername
, 3)
2218 MIPS_SYS(sys_getsockname
, 3)
2219 MIPS_SYS(sys_getsockopt
, 5)
2220 MIPS_SYS(sys_listen
, 2)
2221 MIPS_SYS(sys_recv
, 4) /* 4175 */
2222 MIPS_SYS(sys_recvfrom
, 6)
2223 MIPS_SYS(sys_recvmsg
, 3)
2224 MIPS_SYS(sys_send
, 4)
2225 MIPS_SYS(sys_sendmsg
, 3)
2226 MIPS_SYS(sys_sendto
, 6) /* 4180 */
2227 MIPS_SYS(sys_setsockopt
, 5)
2228 MIPS_SYS(sys_shutdown
, 2)
2229 MIPS_SYS(sys_socket
, 3)
2230 MIPS_SYS(sys_socketpair
, 4)
2231 MIPS_SYS(sys_setresuid
, 3) /* 4185 */
2232 MIPS_SYS(sys_getresuid
, 3)
2233 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_query_module */
2234 MIPS_SYS(sys_poll
, 3)
2235 MIPS_SYS(sys_nfsservctl
, 3)
2236 MIPS_SYS(sys_setresgid
, 3) /* 4190 */
2237 MIPS_SYS(sys_getresgid
, 3)
2238 MIPS_SYS(sys_prctl
, 5)
2239 MIPS_SYS(sys_rt_sigreturn
, 0)
2240 MIPS_SYS(sys_rt_sigaction
, 4)
2241 MIPS_SYS(sys_rt_sigprocmask
, 4) /* 4195 */
2242 MIPS_SYS(sys_rt_sigpending
, 2)
2243 MIPS_SYS(sys_rt_sigtimedwait
, 4)
2244 MIPS_SYS(sys_rt_sigqueueinfo
, 3)
2245 MIPS_SYS(sys_rt_sigsuspend
, 0)
2246 MIPS_SYS(sys_pread64
, 6) /* 4200 */
2247 MIPS_SYS(sys_pwrite64
, 6)
2248 MIPS_SYS(sys_chown
, 3)
2249 MIPS_SYS(sys_getcwd
, 2)
2250 MIPS_SYS(sys_capget
, 2)
2251 MIPS_SYS(sys_capset
, 2) /* 4205 */
2252 MIPS_SYS(sys_sigaltstack
, 2)
2253 MIPS_SYS(sys_sendfile
, 4)
2254 MIPS_SYS(sys_ni_syscall
, 0)
2255 MIPS_SYS(sys_ni_syscall
, 0)
2256 MIPS_SYS(sys_mmap2
, 6) /* 4210 */
2257 MIPS_SYS(sys_truncate64
, 4)
2258 MIPS_SYS(sys_ftruncate64
, 4)
2259 MIPS_SYS(sys_stat64
, 2)
2260 MIPS_SYS(sys_lstat64
, 2)
2261 MIPS_SYS(sys_fstat64
, 2) /* 4215 */
2262 MIPS_SYS(sys_pivot_root
, 2)
2263 MIPS_SYS(sys_mincore
, 3)
2264 MIPS_SYS(sys_madvise
, 3)
2265 MIPS_SYS(sys_getdents64
, 3)
2266 MIPS_SYS(sys_fcntl64
, 3) /* 4220 */
2267 MIPS_SYS(sys_ni_syscall
, 0)
2268 MIPS_SYS(sys_gettid
, 0)
2269 MIPS_SYS(sys_readahead
, 5)
2270 MIPS_SYS(sys_setxattr
, 5)
2271 MIPS_SYS(sys_lsetxattr
, 5) /* 4225 */
2272 MIPS_SYS(sys_fsetxattr
, 5)
2273 MIPS_SYS(sys_getxattr
, 4)
2274 MIPS_SYS(sys_lgetxattr
, 4)
2275 MIPS_SYS(sys_fgetxattr
, 4)
2276 MIPS_SYS(sys_listxattr
, 3) /* 4230 */
2277 MIPS_SYS(sys_llistxattr
, 3)
2278 MIPS_SYS(sys_flistxattr
, 3)
2279 MIPS_SYS(sys_removexattr
, 2)
2280 MIPS_SYS(sys_lremovexattr
, 2)
2281 MIPS_SYS(sys_fremovexattr
, 2) /* 4235 */
2282 MIPS_SYS(sys_tkill
, 2)
2283 MIPS_SYS(sys_sendfile64
, 5)
2284 MIPS_SYS(sys_futex
, 6)
2285 MIPS_SYS(sys_sched_setaffinity
, 3)
2286 MIPS_SYS(sys_sched_getaffinity
, 3) /* 4240 */
2287 MIPS_SYS(sys_io_setup
, 2)
2288 MIPS_SYS(sys_io_destroy
, 1)
2289 MIPS_SYS(sys_io_getevents
, 5)
2290 MIPS_SYS(sys_io_submit
, 3)
2291 MIPS_SYS(sys_io_cancel
, 3) /* 4245 */
2292 MIPS_SYS(sys_exit_group
, 1)
2293 MIPS_SYS(sys_lookup_dcookie
, 3)
2294 MIPS_SYS(sys_epoll_create
, 1)
2295 MIPS_SYS(sys_epoll_ctl
, 4)
2296 MIPS_SYS(sys_epoll_wait
, 3) /* 4250 */
2297 MIPS_SYS(sys_remap_file_pages
, 5)
2298 MIPS_SYS(sys_set_tid_address
, 1)
2299 MIPS_SYS(sys_restart_syscall
, 0)
2300 MIPS_SYS(sys_fadvise64_64
, 7)
2301 MIPS_SYS(sys_statfs64
, 3) /* 4255 */
2302 MIPS_SYS(sys_fstatfs64
, 2)
2303 MIPS_SYS(sys_timer_create
, 3)
2304 MIPS_SYS(sys_timer_settime
, 4)
2305 MIPS_SYS(sys_timer_gettime
, 2)
2306 MIPS_SYS(sys_timer_getoverrun
, 1) /* 4260 */
2307 MIPS_SYS(sys_timer_delete
, 1)
2308 MIPS_SYS(sys_clock_settime
, 2)
2309 MIPS_SYS(sys_clock_gettime
, 2)
2310 MIPS_SYS(sys_clock_getres
, 2)
2311 MIPS_SYS(sys_clock_nanosleep
, 4) /* 4265 */
2312 MIPS_SYS(sys_tgkill
, 3)
2313 MIPS_SYS(sys_utimes
, 2)
2314 MIPS_SYS(sys_mbind
, 4)
2315 MIPS_SYS(sys_ni_syscall
, 0) /* sys_get_mempolicy */
2316 MIPS_SYS(sys_ni_syscall
, 0) /* 4270 sys_set_mempolicy */
2317 MIPS_SYS(sys_mq_open
, 4)
2318 MIPS_SYS(sys_mq_unlink
, 1)
2319 MIPS_SYS(sys_mq_timedsend
, 5)
2320 MIPS_SYS(sys_mq_timedreceive
, 5)
2321 MIPS_SYS(sys_mq_notify
, 2) /* 4275 */
2322 MIPS_SYS(sys_mq_getsetattr
, 3)
2323 MIPS_SYS(sys_ni_syscall
, 0) /* sys_vserver */
2324 MIPS_SYS(sys_waitid
, 4)
2325 MIPS_SYS(sys_ni_syscall
, 0) /* available, was setaltroot */
2326 MIPS_SYS(sys_add_key
, 5)
2327 MIPS_SYS(sys_request_key
, 4)
2328 MIPS_SYS(sys_keyctl
, 5)
2329 MIPS_SYS(sys_set_thread_area
, 1)
2330 MIPS_SYS(sys_inotify_init
, 0)
2331 MIPS_SYS(sys_inotify_add_watch
, 3) /* 4285 */
2332 MIPS_SYS(sys_inotify_rm_watch
, 2)
2333 MIPS_SYS(sys_migrate_pages
, 4)
2334 MIPS_SYS(sys_openat
, 4)
2335 MIPS_SYS(sys_mkdirat
, 3)
2336 MIPS_SYS(sys_mknodat
, 4) /* 4290 */
2337 MIPS_SYS(sys_fchownat
, 5)
2338 MIPS_SYS(sys_futimesat
, 3)
2339 MIPS_SYS(sys_fstatat64
, 4)
2340 MIPS_SYS(sys_unlinkat
, 3)
2341 MIPS_SYS(sys_renameat
, 4) /* 4295 */
2342 MIPS_SYS(sys_linkat
, 5)
2343 MIPS_SYS(sys_symlinkat
, 3)
2344 MIPS_SYS(sys_readlinkat
, 4)
2345 MIPS_SYS(sys_fchmodat
, 3)
2346 MIPS_SYS(sys_faccessat
, 3) /* 4300 */
2347 MIPS_SYS(sys_pselect6
, 6)
2348 MIPS_SYS(sys_ppoll
, 5)
2349 MIPS_SYS(sys_unshare
, 1)
2350 MIPS_SYS(sys_splice
, 6)
2351 MIPS_SYS(sys_sync_file_range
, 7) /* 4305 */
2352 MIPS_SYS(sys_tee
, 4)
2353 MIPS_SYS(sys_vmsplice
, 4)
2354 MIPS_SYS(sys_move_pages
, 6)
2355 MIPS_SYS(sys_set_robust_list
, 2)
2356 MIPS_SYS(sys_get_robust_list
, 3) /* 4310 */
2357 MIPS_SYS(sys_kexec_load
, 4)
2358 MIPS_SYS(sys_getcpu
, 3)
2359 MIPS_SYS(sys_epoll_pwait
, 6)
2360 MIPS_SYS(sys_ioprio_set
, 3)
2361 MIPS_SYS(sys_ioprio_get
, 2)
2362 MIPS_SYS(sys_utimensat
, 4)
2363 MIPS_SYS(sys_signalfd
, 3)
2364 MIPS_SYS(sys_ni_syscall
, 0) /* was timerfd */
2365 MIPS_SYS(sys_eventfd
, 1)
2366 MIPS_SYS(sys_fallocate
, 6) /* 4320 */
2367 MIPS_SYS(sys_timerfd_create
, 2)
2368 MIPS_SYS(sys_timerfd_gettime
, 2)
2369 MIPS_SYS(sys_timerfd_settime
, 4)
2370 MIPS_SYS(sys_signalfd4
, 4)
2371 MIPS_SYS(sys_eventfd2
, 2) /* 4325 */
2372 MIPS_SYS(sys_epoll_create1
, 1)
2373 MIPS_SYS(sys_dup3
, 3)
2374 MIPS_SYS(sys_pipe2
, 2)
2375 MIPS_SYS(sys_inotify_init1
, 1)
2376 MIPS_SYS(sys_preadv
, 6) /* 4330 */
2377 MIPS_SYS(sys_pwritev
, 6)
2378 MIPS_SYS(sys_rt_tgsigqueueinfo
, 4)
2379 MIPS_SYS(sys_perf_event_open
, 5)
2380 MIPS_SYS(sys_accept4
, 4)
2381 MIPS_SYS(sys_recvmmsg
, 5) /* 4335 */
2382 MIPS_SYS(sys_fanotify_init
, 2)
2383 MIPS_SYS(sys_fanotify_mark
, 6)
2384 MIPS_SYS(sys_prlimit64
, 4)
2385 MIPS_SYS(sys_name_to_handle_at
, 5)
2386 MIPS_SYS(sys_open_by_handle_at
, 3) /* 4340 */
2387 MIPS_SYS(sys_clock_adjtime
, 2)
2388 MIPS_SYS(sys_syncfs
, 1)
2393 static int do_store_exclusive(CPUMIPSState
*env
)
2396 target_ulong page_addr
;
2404 page_addr
= addr
& TARGET_PAGE_MASK
;
2407 flags
= page_get_flags(page_addr
);
2408 if ((flags
& PAGE_READ
) == 0) {
2411 reg
= env
->llreg
& 0x1f;
2412 d
= (env
->llreg
& 0x20) != 0;
2414 segv
= get_user_s64(val
, addr
);
2416 segv
= get_user_s32(val
, addr
);
2419 if (val
!= env
->llval
) {
2420 env
->active_tc
.gpr
[reg
] = 0;
2423 segv
= put_user_u64(env
->llnewval
, addr
);
2425 segv
= put_user_u32(env
->llnewval
, addr
);
2428 env
->active_tc
.gpr
[reg
] = 1;
2435 env
->active_tc
.PC
+= 4;
2448 static int do_break(CPUMIPSState
*env
, target_siginfo_t
*info
,
2456 info
->si_signo
= TARGET_SIGFPE
;
2458 info
->si_code
= (code
== BRK_OVERFLOW
) ? FPE_INTOVF
: FPE_INTDIV
;
2459 queue_signal(env
, info
->si_signo
, QEMU_SI_FAULT
, &*info
);
2463 info
->si_signo
= TARGET_SIGTRAP
;
2465 queue_signal(env
, info
->si_signo
, QEMU_SI_FAULT
, &*info
);
void cpu_loop(CPUMIPSState *env)
{
    CPUState *cs = CPU(mips_env_get_cpu(env));
    target_siginfo_t info;
# ifdef TARGET_ABI_MIPSO32
    unsigned int syscall_num;

        trapnr = cpu_exec(cs);

            env->active_tc.PC += 4;
# ifdef TARGET_ABI_MIPSO32
            syscall_num = env->active_tc.gpr[2] - 4000;
            if (syscall_num >= sizeof(mips_syscall_args)) {
                ret = -TARGET_ENOSYS;
                abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;

                nb_args = mips_syscall_args[syscall_num];
                sp_reg = env->active_tc.gpr[29];
                    /* these arguments are taken from the stack */
                    if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) {
                    if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) {
                    if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) {
                    if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) {
                ret = do_syscall(env, env->active_tc.gpr[2],
                                 env->active_tc.gpr[4],
                                 env->active_tc.gpr[5],
                                 env->active_tc.gpr[6],
                                 env->active_tc.gpr[7],
                                 arg5, arg6, arg7, arg8);
            ret = do_syscall(env, env->active_tc.gpr[2],
                             env->active_tc.gpr[4], env->active_tc.gpr[5],
                             env->active_tc.gpr[6], env->active_tc.gpr[7],
                             env->active_tc.gpr[8], env->active_tc.gpr[9],
                             env->active_tc.gpr[10], env->active_tc.gpr[11]);
            if (ret == -TARGET_ERESTARTSYS) {
                env->active_tc.PC -= 4;
            if (ret == -TARGET_QEMU_ESIGRETURN) {
                /* Returning from a successful sigreturn syscall.
                   Avoid clobbering register state. */
            if ((abi_ulong)ret >= (abi_ulong)-1133) {
                env->active_tc.gpr[7] = 1; /* error flag */
                env->active_tc.gpr[7] = 0; /* error flag */
            env->active_tc.gpr[2] = ret;
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->CP0_BadVAddr;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = TARGET_SIGILL;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            if (do_store_exclusive(env)) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->active_tc.PC;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            /* The code below was inspired by the MIPS Linux kernel trap
             * handling code in arch/mips/kernel/traps.c.
             */
                abi_ulong trap_instr;
                if (env->hflags & MIPS_HFLAG_M16) {
                    if (env->insn_flags & ASE_MICROMIPS) {
                        /* microMIPS mode */
                        ret = get_user_u16(trap_instr, env->active_tc.PC);
                        if ((trap_instr >> 10) == 0x11) {
                            /* 16-bit instruction */
                            code = trap_instr & 0xf;
                            /* 32-bit instruction */
                            ret = get_user_u16(instr_lo,
                                               env->active_tc.PC + 2);
                            trap_instr = (trap_instr << 16) | instr_lo;
                            code = ((trap_instr >> 6) & ((1 << 20) - 1));
                            /* Unfortunately, microMIPS also suffers from
                               the old assembler bug...  */
                            if (code >= (1 << 10)) {
                        ret = get_user_u16(trap_instr, env->active_tc.PC);
                        code = (trap_instr >> 6) & 0x3f;
                    ret = get_user_u32(trap_instr, env->active_tc.PC);
                    /* As described in the original Linux kernel code, the
                     * below checks on 'code' are to work around an old ... */
                    code = ((trap_instr >> 6) & ((1 << 20) - 1));
                    if (code >= (1 << 10)) {
                if (do_break(env, &info, code) != 0) {
                abi_ulong trap_instr;
                unsigned int code = 0;
                if (env->hflags & MIPS_HFLAG_M16) {
                    /* microMIPS mode */
                    ret = get_user_u16(instr[0], env->active_tc.PC) ||
                          get_user_u16(instr[1], env->active_tc.PC + 2);
                    trap_instr = (instr[0] << 16) | instr[1];
                    ret = get_user_u32(trap_instr, env->active_tc.PC);
                /* The immediate versions don't provide a code.  */
                if (!(trap_instr & 0xFC000000)) {
                    if (env->hflags & MIPS_HFLAG_M16) {
                        /* microMIPS mode */
                        code = ((trap_instr >> 12) & ((1 << 4) - 1));
                        code = ((trap_instr >> 6) & ((1 << 10) - 1));
                if (do_break(env, &info, code) != 0) {
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
        process_pending_signals(env);
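/*
 * The per-target cpu_loop() implementations below all follow the same shape:
 * run translated code with cpu_exec(), turn the returned trap number into
 * either a do_syscall() call or a queued guest signal, and finally let
 * process_pending_signals() deliver whatever is pending.
 */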
#ifdef TARGET_OPENRISC

void cpu_loop(CPUOpenRISCState *env)
{
    CPUState *cs = CPU(openrisc_env_get_cpu(env));

        trapnr = cpu_exec(cs);

            qemu_log_mask(CPU_LOG_INT, "\nReset request, exit, pc is %#x\n", env->pc);
            qemu_log_mask(CPU_LOG_INT, "\nBus error, exit, pc is %#x\n", env->pc);
            gdbsig = TARGET_SIGBUS;
            cpu_dump_state(cs, stderr, fprintf, 0);
            gdbsig = TARGET_SIGSEGV;
            qemu_log_mask(CPU_LOG_INT, "\nTick time interrupt pc is %#x\n", env->pc);
            qemu_log_mask(CPU_LOG_INT, "\nAlignment pc is %#x\n", env->pc);
            gdbsig = TARGET_SIGBUS;
            qemu_log_mask(CPU_LOG_INT, "\nIllegal instruction pc is %#x\n", env->pc);
            gdbsig = TARGET_SIGILL;
            qemu_log_mask(CPU_LOG_INT, "\nExternal interrupt pc is %#x\n", env->pc);
            qemu_log_mask(CPU_LOG_INT, "\nTLB miss\n");
            qemu_log_mask(CPU_LOG_INT, "\nRange\n");
            gdbsig = TARGET_SIGSEGV;
            env->pc += 4;   /* 0xc00; */
            ret = do_syscall(env,
                             env->gpr[11], /* return value       */
                             env->gpr[3],  /* r3 - r7 are params */
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
            qemu_log_mask(CPU_LOG_INT, "\nFloating point error\n");
            qemu_log_mask(CPU_LOG_INT, "\nTrap\n");
            gdbsig = TARGET_SIGTRAP;
            qemu_log_mask(CPU_LOG_INT, "\nNR\n");
            EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n",
            gdbsig = TARGET_SIGILL;
            gdb_handlesig(cs, gdbsig);
            if (gdbsig != TARGET_SIGTRAP) {
        process_pending_signals(env);

#endif /* TARGET_OPENRISC */
void cpu_loop(CPUSH4State *env)
{
    CPUState *cs = CPU(sh_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);

            ret = do_syscall(env,
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->gregs[0] = ret;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->tea;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
void cpu_loop(CPUCRISState *env)
{
    CPUState *cs = CPU(cris_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);

            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->pregs[PR_EDA];
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            ret = do_syscall(env,
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[10] = ret;
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
#ifdef TARGET_MICROBLAZE
void cpu_loop(CPUMBState *env)
{
    CPUState *cs = CPU(mb_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);

            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            /* Return address is 4 bytes after the call. */
            env->sregs[SR_PC] = env->regs[14];
            ret = do_syscall(env,
            if (ret == -TARGET_ERESTARTSYS) {
                /* Wind back to before the syscall. */
                env->sregs[SR_PC] -= 4;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
            /* All syscall exits result in guest r14 being equal to the
             * PC we return to, because the kernel syscall exit "rtbd" does
             * this. (This is true even for sigreturn(); note that r14 is
             * not a userspace-usable register, as the kernel may clobber it
             */
            env->regs[14] = env->sregs[SR_PC];
            env->regs[17] = env->sregs[SR_PC] + 4;
            if (env->iflags & D_FLAG) {
                env->sregs[SR_ESR] |= 1 << 12;
                env->sregs[SR_PC] -= 4;
                /* FIXME: if branch was immed, replay the imm as well.  */
            env->iflags &= ~(IMM_FLAG | D_FLAG);
            switch (env->sregs[SR_ESR] & 31) {
            case ESR_EC_DIVZERO:
                info.si_signo = TARGET_SIGFPE;
                info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                info.si_signo = TARGET_SIGFPE;
                if (env->sregs[SR_FSR] & FSR_IO) {
                    info.si_code = TARGET_FPE_FLTINV;
                if (env->sregs[SR_FSR] & FSR_DZ) {
                    info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                printf("Unhandled hw-exception: 0x%x\n",
                       env->sregs[SR_ESR] & ESR_EC_MASK);
                cpu_dump_state(cs, stderr, fprintf, 0);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
void cpu_loop(CPUM68KState *env)
{
    CPUState *cs = CPU(m68k_env_get_cpu(env));
    target_siginfo_t info;
    TaskState *ts = cs->opaque;

        trapnr = cpu_exec(cs);

            if (ts->sim_syscalls) {
                get_user_u16(nr, env->pc + 2);
                do_m68k_simcall(env, nr);
        case EXCP_HALT_INSN:
            /* Semihosting syscall.  */
                do_m68k_semihosting(env, env->dregs[0]);
        case EXCP_UNSUPPORTED:
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            ts->sim_syscalls = 0;
            ret = do_syscall(env,
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->dregs[0] = ret;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->mmu.ar;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
        process_pending_signals(env);

#endif /* TARGET_M68K */
static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)
{
    target_ulong addr, val, tmp;
    target_siginfo_t info;

    addr = env->lock_addr;
    tmp = env->lock_st_addr;
    env->lock_addr = -1;
    env->lock_st_addr = 0;

        if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
        if (val == env->lock_value) {
            if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {

    info.si_signo = TARGET_SIGSEGV;
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, TARGET_SIGSEGV, QEMU_SI_FAULT, &info);
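/*
 * The helper above emulates Alpha's conditional stores (STL_C/STQ_C): the
 * lock address and value recorded by the load-locked side (env->lock_addr,
 * env->lock_value) are checked against a fresh read of memory, and the store
 * only completes while they still match; a bad address raises SIGSEGV.
 */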
void cpu_loop(CPUAlphaState *env)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);

        /* All of the traps imply a transition through PALcode, which
           implies an REI instruction has been executed.  Which means
           that the intr_flag should be cleared.  */

            fprintf(stderr, "Reset requested. Exit\n");
            fprintf(stderr, "Machine check exception. Exit\n");
        case EXCP_SMP_INTERRUPT:
        case EXCP_CLK_INTERRUPT:
        case EXCP_DEV_INTERRUPT:
            fprintf(stderr, "External interrupt. Exit\n");
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
                            ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_FLTINV;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            /* No-op.  Linux simply re-enables the FPU.  */
            env->lock_addr = -1;
            switch (env->error_code) {
                info.si_signo = TARGET_SIGTRAP;
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                trapnr = env->ir[IR_V0];
                sysret = do_syscall(env, trapnr,
                                    env->ir[IR_A0], env->ir[IR_A1],
                                    env->ir[IR_A2], env->ir[IR_A3],
                                    env->ir[IR_A4], env->ir[IR_A5],
                if (sysret == -TARGET_ERESTARTSYS) {
                if (sysret == -TARGET_QEMU_ESIGRETURN) {
                /* Syscall writes 0 to V0 to bypass error check, similar
                   to how this is handled internal to Linux kernel.
                   (Ab)use trapnr temporarily as boolean indicating error.  */
                trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
                env->ir[IR_V0] = (trapnr ? -sysret : sysret);
                env->ir[IR_A3] = trapnr;
                /* ??? We can probably elide the code using page_unprotect
                   that is checking for self-modifying code.  Instead we
                   could simply call tb_flush here.  Until we work out the
                   changes required to turn off the extra write protection,
                   this can be a no-op.  */
                /* Handled in the translator for usermode.  */
                /* Handled in the translator for usermode.  */
                info.si_signo = TARGET_SIGFPE;
                switch (env->ir[IR_A0]) {
                case TARGET_GEN_INTOVF:
                    info.si_code = TARGET_FPE_INTOVF;
                case TARGET_GEN_INTDIV:
                    info.si_code = TARGET_FPE_INTDIV;
                case TARGET_GEN_FLTOVF:
                    info.si_code = TARGET_FPE_FLTOVF;
                case TARGET_GEN_FLTUND:
                    info.si_code = TARGET_FPE_FLTUND;
                case TARGET_GEN_FLTINV:
                    info.si_code = TARGET_FPE_FLTINV;
                case TARGET_GEN_FLTINE:
                    info.si_code = TARGET_FPE_FLTRES;
                case TARGET_GEN_ROPRAND:
                info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
            if (info.si_signo) {
                env->lock_addr = -1;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);

#endif /* TARGET_ALPHA */
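/*
 * s390x system calls: the SVC number is taken from env->int_svc_code and the
 * PSW address is advanced past the SVC instruction before do_syscall() runs,
 * then wound back by the same length when the syscall must be restarted.
 */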
void cpu_loop(CPUS390XState *env)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);

        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            n = env->int_svc_code;
                /* syscalls > 255 */
            env->psw.addr += env->int_svc_ilen;
            ret = do_syscall(env, n, env->regs[2], env->regs[3],
                             env->regs[4], env->regs[5],
                             env->regs[6], env->regs[7], 0, 0);
            if (ret == -TARGET_ERESTARTSYS) {
                env->psw.addr -= env->int_svc_ilen;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            n = TARGET_TRAP_BRKPT;
            n = env->int_pgm_code;
            case PGM_PRIVILEGED:
                sig = TARGET_SIGILL;
                n = TARGET_ILL_ILLOPC;
            case PGM_PROTECTION:
            case PGM_ADDRESSING:
                sig = TARGET_SIGSEGV;
                /* XXX: check env->error_code */
                n = TARGET_SEGV_MAPERR;
                addr = env->__excp_addr;
            case PGM_SPECIFICATION:
            case PGM_SPECIAL_OP:
                sig = TARGET_SIGILL;
                n = TARGET_ILL_ILLOPN;
            case PGM_FIXPT_OVERFLOW:
                sig = TARGET_SIGFPE;
                n = TARGET_FPE_INTOVF;
            case PGM_FIXPT_DIVIDE:
                sig = TARGET_SIGFPE;
                n = TARGET_FPE_INTDIV;
                n = (env->fpc >> 8) & 0xff;
                    /* compare-and-trap */
                    /* An IEEE exception, simulated or otherwise.  */
                        n = TARGET_FPE_FLTINV;
                    } else if (n & 0x40) {
                        n = TARGET_FPE_FLTDIV;
                    } else if (n & 0x20) {
                        n = TARGET_FPE_FLTOVF;
                    } else if (n & 0x10) {
                        n = TARGET_FPE_FLTUND;
                    } else if (n & 0x08) {
                        n = TARGET_FPE_FLTRES;
                        /* ??? Quantum exception; BFP, DFP error.  */
                    sig = TARGET_SIGFPE;
                fprintf(stderr, "Unhandled program exception: %#x\n", n);
                cpu_dump_state(cs, stderr, fprintf, 0);
            addr = env->psw.addr;
            info.si_signo = sig;
            info._sifields._sigfault._addr = addr;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);

#endif /* TARGET_S390X */
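/*
 * TileGX atomic instructions (exch, cmpexch and the fetch-and-op family) are
 * not executed inline; each one raises its own exception number and the
 * helpers below perform the read-modify-write with ordinary loads and
 * stores, raising SIGSEGV through gen_sigsegv_maperr() if the guest address
 * is unmapped.
 */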
#ifdef TARGET_TILEGX

static void gen_sigill_reg(CPUTLGState *env)
{
    target_siginfo_t info;

    info.si_signo = TARGET_SIGILL;
    info.si_code = TARGET_ILL_PRVREG;
    info._sifields._sigfault._addr = env->pc;
    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

static void do_signal(CPUTLGState *env, int signo, int sigcode)
{
    target_siginfo_t info;

    info.si_signo = signo;
    info._sifields._sigfault._addr = env->pc;

    if (signo == TARGET_SIGSEGV) {
        /* The passed in sigcode is a dummy; check for a page mapping
           and pass either MAPERR or ACCERR.  */
        target_ulong addr = env->excaddr;
        info._sifields._sigfault._addr = addr;
        if (page_check_range(addr, 1, PAGE_VALID) < 0) {
            sigcode = TARGET_SEGV_MAPERR;
            sigcode = TARGET_SEGV_ACCERR;
    info.si_code = sigcode;

    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

static void gen_sigsegv_maperr(CPUTLGState *env, target_ulong addr)
{
    env->excaddr = addr;
    do_signal(env, TARGET_SIGSEGV, 0);

static void set_regval(CPUTLGState *env, uint8_t reg, uint64_t val)
{
    if (unlikely(reg >= TILEGX_R_COUNT)) {
        gen_sigill_reg(env);
        g_assert_not_reached();
    env->regs[reg] = val;

/*
 * Compare the 8-byte contents of the CmpValue SPR with the 8-byte value in
 * memory at the address held in the first source register. If the values are
 * not equal, then no memory operation is performed. If the values are equal,
 * the 8-byte quantity from the second source register is written into memory
 * at the address held in the first source register. In either case, the result
 * of the instruction is the value read from memory. The compare and write to
 * memory are atomic and thus can be used for synchronization purposes. This
 * instruction only operates for addresses aligned to an 8-byte boundary.
 * Unaligned memory access causes an Unaligned Data Reference interrupt.
 *
 * Functional Description (64-bit)
 *       uint64_t memVal = memoryReadDoubleWord (rf[SrcA]);
 *       rf[Dest] = memVal;
 *       if (memVal == SPR[CmpValueSPR])
 *           memoryWriteDoubleWord (rf[SrcA], rf[SrcB]);
 *
 * Functional Description (32-bit)
 *       uint64_t memVal = signExtend32 (memoryReadWord (rf[SrcA]));
 *       rf[Dest] = memVal;
 *       if (memVal == signExtend32 (SPR[CmpValueSPR]))
 *           memoryWriteWord (rf[SrcA], rf[SrcB]);
 *
 * This function also processes exch and exch4 which need not process SPR.
 */
static void do_exch(CPUTLGState *env, bool quad, bool cmp)
{
    target_long val, sprval;

    addr = env->atomic_srca;
    if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
        goto sigsegv_maperr;

        sprval = env->spregs[TILEGX_SPR_CMPEXCH];
        sprval = sextract64(env->spregs[TILEGX_SPR_CMPEXCH], 0, 32);

    if (!cmp || val == sprval) {
        target_long valb = env->atomic_srcb;
        if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
            goto sigsegv_maperr;

    set_regval(env, env->atomic_dstr, val);

    gen_sigsegv_maperr(env, addr);

static void do_fetch(CPUTLGState *env, int trapnr, bool quad)
{
    target_long val, valb;

    addr = env->atomic_srca;
    valb = env->atomic_srcb;
    if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
        goto sigsegv_maperr;

    case TILEGX_EXCP_OPCODE_FETCHADD:
    case TILEGX_EXCP_OPCODE_FETCHADD4:
    case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
    case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
        if ((int32_t)valb < 0) {
    case TILEGX_EXCP_OPCODE_FETCHAND:
    case TILEGX_EXCP_OPCODE_FETCHAND4:
    case TILEGX_EXCP_OPCODE_FETCHOR:
    case TILEGX_EXCP_OPCODE_FETCHOR4:
        g_assert_not_reached();

    if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
        goto sigsegv_maperr;

    set_regval(env, env->atomic_dstr, val);

    gen_sigsegv_maperr(env, addr);

void cpu_loop(CPUTLGState *env)
{
    CPUState *cs = CPU(tilegx_env_get_cpu(env));

        trapnr = cpu_exec(cs);

        case TILEGX_EXCP_SYSCALL:
            abi_ulong ret = do_syscall(env, env->regs[TILEGX_R_NR],
                                       env->regs[0], env->regs[1],
                                       env->regs[2], env->regs[3],
                                       env->regs[4], env->regs[5],
                                       env->regs[6], env->regs[7]);
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[TILEGX_R_RE] = ret;
                env->regs[TILEGX_R_ERR] = TILEGX_IS_ERRNO(ret) ? -ret : 0;
        case TILEGX_EXCP_OPCODE_EXCH:
            do_exch(env, true, false);
        case TILEGX_EXCP_OPCODE_EXCH4:
            do_exch(env, false, false);
        case TILEGX_EXCP_OPCODE_CMPEXCH:
            do_exch(env, true, true);
        case TILEGX_EXCP_OPCODE_CMPEXCH4:
            do_exch(env, false, true);
        case TILEGX_EXCP_OPCODE_FETCHADD:
        case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
        case TILEGX_EXCP_OPCODE_FETCHAND:
        case TILEGX_EXCP_OPCODE_FETCHOR:
            do_fetch(env, trapnr, true);
        case TILEGX_EXCP_OPCODE_FETCHADD4:
        case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
        case TILEGX_EXCP_OPCODE_FETCHAND4:
        case TILEGX_EXCP_OPCODE_FETCHOR4:
            do_fetch(env, trapnr, false);
        case TILEGX_EXCP_SIGNAL:
            do_signal(env, env->signo, env->sigcode);
        case TILEGX_EXCP_REG_IDN_ACCESS:
        case TILEGX_EXCP_REG_UDN_ACCESS:
            gen_sigill_reg(env);
            fprintf(stderr, "trapnr is %d[0x%x].\n", trapnr, trapnr);
            g_assert_not_reached();
        process_pending_signals(env);
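/* Per-host-thread pointer to the CPUState that is executing guest code in
   the current thread. */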
THREAD CPUState *thread_cpu;

void task_settid(TaskState *ts)
{
    if (ts->ts_tid == 0) {
        ts->ts_tid = (pid_t)syscall(SYS_gettid);

void stop_all_tasks(void)
{
    /*
     * We trust that when using NPTL, start_exclusive()
     * handles thread stopping correctly.
     */

/* Assumes contents are already zeroed.  */
void init_task_state(TaskState *ts)

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUState *new_cpu = cpu_init(cpu_model);
    CPUArchState *new_env = new_cpu->env_ptr;

    /* Reset non arch specific state */

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_cpu->breakpoints);
    QTAILQ_INIT(&new_cpu->watchpoints);
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
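/*
 * cpu_copy() is used when a new guest thread or process is created: it
 * brings up a fresh CPU with cpu_init(), copies the parent's architectural
 * state wholesale with memcpy(), and re-inserts the parent's breakpoints and
 * watchpoints so debug state carries over to the child.
 */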
static void handle_arg_help(const char *arg)
{
    usage(EXIT_SUCCESS);

static void handle_arg_log(const char *arg)
{
    mask = qemu_str_to_log_mask(arg);
        qemu_print_log_usage(stdout);
    qemu_log_needs_buffers();

static void handle_arg_log_filename(const char *arg)
{
    qemu_set_log_filename(arg, &error_fatal);

static void handle_arg_set_env(const char *arg)
{
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_setenv(envlist, token) != 0) {
            usage(EXIT_FAILURE);

static void handle_arg_unset_env(const char *arg)
{
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_unsetenv(envlist, token) != 0) {
            usage(EXIT_FAILURE);

static void handle_arg_argv0(const char *arg)
{
    argv0 = strdup(arg);

static void handle_arg_stack_size(const char *arg)
{
    guest_stack_size = strtoul(arg, &p, 0);
    if (guest_stack_size == 0) {
        usage(EXIT_FAILURE);
        guest_stack_size *= 1024 * 1024;
    } else if (*p == 'k' || *p == 'K') {
        guest_stack_size *= 1024;

static void handle_arg_ld_prefix(const char *arg)
{
    interp_prefix = strdup(arg);

static void handle_arg_pagesize(const char *arg)
{
    qemu_host_page_size = atoi(arg);
    if (qemu_host_page_size == 0 ||
        (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
        fprintf(stderr, "page size must be a power of two\n");

static void handle_arg_randseed(const char *arg)
{
    unsigned long long seed;

    if (parse_uint_full(arg, &seed, 0) != 0 || seed > UINT_MAX) {
        fprintf(stderr, "Invalid seed number: %s\n", arg);

static void handle_arg_gdb(const char *arg)
{
    gdbstub_port = atoi(arg);

static void handle_arg_uname(const char *arg)
{
    qemu_uname_release = strdup(arg);

static void handle_arg_cpu(const char *arg)
{
    cpu_model = strdup(arg);
    if (cpu_model == NULL || is_help_option(cpu_model)) {
        /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
        cpu_list(stdout, &fprintf);

static void handle_arg_guest_base(const char *arg)
{
    guest_base = strtol(arg, NULL, 0);
    have_guest_base = 1;

static void handle_arg_reserved_va(const char *arg)
{
    reserved_va = strtoul(arg, &p, 0);
        unsigned long unshifted = reserved_va;
        reserved_va <<= shift;
        if (((reserved_va >> shift) != unshifted)
#if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
            || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
            fprintf(stderr, "Reserved virtual address too big\n");
        fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);

static void handle_arg_singlestep(const char *arg)

static void handle_arg_strace(const char *arg)

static void handle_arg_version(const char *arg)
{
    printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION
           ", " QEMU_COPYRIGHT "\n");

static char *trace_file;
static void handle_arg_trace(const char *arg)
{
    trace_file = trace_opt_parse(arg);
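/*
 * Command-line handling: each qemu_argument entry ties an option name to an
 * equivalent environment variable, a flag saying whether it takes a value,
 * the handler above that consumes it, and the example/description strings
 * printed by usage().
 */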
struct qemu_argument {
    void (*handle_opt)(const char *arg);
    const char *example;

static const struct qemu_argument arg_table[] = {
    {"h",          "",                 false, handle_arg_help,
     "",           "print this help"},
    {"help",       "",                 false, handle_arg_help,
    {"g",          "QEMU_GDB",         true,  handle_arg_gdb,
     "port",       "wait gdb connection to 'port'"},
    {"L",          "QEMU_LD_PREFIX",   true,  handle_arg_ld_prefix,
     "path",       "set the elf interpreter prefix to 'path'"},
    {"s",          "QEMU_STACK_SIZE",  true,  handle_arg_stack_size,
     "size",       "set the stack size to 'size' bytes"},
    {"cpu",        "QEMU_CPU",         true,  handle_arg_cpu,
     "model",      "select CPU (-cpu help for list)"},
    {"E",          "QEMU_SET_ENV",     true,  handle_arg_set_env,
     "var=value",  "sets targets environment variable (see below)"},
    {"U",          "QEMU_UNSET_ENV",   true,  handle_arg_unset_env,
     "var",        "unsets targets environment variable (see below)"},
    {"0",          "QEMU_ARGV0",       true,  handle_arg_argv0,
     "argv0",      "forces target process argv[0] to be 'argv0'"},
    {"r",          "QEMU_UNAME",       true,  handle_arg_uname,
     "uname",      "set qemu uname release string to 'uname'"},
    {"B",          "QEMU_GUEST_BASE",  true,  handle_arg_guest_base,
     "address",    "set guest_base address to 'address'"},
    {"R",          "QEMU_RESERVED_VA", true,  handle_arg_reserved_va,
     "size",       "reserve 'size' bytes for guest virtual address space"},
    {"d",          "QEMU_LOG",         true,  handle_arg_log,
     "item[,...]", "enable logging of specified items "
     "(use '-d help' for a list of items)"},
    {"D",          "QEMU_LOG_FILENAME", true, handle_arg_log_filename,
     "logfile",    "write logs to 'logfile' (default stderr)"},
    {"p",          "QEMU_PAGESIZE",    true,  handle_arg_pagesize,
     "pagesize",   "set the host page size to 'pagesize'"},
    {"singlestep", "QEMU_SINGLESTEP",  false, handle_arg_singlestep,
     "",           "run in singlestep mode"},
    {"strace",     "QEMU_STRACE",      false, handle_arg_strace,
     "",           "log system calls"},
    {"seed",       "QEMU_RAND_SEED",   true,  handle_arg_randseed,
     "",           "Seed for pseudo-random number generator"},
    {"trace",      "QEMU_TRACE",       true,  handle_arg_trace,
     "",           "[[enable=]<pattern>][,events=<file>][,file=<file>]"},
    {"version",    "QEMU_VERSION",     false, handle_arg_version,
     "",           "display version information and exit"},
    {NULL, NULL, false, NULL, NULL, NULL}
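/*
 * Example (hypothetical paths): because every option in the table above also
 * has an environment-variable form, the following two invocations are
 * equivalent:
 *
 *   qemu-mips -L /usr/mips-linux-gnu -E LD_LIBRARY_PATH=/opt/lib ./a.out
 *   QEMU_LD_PREFIX=/usr/mips-linux-gnu QEMU_SET_ENV=LD_LIBRARY_PATH=/opt/lib \
 *       qemu-mips ./a.out
 */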
static void usage(int exitcode)
{
    const struct qemu_argument *arginfo;

    printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n"
           "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n"
           "Options and associated environment variables:\n"

    /* Calculate column widths. We must always have at least enough space
     * for the column header.
     */
    maxarglen = strlen("Argument");
    maxenvlen = strlen("Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        int arglen = strlen(arginfo->argv);
        if (arginfo->has_arg) {
            arglen += strlen(arginfo->example) + 1;
        if (strlen(arginfo->env) > maxenvlen) {
            maxenvlen = strlen(arginfo->env);
        if (arglen > maxarglen) {

    printf("%-*s %-*s Description\n", maxarglen + 1, "Argument",
           maxenvlen, "Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->has_arg) {
            printf("-%s %-*s %-*s %s\n", arginfo->argv,
                   (int)(maxarglen - strlen(arginfo->argv) - 1),
                   arginfo->example, maxenvlen, arginfo->env, arginfo->help);
            printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv,
                   maxenvlen, arginfo->env,

           "QEMU_LD_PREFIX = %s\n"
           "QEMU_STACK_SIZE = %ld byte\n",

           "You can use -E and -U options or the QEMU_SET_ENV and\n"
           "QEMU_UNSET_ENV environment variables to set and unset\n"
           "environment variables for the target process.\n"
           "It is possible to provide several variables by separating them\n"
           "by commas in getsubopt(3) style. Additionally it is possible to\n"
           "provide the -E and -U options multiple times.\n"
           "The following lines are equivalent:\n"
           "    -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
           "    -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
           "    QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
           "Note that if you provide several changes to a single variable\n"
           "the last change will stay in effect.\n");
static int parse_args(int argc, char **argv)
{
    const struct qemu_argument *arginfo;

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->env == NULL) {
        r = getenv(arginfo->env);
            arginfo->handle_opt(r);

    if (optind >= argc) {
        if (!strcmp(r, "-")) {
        /* Treat --foo the same as -foo.  */

        for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
            if (!strcmp(r, arginfo->argv)) {
                if (arginfo->has_arg) {
                    if (optind >= argc) {
                        (void) fprintf(stderr,
                            "qemu: missing argument for option '%s'\n", r);
                    arginfo->handle_opt(argv[optind]);
                    arginfo->handle_opt(NULL);

        /* no option matched the current argv */
        if (arginfo->handle_opt == NULL) {
            (void) fprintf(stderr, "qemu: unknown option '%s'\n", r);

    if (optind >= argc) {
        (void) fprintf(stderr, "qemu: no user program specified\n");

    filename = argv[optind];
    exec_path = argv[optind];
int main(int argc, char **argv, char **envp)
{
    struct target_pt_regs regs1, *regs = &regs1;
    struct image_info info1, *info = &info1;
    struct linux_binprm bprm;
    char **target_environ, **wrk;

    module_call_init(MODULE_INIT_QOM);

    if ((envlist = envlist_create()) == NULL) {
        (void) fprintf(stderr, "Unable to allocate envlist\n");

    /* add current environment into the list */
    for (wrk = environ; *wrk != NULL; wrk++) {
        (void) envlist_setenv(envlist, *wrk);

    /* Read the stack limit from the kernel.  If it's "unlimited",
       then we can do little else besides use the default.  */
        if (getrlimit(RLIMIT_STACK, &lim) == 0
            && lim.rlim_cur != RLIM_INFINITY
            && lim.rlim_cur == (target_long)lim.rlim_cur) {
            guest_stack_size = lim.rlim_cur;

    qemu_add_opts(&qemu_trace_opts);

    optind = parse_args(argc, argv);

    if (!trace_init_backends()) {
    trace_init_file(trace_file);

    memset(regs, 0, sizeof(struct target_pt_regs));

    /* Zero out image_info */
    memset(info, 0, sizeof(struct image_info));

    memset(&bprm, 0, sizeof (bprm));

    /* Scan interp_prefix dir for replacement files. */
    init_paths(interp_prefix);

    init_qemu_uname_release();

    if (cpu_model == NULL) {
#if defined(TARGET_I386)
#ifdef TARGET_X86_64
        cpu_model = "qemu64";
        cpu_model = "qemu32";
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_M68K)
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
        cpu_model = "TI UltraSparc II";
        cpu_model = "Fujitsu MB86904";
#elif defined(TARGET_MIPS)
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
#elif defined TARGET_OPENRISC
        cpu_model = "or1200";
#elif defined(TARGET_PPC)
# ifdef TARGET_PPC64
        cpu_model = "POWER8";
#elif defined TARGET_SH4
        cpu_model = TYPE_SH7785_CPU;

    /* NOTE: we need to init the CPU at this stage to get
       qemu_host_page_size */
    cpu = cpu_init(cpu_model);
        fprintf(stderr, "Unable to find CPU definition\n");

    if (getenv("QEMU_STRACE")) {

    if (getenv("QEMU_RAND_SEED")) {
        handle_arg_randseed(getenv("QEMU_RAND_SEED"));

    target_environ = envlist_to_environ(envlist, NULL);
    envlist_free(envlist);

    /*
     * Now that page sizes are configured in cpu_init() we can do
     * proper page alignment for guest_base.
     */
    guest_base = HOST_PAGE_ALIGN(guest_base);

    if (reserved_va || have_guest_base) {
        guest_base = init_guest_space(guest_base, reserved_va, 0,
        if (guest_base == (unsigned long)-1) {
            fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
                    "space for use as guest address space (check your virtual "
                    "memory ulimit setting or reserve less using -R option)\n",

        mmap_next_start = reserved_va;

    /*
     * Read in mmap_min_addr kernel parameter.  This value is used
     * when loading the ELF image to determine whether guest_base
     * is needed.  It is also used in mmap_find_vma.
     */
        if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
            if (fscanf(fp, "%lu", &tmp) == 1) {
                mmap_min_addr = tmp;
                qemu_log_mask(CPU_LOG_PAGE, "host mmap_min_addr=0x%lx\n", mmap_min_addr);

    /*
     * Prepare copy of argv vector for target.
     */
    target_argc = argc - optind;
    target_argv = calloc(target_argc + 1, sizeof (char *));
    if (target_argv == NULL) {
        (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");

    /*
     * If argv0 is specified (using '-0' switch) we replace
     * argv[0] pointer with the given one.
     */
    if (argv0 != NULL) {
        target_argv[i++] = strdup(argv0);
    for (; i < target_argc; i++) {
        target_argv[i] = strdup(argv[optind + i]);
    target_argv[target_argc] = NULL;

    ts = g_new0(TaskState, 1);
    init_task_state(ts);
    /* build Task State */

    execfd = qemu_getauxval(AT_EXECFD);
        execfd = open(filename, O_RDONLY);
            printf("Error while loading %s: %s\n", filename, strerror(errno));
            _exit(EXIT_FAILURE);

    ret = loader_exec(execfd, filename, target_argv, target_environ, regs,
        printf("Error while loading %s: %s\n", filename, strerror(-ret));
        _exit(EXIT_FAILURE);

    for (wrk = target_environ; *wrk; wrk++) {
    free(target_environ);

    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        qemu_log("guest_base 0x%lx\n", guest_base);
        qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
        qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code);
        qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data);
        qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk);
        qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry);

    target_set_brk(info->brk);

    /* Now that we've loaded the binary, GUEST_BASE is fixed.  Delay
       generating the prologue until now so that the prologue can take
       the real value of GUEST_BASE into account.  */
    tcg_prologue_init(&tcg_ctx);
#if defined(TARGET_I386)
    env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
    env->hflags |= HF_PE_MASK | HF_CPL_MASK;
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        env->cr[4] |= CR4_OSFXSR_MASK;
        env->hflags |= HF_OSFXSR_MASK;
#ifndef TARGET_ABI32
    /* enable 64 bit mode if possible */
    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
    env->cr[4] |= CR4_PAE_MASK;
    env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
    env->hflags |= HF_LMA_MASK;

    /* flags setup : we activate the IRQs by default as in user mode */
    env->eflags |= IF_MASK;

    /* linux register setup */
#ifndef TARGET_ABI32
    env->regs[R_EAX] = regs->rax;
    env->regs[R_EBX] = regs->rbx;
    env->regs[R_ECX] = regs->rcx;
    env->regs[R_EDX] = regs->rdx;
    env->regs[R_ESI] = regs->rsi;
    env->regs[R_EDI] = regs->rdi;
    env->regs[R_EBP] = regs->rbp;
    env->regs[R_ESP] = regs->rsp;
    env->eip = regs->rip;
    env->regs[R_EAX] = regs->eax;
    env->regs[R_EBX] = regs->ebx;
    env->regs[R_ECX] = regs->ecx;
    env->regs[R_EDX] = regs->edx;
    env->regs[R_ESI] = regs->esi;
    env->regs[R_EDI] = regs->edi;
    env->regs[R_EBP] = regs->ebp;
    env->regs[R_ESP] = regs->esp;
    env->eip = regs->eip;

    /* linux interrupt setup */
#ifndef TARGET_ABI32
    env->idt.limit = 511;
    env->idt.limit = 255;
    env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
                                PROT_READ|PROT_WRITE,
                                MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    idt_table = g2h(env->idt.base);

    /* linux segment setup */
        uint64_t *gdt_table;
        env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
        gdt_table = g2h(env->gdt.base);
        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
        /* 64 bit code segment */
        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
        write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_FS, __USER_DS);
    cpu_x86_load_seg(env, R_GS, __USER_DS);
    /* This hack makes Wine work... */
    env->segs[R_FS].selector = 0;
    cpu_x86_load_seg(env, R_DS, 0);
    cpu_x86_load_seg(env, R_ES, 0);
    cpu_x86_load_seg(env, R_FS, 0);
    cpu_x86_load_seg(env, R_GS, 0);
#elif defined(TARGET_AARCH64)
    if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
                "The selected ARM CPU does not support 64 bit mode\n");
    for (i = 0; i < 31; i++) {
        env->xregs[i] = regs->regs[i];
    env->xregs[31] = regs->sp;
#elif defined(TARGET_ARM)
    cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->uregs[i];
#ifdef TARGET_WORDS_BIGENDIAN
    if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
        && (info->elf_flags & EF_ARM_BE8)) {
        env->uncached_cpsr |= CPSR_E;
        env->cp15.sctlr_el[1] |= SCTLR_E0E;
        env->cp15.sctlr_el[1] |= SCTLR_B;
#elif defined(TARGET_UNICORE32)
    cpu_asr_write(env, regs->uregs[32], 0xffffffff);
    for (i = 0; i < 32; i++) {
        env->regs[i] = regs->uregs[i];
#elif defined(TARGET_SPARC)
    env->npc = regs->npc;
    for (i = 0; i < 8; i++)
        env->gregs[i] = regs->u_regs[i];
    for (i = 0; i < 8; i++)
        env->regwptr[i] = regs->u_regs[i + 8];
#elif defined(TARGET_PPC)
#if defined(TARGET_PPC64)
    int flag = (env->insns_flags2 & PPC2_BOOKE206) ? MSR_CM : MSR_SF;
#if defined(TARGET_ABI32)
    env->msr &= ~((target_ulong)1 << flag);
    env->msr |= (target_ulong)1 << flag;
    env->nip = regs->nip;
    for (i = 0; i < 32; i++) {
        env->gpr[i] = regs->gpr[i];
#elif defined(TARGET_M68K)
    env->dregs[0] = regs->d0;
    env->dregs[1] = regs->d1;
    env->dregs[2] = regs->d2;
    env->dregs[3] = regs->d3;
    env->dregs[4] = regs->d4;
    env->dregs[5] = regs->d5;
    env->dregs[6] = regs->d6;
    env->dregs[7] = regs->d7;
    env->aregs[0] = regs->a0;
    env->aregs[1] = regs->a1;
    env->aregs[2] = regs->a2;
    env->aregs[3] = regs->a3;
    env->aregs[4] = regs->a4;
    env->aregs[5] = regs->a5;
    env->aregs[6] = regs->a6;
    env->aregs[7] = regs->usp;
    ts->sim_syscalls = 1;
#elif defined(TARGET_MICROBLAZE)
    env->regs[0] = regs->r0;
    env->regs[1] = regs->r1;
    env->regs[2] = regs->r2;
    env->regs[3] = regs->r3;
    env->regs[4] = regs->r4;
    env->regs[5] = regs->r5;
    env->regs[6] = regs->r6;
    env->regs[7] = regs->r7;
    env->regs[8] = regs->r8;
    env->regs[9] = regs->r9;
    env->regs[10] = regs->r10;
    env->regs[11] = regs->r11;
    env->regs[12] = regs->r12;
    env->regs[13] = regs->r13;
    env->regs[14] = regs->r14;
    env->regs[15] = regs->r15;
    env->regs[16] = regs->r16;
    env->regs[17] = regs->r17;
    env->regs[18] = regs->r18;
    env->regs[19] = regs->r19;
    env->regs[20] = regs->r20;
    env->regs[21] = regs->r21;
    env->regs[22] = regs->r22;
    env->regs[23] = regs->r23;
    env->regs[24] = regs->r24;
    env->regs[25] = regs->r25;
    env->regs[26] = regs->r26;
    env->regs[27] = regs->r27;
    env->regs[28] = regs->r28;
    env->regs[29] = regs->r29;
    env->regs[30] = regs->r30;
    env->regs[31] = regs->r31;
    env->sregs[SR_PC] = regs->pc;
#elif defined(TARGET_MIPS)
    for (i = 0; i < 32; i++) {
        env->active_tc.gpr[i] = regs->regs[i];
    env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
    if (regs->cp0_epc & 1) {
        env->hflags |= MIPS_HFLAG_M16;
    if (((info->elf_flags & EF_MIPS_NAN2008) != 0) !=
        ((env->active_fpu.fcr31 & (1 << FCR31_NAN2008)) != 0)) {
        if ((env->active_fpu.fcr31_rw_bitmask &
             (1 << FCR31_NAN2008)) == 0) {
            fprintf(stderr, "ELF binary's NaN mode not supported by CPU\n");
        if ((info->elf_flags & EF_MIPS_NAN2008) != 0) {
            env->active_fpu.fcr31 |= (1 << FCR31_NAN2008);
            env->active_fpu.fcr31 &= ~(1 << FCR31_NAN2008);
        restore_snan_bit_mode(env);
#elif defined(TARGET_OPENRISC)
    for (i = 0; i < 32; i++) {
        env->gpr[i] = regs->gpr[i];
#elif defined(TARGET_SH4)
    for (i = 0; i < 16; i++) {
        env->gregs[i] = regs->regs[i];
#elif defined(TARGET_ALPHA)
    for (i = 0; i < 28; i++) {
        env->ir[i] = ((abi_ulong *)regs)[i];
    env->ir[IR_SP] = regs->usp;
#elif defined(TARGET_CRIS)
    env->regs[0] = regs->r0;
    env->regs[1] = regs->r1;
    env->regs[2] = regs->r2;
    env->regs[3] = regs->r3;
    env->regs[4] = regs->r4;
    env->regs[5] = regs->r5;
    env->regs[6] = regs->r6;
    env->regs[7] = regs->r7;
    env->regs[8] = regs->r8;
    env->regs[9] = regs->r9;
    env->regs[10] = regs->r10;
    env->regs[11] = regs->r11;
    env->regs[12] = regs->r12;
    env->regs[13] = regs->r13;
    env->regs[14] = info->start_stack;
    env->regs[15] = regs->acr;
    env->pc = regs->erp;
#elif defined(TARGET_S390X)
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->gprs[i];
    env->psw.mask = regs->psw.mask;
    env->psw.addr = regs->psw.addr;
#elif defined(TARGET_TILEGX)
    for (i = 0; i < TILEGX_R_COUNT; i++) {
        env->regs[i] = regs->regs[i];
    for (i = 0; i < TILEGX_SPR_COUNT; i++) {
#error unsupported target CPU

#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call.  */

    if (gdbserver_start(gdbstub_port) < 0) {
        fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
    gdb_handlesig(cpu, 0);

    trace_init_vcpu_events();