/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-version.h"
#include <sys/syscall.h>
#include <sys/resource.h>

#include "qapi/error.h"
#include "qemu/path.h"
#include "qemu/config-file.h"
#include "qemu/cutils.h"
#include "qemu/help_option.h"
#include "exec/exec-all.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
#include "trace/control.h"
#include "glib-compat.h"
static const char *filename;
static const char *argv0;
static int gdbstub_port;
static envlist_t *envlist;
static const char *cpu_model;
unsigned long mmap_min_addr;
unsigned long guest_base;
#define EXCP_DUMP(env, fmt, ...)                                        \
do {                                                                    \
    CPUState *cs = ENV_GET_CPU(env);                                    \
    fprintf(stderr, fmt , ## __VA_ARGS__);                              \
    cpu_dump_state(cs, stderr, fprintf, 0);                             \
    if (qemu_log_separate()) {                                          \
        qemu_log(fmt, ## __VA_ARGS__);                                  \
        log_cpu_state(cs, 0);                                           \
    }                                                                   \
} while (0)
#if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
/*
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
 *
 * This way we will never overlap with our own libraries or binaries or stack
 * or anything else that QEMU maps.
 */
# ifdef TARGET_MIPS
/* MIPS only supports 31 bits of virtual address space for user space */
unsigned long reserved_va = 0x77000000;
# else
unsigned long reserved_va = 0xf7000000;
# endif
#else
unsigned long reserved_va;
#endif
static void usage(int exitcode);

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;

/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
   we allocate a bigger stack. Need a better solution, for example
   by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;
void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}
#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)
/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work.  */
static QemuMutex cpu_list_lock;
static QemuMutex exclusive_lock;
static QemuCond exclusive_cond;
static QemuCond exclusive_resume;
static int pending_cpus;
void qemu_init_cpu_loop(void)
{
    qemu_mutex_init(&cpu_list_lock);
    qemu_mutex_init(&exclusive_lock);
    qemu_cond_init(&exclusive_cond);
    qemu_cond_init(&exclusive_resume);
}
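
/*
 * Note (added, not in the original source): fork_end() below re-initialises
 * these same primitives in the child process, because mutexes and condition
 * variables that were held across fork() cannot be relied upon in the child.
 */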
/* Make sure everything is in a consistent state for calling fork().  */
void fork_start(void)
{
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    qemu_mutex_lock(&exclusive_lock);
void fork_end(int child)
{
    mmap_fork_end(child);
    if (child) {
        CPUState *cpu, *next_cpu;
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads.  */
        CPU_FOREACH_SAFE(cpu, next_cpu) {
            if (cpu != thread_cpu) {
                QTAILQ_REMOVE(&cpus, cpu, node);
            }
        }
        qemu_mutex_init(&exclusive_lock);
        qemu_mutex_init(&cpu_list_lock);
        qemu_cond_init(&exclusive_cond);
        qemu_cond_init(&exclusive_resume);
        qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
        gdbserver_fork(thread_cpu);
    } else {
        qemu_mutex_unlock(&exclusive_lock);
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
    }
}
/* Wait for pending exclusive operations to complete.  The exclusive lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        qemu_cond_wait(&exclusive_resume, &exclusive_lock);
    }
}
/* Start an exclusive operation.
   Must only be called from outside cpu_exec.  */
static inline void start_exclusive(void)
{
    qemu_mutex_lock(&exclusive_lock);

    /* Make all other cpus stop executing.  */
    CPU_FOREACH(other_cpu) {
        if (other_cpu->running) {
            pending_cpus++;
            cpu_exit(other_cpu);
        }
    }
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &exclusive_lock);
    }
/* Finish an exclusive operation.  */
static inline void __attribute__((unused)) end_exclusive(void)
{
    qemu_cond_broadcast(&exclusive_resume);
    qemu_mutex_unlock(&exclusive_lock);
}
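
/*
 * Usage sketch (added, not in the original source): a caller that needs to
 * emulate an atomic guest operation from outside cpu_exec would bracket it as
 *
 *     start_exclusive();
 *     ... operate while no other cpu executes guest code ...
 *     end_exclusive();
 */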
/* Wait for exclusive ops to finish, and begin cpu execution.  */
static inline void cpu_exec_start(CPUState *cpu)
{
    qemu_mutex_lock(&exclusive_lock);
    exclusive_idle();
    cpu->running = true;
    qemu_mutex_unlock(&exclusive_lock);
}
/* Mark cpu as not executing, and release pending exclusive ops.  */
static inline void cpu_exec_end(CPUState *cpu)
{
    qemu_mutex_lock(&exclusive_lock);
    cpu->running = false;
    if (pending_cpus > 1) {
        pending_cpus--;
        if (pending_cpus == 1) {
            qemu_cond_signal(&exclusive_cond);
        }
    }
    qemu_mutex_unlock(&exclusive_lock);
}
void cpu_list_lock(void)
{
    qemu_mutex_lock(&cpu_list_lock);
}

void cpu_list_unlock(void)
{
    qemu_mutex_unlock(&cpu_list_lock);
}
/***********************************************************/
/* CPUX86 core interface */

uint64_t cpu_get_tsc(CPUX86State *env)
{
    return cpu_get_host_ticks();
}
static void write_dt(void *ptr, unsigned long addr, unsigned long limit,

    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
static uint64_t *idt_table;

static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
{
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p[2] = tswap32(addr >> 32);
/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
}
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
{
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate(idt_table + n, 0, dpl, 0, 0);
}
void cpu_loop(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);

            /* linux syscall from int $0x80 */
            ret = do_syscall(env,
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[R_EAX] = ret;
            }

            /* linux syscall from syscall instruction */
            ret = do_syscall(env,
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[R_EAX] = ret;
            }

            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

            /* XXX: potential problem if ABI32 */
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_fault(env);

            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

            info.si_signo = TARGET_SIGSEGV;
            if (!(env->error_code & 1))
                info.si_code = TARGET_SEGV_MAPERR;
            else
                info.si_code = TARGET_SEGV_ACCERR;
            info._sifields._sigfault._addr = env->cr[2];
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);

            /* division by zero */
            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_INTDIV;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);

            info.si_signo = TARGET_SIGTRAP;
            if (trapnr == EXCP01_DB) {
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->eip;
            } else {
                info.si_code = TARGET_SI_KERNEL;
                info._sifields._sigfault._addr = 0;
            }
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);

            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

            /* just indicate that signals should be handled asap */

            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

            pc = env->segs[R_CS].base + env->eip;
            EXCP_DUMP(env, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",

        process_pending_signals(env);
#define get_user_code_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \

#define get_user_code_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \

#define get_user_data_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \

#define get_user_data_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \

#define put_user_data_u32(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap32(__x);                         \
        }                                               \
        put_user_u32(__x, (gaddr));                     \

#define put_user_data_u16(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap16(__x);                         \
        }                                               \
        put_user_u16(__x, (gaddr));                     \
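
/*
 * Illustrative note (added, not in the original source): do_strex() below
 * uses the *_data_* variants of these macros so that guest data accesses are
 * byte-swapped when arm_cpu_bswap_data(env) reports that the guest's data
 * endianness is the opposite of the access being performed, e.g.:
 *
 *     segv = get_user_data_u32(val, addr, env);
 *     segv = put_user_data_u32(val, addr, env);
 */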
/* Commpage handling -- there is no commpage for AArch64 */

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 *
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
    uint64_t oldval, newval, val;
    target_siginfo_t info;

    /* Based on the 32 bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations. However things like ldrex/strex are much harder so
       there's not much point trying.  */
    cpsr = cpsr_read(env);

    if (get_user_u64(oldval, env->regs[0])) {
        env->exception.vaddress = env->regs[0];

    if (get_user_u64(newval, env->regs[1])) {
        env->exception.vaddress = env->regs[1];

    if (get_user_u64(val, addr)) {
        env->exception.vaddress = addr;

    if (put_user_u64(val, addr)) {
        env->exception.vaddress = addr;

    cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);

    /* We get the PC of the entry address - which is as good as anything,
       on a real kernel what you get depends on which mode it uses. */
    info.si_signo = TARGET_SIGSEGV;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->exception.vaddress;
    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
/* Handle a jump to the kernel code page.  */
do_kernel_trap(CPUARMState *env)
{
    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        /* ??? No-op. Will need to do better for SMP. */
    case 0xffff0fc0: /* __kernel_cmpxchg */
        /* XXX: This only works between threads, not between processes.
           It's probably possible to implement this with native host
           operations. However things like ldrex/strex are much harder so
           there's not much point trying.  */
        cpsr = cpsr_read(env);
        /* FIXME: This should SEGV if the access fails.  */
        if (get_user_u32(val, addr))
        if (val == env->regs[0]) {
            /* FIXME: Check for segfaults.  */
            put_user_u32(val, addr);
        cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = cpu_get_tls(env);
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);

    /* Jump back to the caller.  */
    addr = env->regs[14];
    env->regs[15] = addr;
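
/*
 * Illustrative note (added, not in the original source): guest code reaches
 * do_kernel_trap() by branching to the fixed commpage helper addresses
 * handled above.  For __kernel_cmpxchg, for example, the guest places oldval
 * in r0, newval in r1 and the target pointer in r2 and calls 0xffff0fc0; the
 * emulated compare-and-swap result is reported back through r0 and the C
 * flag via cpsr_write().
 */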
/* Store exclusive handling for AArch32 */
static int do_strex(CPUARMState *env)
{
    if (env->exclusive_addr != env->exclusive_test) {

    /* We know we're always AArch32 so the address is in uint32_t range
     * unless it was the -1 exclusive-monitor-lost value (which won't
     * match exclusive_test above).
     */
    assert(extract64(env->exclusive_addr, 32, 32) == 0);
    addr = env->exclusive_addr;
    size = env->exclusive_info & 0xf;

        segv = get_user_u8(val, addr);
        segv = get_user_data_u16(val, addr, env);
        segv = get_user_data_u32(val, addr, env);

        env->exception.vaddress = addr;

        segv = get_user_data_u32(valhi, addr + 4, env);
            env->exception.vaddress = addr + 4;

        if (arm_cpu_bswap_data(env)) {
            val = deposit64((uint64_t)valhi, 32, 32, val);
        } else {
            val = deposit64(val, 32, 32, valhi);
        }

    if (val != env->exclusive_val) {

    val = env->regs[(env->exclusive_info >> 8) & 0xf];
        segv = put_user_u8(val, addr);
        segv = put_user_data_u16(val, addr, env);
        segv = put_user_data_u32(val, addr, env);

        env->exception.vaddress = addr;

        val = env->regs[(env->exclusive_info >> 12) & 0xf];
        segv = put_user_data_u32(val, addr + 4, env);
            env->exception.vaddress = addr + 4;

    env->regs[(env->exclusive_info >> 4) & 0xf] = rc;
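
/*
 * Added note (derived from the field accesses above, not a comment from the
 * original source): for AArch32, env->exclusive_info packs the access size in
 * bits [3:0], the status/result register index in bits [7:4], the stored
 * register in bits [11:8] and the second stored register (for the doubleword
 * case) in bits [15:12].
 */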
void cpu_loop(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    unsigned int n, insn;
    target_siginfo_t info;

        trapnr = cpu_exec(cs);

                TaskState *ts = cs->opaque;

                /* we handle the FPU emulation here, as Linux */
                /* we get the opcode */
                /* FIXME - what to do if get_user() fails? */
                get_user_code_u32(opcode, env->regs[15], env);

                rc = EmulateAll(opcode, &ts->fpa, env);
                if (rc == 0) { /* illegal instruction */
                    info.si_signo = TARGET_SIGILL;
                    info.si_code = TARGET_ILL_ILLOPN;
                    info._sifields._sigfault._addr = env->regs[15];
                    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                } else if (rc < 0) { /* FP exception */
                    /* translate softfloat flags to FPSR flags */
                    if (-rc & float_flag_invalid)
                    if (-rc & float_flag_divbyzero)
                    if (-rc & float_flag_overflow)
                    if (-rc & float_flag_underflow)
                    if (-rc & float_flag_inexact)

                    FPSR fpsr = ts->fpa.fpsr;
                    //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);

                    if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
                        info.si_signo = TARGET_SIGFPE;

                        /* ordered by priority, least first */
                        if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES;
                        if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND;
                        if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF;
                        if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV;
                        if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV;

                        info._sifields._sigfault._addr = env->regs[15];
                        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

                    /* accumulate unenabled exceptions */
                    if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC))
                    if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC))
                    if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC))
                    if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC))
                    if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC))

                } else { /* everything OK */

            if (trapnr == EXCP_BKPT) {
                    /* FIXME - what to do if get_user() fails? */
                    get_user_code_u16(insn, env->regs[15], env);

                    /* FIXME - what to do if get_user() fails? */
                    get_user_code_u32(insn, env->regs[15], env);
                    n = (insn & 0xf) | ((insn >> 4) & 0xff0);

                    /* FIXME - what to do if get_user() fails? */
                    get_user_code_u16(insn, env->regs[15] - 2, env);

                    /* FIXME - what to do if get_user() fails? */
                    get_user_code_u32(insn, env->regs[15] - 4, env);

            if (n == ARM_NR_cacheflush) {
            } else if (n == ARM_NR_semihosting
                       || n == ARM_NR_thumb_semihosting) {
                env->regs[0] = do_arm_semihosting (env);
            } else if (n == 0 || n >= ARM_SYSCALL_BASE || env->thumb) {
                if (env->thumb || n == 0) {
                    n -= ARM_SYSCALL_BASE;
                if ( n > ARM_NR_BASE) {
                    case ARM_NR_cacheflush:
                        cpu_set_tls(env, env->regs[0]);
                    case ARM_NR_breakpoint:
                        env->regs[15] -= env->thumb ? 2 : 4;
                        gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
                        env->regs[0] = -TARGET_ENOSYS;

                    ret = do_syscall(env,
                    if (ret == -TARGET_ERESTARTSYS) {
                        env->regs[15] -= env->thumb ? 2 : 4;
                    } else if (ret != -TARGET_QEMU_ESIGRETURN) {

            /* just indicate that signals should be handled asap */

            if (!do_strex(env)) {
            /* fall through for segv */
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            addr = env->exception.vaddress;
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = addr;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

        case EXCP_KERNEL_TRAP:
            if (do_kernel_trap(env))
            /* nothing to do here for user-mode, just resume guest code */

            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);

        process_pending_signals(env);
/*
 * Handle AArch64 store-release exclusive
 *
 * rs = gets the status result of store exclusive
 * rt = is the register that is stored
 * rt2 = is the second register store (in STP)
 */
static int do_strex_a64(CPUARMState *env)
{
    /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
    size = extract32(env->exclusive_info, 0, 2);
    is_pair = extract32(env->exclusive_info, 2, 1);
    rs = extract32(env->exclusive_info, 4, 5);
    rt = extract32(env->exclusive_info, 9, 5);
    rt2 = extract32(env->exclusive_info, 14, 5);

    addr = env->exclusive_addr;

    if (addr != env->exclusive_test) {

        segv = get_user_u8(val, addr);
        segv = get_user_u16(val, addr);
        segv = get_user_u32(val, addr);
        segv = get_user_u64(val, addr);

        env->exception.vaddress = addr;

    if (val != env->exclusive_val) {

            segv = get_user_u32(val, addr + 4);
            segv = get_user_u64(val, addr + 8);

            env->exception.vaddress = addr + (size == 2 ? 4 : 8);

        if (val != env->exclusive_high) {

    /* handle the zero register */
    val = rt == 31 ? 0 : env->xregs[rt];

        segv = put_user_u8(val, addr);
        segv = put_user_u16(val, addr);
        segv = put_user_u32(val, addr);
        segv = put_user_u64(val, addr);

        /* handle the zero register */
        val = rt2 == 31 ? 0 : env->xregs[rt2];

            segv = put_user_u32(val, addr + 4);
            segv = put_user_u64(val, addr + 8);

            env->exception.vaddress = addr + (size == 2 ? 4 : 8);

    /* rs == 31 encodes a write to the ZR, thus throwing away
     * the status return. This is rather silly but valid.
     */
    env->xregs[rs] = rc;

    /* instruction faulted, PC does not advance */
    /* either way a strex releases any exclusive lock we have */
    env->exclusive_addr = -1;
/* AArch64 main loop */
void cpu_loop(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);

            ret = do_syscall(env,
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->xregs[0] = ret;
            }
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */

            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

            if (!do_strex_a64(env)) {
            /* fall through for segv */
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->exception.vaddress;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

            env->xregs[0] = do_arm_semihosting(env);

            /* nothing to do here for user-mode, just resume guest code */

            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);

        process_pending_signals(env);
        /* Exception return on AArch64 always clears the exclusive monitor,
         * so any return to running guest code implies this.
         * A strex (successful or otherwise) also clears the monitor, so
         * we don't need to specialcase EXCP_STREX.
         */
        env->exclusive_addr = -1;

#endif /* ndef TARGET_ABI32 */
#ifdef TARGET_UNICORE32

void cpu_loop(CPUUniCore32State *env)
{
    CPUState *cs = CPU(uc32_env_get_cpu(env));
    unsigned int n, insn;
    target_siginfo_t info;

        trapnr = cpu_exec(cs);

        case UC32_EXCP_PRIV:
            get_user_u32(insn, env->regs[31] - 4);
            n = insn & 0xffffff;

            if (n >= UC32_SYSCALL_BASE) {
                n -= UC32_SYSCALL_BASE;
                if (n == UC32_SYSCALL_NR_set_tls) {
                    cpu_set_tls(env, env->regs[0]);

                    abi_long ret = do_syscall(env,
                    if (ret == -TARGET_ERESTARTSYS) {
                    } else if (ret != -TARGET_QEMU_ESIGRETURN) {

        case UC32_EXCP_DTRAP:
        case UC32_EXCP_ITRAP:
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->cp0.c4_faultaddr;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */

            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

        process_pending_signals(env);

    EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
#define SPARC64_STACK_BIAS 2047

/* WARNING: dealing with register windows _is_ complicated. More info
   can be found at http://www.sics.se/~psm/sparcstack.html */
static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
{
    index = (index + cwp * 16) % (16 * env->nwindows);
    /* wrap handling : if cwp is on the last window, then we use the
       registers 'after' the end */
    if (index < 8 && env->cwp == env->nwindows - 1)
        index += 16 * env->nwindows;

/* save the register window 'cwp1' */
static inline void save_window_offset(CPUSPARCState *env, int cwp1)
{
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
        sp_ptr += SPARC64_STACK_BIAS;
#endif
#if defined(DEBUG_WIN)
    printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",

    for(i = 0; i < 16; i++) {
        /* FIXME - what to do if put_user() fails? */
        put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);
    }
static void save_window(CPUSPARCState *env)
{
#ifndef TARGET_SPARC64
    unsigned int new_wim;
    new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
#else
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
#endif
static void restore_window(CPUSPARCState *env)
{
#ifndef TARGET_SPARC64
    unsigned int new_wim;
#endif
    unsigned int i, cwp1;

#ifndef TARGET_SPARC64
    new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);
#endif

    /* restore the invalid window */
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
        sp_ptr += SPARC64_STACK_BIAS;
#endif
#if defined(DEBUG_WIN)
    printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n",

    for(i = 0; i < 16; i++) {
        /* FIXME - what to do if get_user() fails? */
        get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);
    }
#ifdef TARGET_SPARC64
    if (env->cleanwin < env->nwindows - 1)
static void flush_windows(CPUSPARCState *env)
{
        /* if restore would invoke restore_window(), then we can stop */
        cwp1 = cpu_cwp_inc(env, env->cwp + offset);
#ifndef TARGET_SPARC64
        if (env->wim & (1 << cwp1))
#else
        if (env->canrestore == 0)
#endif
        save_window_offset(env, cwp1);

    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
#ifndef TARGET_SPARC64
    /* set wim so that restore will reload the registers */
    env->wim = 1 << cwp1;
#endif
#if defined(DEBUG_WIN)
    printf("flush_windows: nb=%d\n", offset - 1);
#endif
void cpu_loop (CPUSPARCState *env)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);

        /* Compute PSR before exposing state.  */
        if (env->cc_op != CC_OP_FLAGS) {

#ifndef TARGET_SPARC64
            ret = do_syscall (env, env->gregs[1],
                              env->regwptr[0], env->regwptr[1],
                              env->regwptr[2], env->regwptr[3],
                              env->regwptr[4], env->regwptr[5],
            if (ret == -TARGET_ERESTARTSYS || ret == -TARGET_QEMU_ESIGRETURN) {
            if ((abi_ulong)ret >= (abi_ulong)(-515)) {
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
                env->xcc |= PSR_CARRY;
#else
                env->psr |= PSR_CARRY;
#endif
            } else {
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
                env->xcc &= ~PSR_CARRY;
#else
                env->psr &= ~PSR_CARRY;
#endif
            }
            env->regwptr[0] = ret;
            /* next instruction */
            env->npc = env->npc + 4;
        case 0x83: /* flush windows */
            /* next instruction */
            env->npc = env->npc + 4;
#ifndef TARGET_SPARC64
        case TT_WIN_OVF: /* window overflow */
        case TT_WIN_UNF: /* window underflow */
            restore_window(env);
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->mmuregs[4];
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case TT_SPILL: /* window overflow */
        case TT_FILL: /* window underflow */
            restore_window(env);
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            if (trapnr == TT_DFAULT)
                info._sifields._sigfault._addr = env->dmmuregs[4];
            else
                info._sifields._sigfault._addr = cpu_tsptr(env)->tpc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
#ifndef TARGET_ABI32
            sparc64_get_context(env);
            sparc64_set_context(env);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

            printf ("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);

        process_pending_signals (env);
static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env)
{
    return cpu_get_host_ticks();
}

uint64_t cpu_ppc_load_tbl(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env);
}

uint32_t cpu_ppc_load_tbu(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env) >> 32;
}

uint64_t cpu_ppc_load_atbl(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env);
}

uint32_t cpu_ppc_load_atbu(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env) >> 32;
}

uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env)
    __attribute__ (( alias ("cpu_ppc_load_tbu") ));

uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env)
{
    return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
}
/* XXX: to be fixed */
int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)

int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
static int do_store_exclusive(CPUPPCState *env)
{
    target_ulong page_addr;
    target_ulong val, val2 __attribute__((unused)) = 0;

    addr = env->reserve_ea;
    page_addr = addr & TARGET_PAGE_MASK;

    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {

        int reg = env->reserve_info & 0x1f;
        int size = env->reserve_info >> 5;

        if (addr == env->reserve_addr) {
            case 1: segv = get_user_u8(val, addr); break;
            case 2: segv = get_user_u16(val, addr); break;
            case 4: segv = get_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
            case 8: segv = get_user_u64(val, addr); break;
                segv = get_user_u64(val, addr);
                segv = get_user_u64(val2, addr + 8);

            if (!segv && val == env->reserve_val) {
                val = env->gpr[reg];
                case 1: segv = put_user_u8(val, addr); break;
                case 2: segv = put_user_u16(val, addr); break;
                case 4: segv = put_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
                case 8: segv = put_user_u64(val, addr); break;
                    if (val2 == env->reserve_val2) {
                        val = env->gpr[reg+1];
                        val2 = env->gpr[reg+1];
                        segv = put_user_u64(val, addr);
                        segv = put_user_u64(val2, addr + 8);

    env->crf[0] = (stored << 1) | xer_so;
    env->reserve_addr = (target_ulong)-1;
void cpu_loop(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);

        case POWERPC_EXCP_NONE:
        case POWERPC_EXCP_CRITICAL: /* Critical input */
            cpu_abort(cs, "Critical interrupt while in user mode. "
        case POWERPC_EXCP_MCHECK: /* Machine check exception */
            cpu_abort(cs, "Machine check exception while in user mode. "
        case POWERPC_EXCP_DSI: /* Data storage exception */
            /* XXX: check this. Seems bugged */
            switch (env->error_code & 0xFF000000) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;

                info.si_signo = TARGET_SIGILL;
                info.si_code = TARGET_ILL_ILLADR;

                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_ACCERR;

                /* Let's send a regular segfault... */
                EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;

            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case POWERPC_EXCP_ISI: /* Instruction storage exception */
            /* XXX: check this */
            switch (env->error_code & 0xFF000000) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;

                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_ACCERR;

                /* Let's send a regular segfault... */
                EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;

            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case POWERPC_EXCP_EXTERNAL: /* External input */
            cpu_abort(cs, "External interrupt while in user mode. "
        case POWERPC_EXCP_ALIGN: /* Alignment exception */
            /* XXX: check this */
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case POWERPC_EXCP_PROGRAM: /* Program exception */
        case POWERPC_EXCP_HV_EMU: /* HV emulation */
            /* XXX: check this */
            switch (env->error_code & ~0xF) {
            case POWERPC_EXCP_FP:
                info.si_signo = TARGET_SIGFPE;
                switch (env->error_code & 0xF) {
                case POWERPC_EXCP_FP_OX:
                    info.si_code = TARGET_FPE_FLTOVF;
                case POWERPC_EXCP_FP_UX:
                    info.si_code = TARGET_FPE_FLTUND;
                case POWERPC_EXCP_FP_ZX:
                case POWERPC_EXCP_FP_VXZDZ:
                    info.si_code = TARGET_FPE_FLTDIV;
                case POWERPC_EXCP_FP_XX:
                    info.si_code = TARGET_FPE_FLTRES;
                case POWERPC_EXCP_FP_VXSOFT:
                    info.si_code = TARGET_FPE_FLTINV;
                case POWERPC_EXCP_FP_VXSNAN:
                case POWERPC_EXCP_FP_VXISI:
                case POWERPC_EXCP_FP_VXIDI:
                case POWERPC_EXCP_FP_VXIMZ:
                case POWERPC_EXCP_FP_VXVC:
                case POWERPC_EXCP_FP_VXSQRT:
                case POWERPC_EXCP_FP_VXCVI:
                    info.si_code = TARGET_FPE_FLTSUB;
                    EXCP_DUMP(env, "Unknown floating point exception (%02x)\n",
            case POWERPC_EXCP_INVAL:
                info.si_signo = TARGET_SIGILL;
                switch (env->error_code & 0xF) {
                case POWERPC_EXCP_INVAL_INVAL:
                    info.si_code = TARGET_ILL_ILLOPC;
                case POWERPC_EXCP_INVAL_LSWX:
                    info.si_code = TARGET_ILL_ILLOPN;
                case POWERPC_EXCP_INVAL_SPR:
                    info.si_code = TARGET_ILL_PRVREG;
                case POWERPC_EXCP_INVAL_FP:
                    info.si_code = TARGET_ILL_COPROC;
                    EXCP_DUMP(env, "Unknown invalid operation (%02x)\n",
                              env->error_code & 0xF);
                    info.si_code = TARGET_ILL_ILLADR;
            case POWERPC_EXCP_PRIV:
                info.si_signo = TARGET_SIGILL;
                switch (env->error_code & 0xF) {
                case POWERPC_EXCP_PRIV_OPC:
                    info.si_code = TARGET_ILL_PRVOPC;
                case POWERPC_EXCP_PRIV_REG:
                    info.si_code = TARGET_ILL_PRVREG;
                    EXCP_DUMP(env, "Unknown privilege violation (%02x)\n",
                              env->error_code & 0xF);
                    info.si_code = TARGET_ILL_PRVOPC;
            case POWERPC_EXCP_TRAP:
                cpu_abort(cs, "Tried to call a TRAP\n");
                /* Should not happen ! */
                cpu_abort(cs, "Unknown program exception (%02x)\n",
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case POWERPC_EXCP_SYSCALL: /* System call exception */
            cpu_abort(cs, "Syscall exception while in user mode. "
        case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case POWERPC_EXCP_DECR: /* Decrementer exception */
            cpu_abort(cs, "Decrementer interrupt while in user mode. "
        case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
            cpu_abort(cs, "Fix interval timer interrupt while in user mode. "
        case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
            cpu_abort(cs, "Watchdog timer interrupt while in user mode. "
        case POWERPC_EXCP_DTLB: /* Data TLB error */
            cpu_abort(cs, "Data TLB exception while in user mode. "
        case POWERPC_EXCP_ITLB: /* Instruction TLB error */
            cpu_abort(cs, "Instruction TLB exception while in user mode. "
        case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */
            cpu_abort(cs, "Embedded floating-point data IRQ not handled\n");
        case POWERPC_EXCP_EFPRI: /* Embedded floating-point round IRQ */
            cpu_abort(cs, "Embedded floating-point round IRQ not handled\n");
        case POWERPC_EXCP_EPERFM: /* Embedded performance monitor IRQ */
            cpu_abort(cs, "Performance monitor exception not handled\n");
        case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
            cpu_abort(cs, "Doorbell interrupt while in user mode. "
        case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
            cpu_abort(cs, "Doorbell critical interrupt while in user mode. "
        case POWERPC_EXCP_RESET: /* System reset exception */
            cpu_abort(cs, "Reset interrupt while in user mode. "
        case POWERPC_EXCP_DSEG: /* Data segment exception */
            cpu_abort(cs, "Data segment exception while in user mode. "
        case POWERPC_EXCP_ISEG: /* Instruction segment exception */
            cpu_abort(cs, "Instruction segment exception "
                      "while in user mode. Aborting\n");
        /* PowerPC 64 with hypervisor mode support */
        case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
            cpu_abort(cs, "Hypervisor decrementer interrupt "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_TRACE: /* Trace exception */
            /*
             * we use this exception to emulate step-by-step execution mode.
             */
        /* PowerPC 64 with hypervisor mode support */
        case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
            cpu_abort(cs, "Hypervisor data storage exception "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_HISI: /* Hypervisor instruction storage excp */
            cpu_abort(cs, "Hypervisor instruction storage exception "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
            cpu_abort(cs, "Hypervisor data segment exception "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment excp */
            cpu_abort(cs, "Hypervisor instruction segment exception "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_VPU: /* Vector unavailable exception */
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case POWERPC_EXCP_PIT: /* Programmable interval timer IRQ */
            cpu_abort(cs, "Programmable interval timer interrupt "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_IO: /* IO error exception */
            cpu_abort(cs, "IO error exception while in user mode. "
        case POWERPC_EXCP_RUNM: /* Run mode exception */
            cpu_abort(cs, "Run mode exception while in user mode. "
        case POWERPC_EXCP_EMUL: /* Emulation trap exception */
            cpu_abort(cs, "Emulation trap exception not handled\n");
        case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
            cpu_abort(cs, "Instruction fetch TLB exception "
                      "while in user-mode. Aborting");
        case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
            cpu_abort(cs, "Data load TLB exception while in user-mode. "
        case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
            cpu_abort(cs, "Data store TLB exception while in user-mode. "
        case POWERPC_EXCP_FPA: /* Floating-point assist exception */
            cpu_abort(cs, "Floating-point assist exception not handled\n");
        case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
            cpu_abort(cs, "Instruction address breakpoint exception "
        case POWERPC_EXCP_SMI: /* System management interrupt */
            cpu_abort(cs, "System management interrupt while in user mode. "
        case POWERPC_EXCP_THERM: /* Thermal interrupt */
            cpu_abort(cs, "Thermal interrupt interrupt while in user mode. "
        case POWERPC_EXCP_PERFM: /* Embedded performance monitor IRQ */
            cpu_abort(cs, "Performance monitor exception not handled\n");
        case POWERPC_EXCP_VPUA: /* Vector assist exception */
            cpu_abort(cs, "Vector assist exception not handled\n");
        case POWERPC_EXCP_SOFTP: /* Soft patch exception */
            cpu_abort(cs, "Soft patch exception not handled\n");
        case POWERPC_EXCP_MAINT: /* Maintenance exception */
            cpu_abort(cs, "Maintenance exception while in user mode. "
        case POWERPC_EXCP_STOP: /* stop translation */
            /* We did invalidate the instruction cache. Go on */
        case POWERPC_EXCP_BRANCH: /* branch instruction: */
            /* We just stopped because of a branch. Go on */
        case POWERPC_EXCP_SYSCALL_USER:
            /* system call in user-mode emulation */
            /*
             * PPC ABI uses overflow flag in cr0 to signal an error
             */
            env->crf[0] &= ~0x1;
            ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4],
                             env->gpr[5], env->gpr[6], env->gpr[7],
            if (ret == -TARGET_ERESTARTSYS) {
            if (ret == (target_ulong)(-TARGET_QEMU_ESIGRETURN)) {
                /* Returning from a successful sigreturn syscall.
                   Avoid corrupting register state.  */
            if (ret > (target_ulong)(-515)) {
        case POWERPC_EXCP_STCX:
            if (do_store_exclusive(env)) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->nip;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */

            cpu_abort(cs, "Unknown exception 0x%x. Aborting\n", trapnr);

        process_pending_signals(env);
# ifdef TARGET_ABI_MIPSO32
#  define MIPS_SYS(name, args) args,
static const uint8_t mips_syscall_args[] = {
    MIPS_SYS(sys_syscall, 8)    /* 4000 */
    MIPS_SYS(sys_exit, 1)
    MIPS_SYS(sys_fork, 0)
    MIPS_SYS(sys_read, 3)
    MIPS_SYS(sys_write, 3)
    MIPS_SYS(sys_open, 3)    /* 4005 */
    MIPS_SYS(sys_close, 1)
    MIPS_SYS(sys_waitpid, 3)
    MIPS_SYS(sys_creat, 2)
    MIPS_SYS(sys_link, 2)
    MIPS_SYS(sys_unlink, 1)    /* 4010 */
    MIPS_SYS(sys_execve, 0)
    MIPS_SYS(sys_chdir, 1)
    MIPS_SYS(sys_time, 1)
    MIPS_SYS(sys_mknod, 3)
    MIPS_SYS(sys_chmod, 2)    /* 4015 */
    MIPS_SYS(sys_lchown, 3)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_ni_syscall, 0)    /* was sys_stat */
    MIPS_SYS(sys_lseek, 3)
    MIPS_SYS(sys_getpid, 0)    /* 4020 */
    MIPS_SYS(sys_mount, 5)
    MIPS_SYS(sys_umount, 1)
    MIPS_SYS(sys_setuid, 1)
    MIPS_SYS(sys_getuid, 0)
    MIPS_SYS(sys_stime, 1)    /* 4025 */
    MIPS_SYS(sys_ptrace, 4)
    MIPS_SYS(sys_alarm, 1)
    MIPS_SYS(sys_ni_syscall, 0)    /* was sys_fstat */
    MIPS_SYS(sys_pause, 0)
    MIPS_SYS(sys_utime, 2)    /* 4030 */
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_access, 2)
    MIPS_SYS(sys_nice, 1)
    MIPS_SYS(sys_ni_syscall, 0)    /* 4035 */
    MIPS_SYS(sys_sync, 0)
    MIPS_SYS(sys_kill, 2)
    MIPS_SYS(sys_rename, 2)
    MIPS_SYS(sys_mkdir, 2)
    MIPS_SYS(sys_rmdir, 1)    /* 4040 */
    MIPS_SYS(sys_dup, 1)
    MIPS_SYS(sys_pipe, 0)
    MIPS_SYS(sys_times, 1)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_brk, 1)    /* 4045 */
    MIPS_SYS(sys_setgid, 1)
    MIPS_SYS(sys_getgid, 0)
    MIPS_SYS(sys_ni_syscall, 0)    /* was signal(2) */
    MIPS_SYS(sys_geteuid, 0)
    MIPS_SYS(sys_getegid, 0)    /* 4050 */
    MIPS_SYS(sys_acct, 0)
    MIPS_SYS(sys_umount2, 2)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_ioctl, 3)
    MIPS_SYS(sys_fcntl, 3)    /* 4055 */
    MIPS_SYS(sys_ni_syscall, 2)
    MIPS_SYS(sys_setpgid, 2)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_olduname, 1)
    MIPS_SYS(sys_umask, 1)    /* 4060 */
    MIPS_SYS(sys_chroot, 1)
    MIPS_SYS(sys_ustat, 2)
    MIPS_SYS(sys_dup2, 2)
    MIPS_SYS(sys_getppid, 0)
    MIPS_SYS(sys_getpgrp, 0)    /* 4065 */
    MIPS_SYS(sys_setsid, 0)
    MIPS_SYS(sys_sigaction, 3)
    MIPS_SYS(sys_sgetmask, 0)
    MIPS_SYS(sys_ssetmask, 1)
    MIPS_SYS(sys_setreuid, 2)    /* 4070 */
    MIPS_SYS(sys_setregid, 2)
    MIPS_SYS(sys_sigsuspend, 0)
    MIPS_SYS(sys_sigpending, 1)
    MIPS_SYS(sys_sethostname, 2)
    MIPS_SYS(sys_setrlimit, 2)    /* 4075 */
    MIPS_SYS(sys_getrlimit, 2)
    MIPS_SYS(sys_getrusage, 2)
    MIPS_SYS(sys_gettimeofday, 2)
    MIPS_SYS(sys_settimeofday, 2)
    MIPS_SYS(sys_getgroups, 2)    /* 4080 */
    MIPS_SYS(sys_setgroups, 2)
    MIPS_SYS(sys_ni_syscall, 0)    /* old_select */
    MIPS_SYS(sys_symlink, 2)
    MIPS_SYS(sys_ni_syscall, 0)    /* was sys_lstat */
    MIPS_SYS(sys_readlink, 3)    /* 4085 */
    MIPS_SYS(sys_uselib, 1)
    MIPS_SYS(sys_swapon, 2)
    MIPS_SYS(sys_reboot, 3)
    MIPS_SYS(old_readdir, 3)
    MIPS_SYS(old_mmap, 6)    /* 4090 */
    MIPS_SYS(sys_munmap, 2)
    MIPS_SYS(sys_truncate, 2)
    MIPS_SYS(sys_ftruncate, 2)
    MIPS_SYS(sys_fchmod, 2)
    MIPS_SYS(sys_fchown, 3)    /* 4095 */
    MIPS_SYS(sys_getpriority, 2)
    MIPS_SYS(sys_setpriority, 3)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_statfs, 2)
    MIPS_SYS(sys_fstatfs, 2)    /* 4100 */
    MIPS_SYS(sys_ni_syscall, 0)    /* was ioperm(2) */
    MIPS_SYS(sys_socketcall, 2)
    MIPS_SYS(sys_syslog, 3)
    MIPS_SYS(sys_setitimer, 3)
    MIPS_SYS(sys_getitimer, 2)    /* 4105 */
    MIPS_SYS(sys_newstat, 2)
    MIPS_SYS(sys_newlstat, 2)
    MIPS_SYS(sys_newfstat, 2)
    MIPS_SYS(sys_uname, 1)
    MIPS_SYS(sys_ni_syscall, 0)    /* 4110 was iopl(2) */
    MIPS_SYS(sys_vhangup, 0)
    MIPS_SYS(sys_ni_syscall, 0)    /* was sys_idle() */
    MIPS_SYS(sys_ni_syscall, 0)    /* was sys_vm86 */
    MIPS_SYS(sys_wait4, 4)
    MIPS_SYS(sys_swapoff, 1)    /* 4115 */
    MIPS_SYS(sys_sysinfo, 1)
    MIPS_SYS(sys_ipc, 6)
    MIPS_SYS(sys_fsync, 1)
    MIPS_SYS(sys_sigreturn, 0)
    MIPS_SYS(sys_clone, 6)    /* 4120 */
    MIPS_SYS(sys_setdomainname, 2)
    MIPS_SYS(sys_newuname, 1)
    MIPS_SYS(sys_ni_syscall, 0)    /* sys_modify_ldt */
    MIPS_SYS(sys_adjtimex, 1)
    MIPS_SYS(sys_mprotect, 3)    /* 4125 */
    MIPS_SYS(sys_sigprocmask, 3)
    MIPS_SYS(sys_ni_syscall, 0)    /* was create_module */
    MIPS_SYS(sys_init_module, 5)
    MIPS_SYS(sys_delete_module, 1)
    MIPS_SYS(sys_ni_syscall, 0)    /* 4130 was get_kernel_syms */
    MIPS_SYS(sys_quotactl, 0)
    MIPS_SYS(sys_getpgid, 1)
    MIPS_SYS(sys_fchdir, 1)
    MIPS_SYS(sys_bdflush, 2)
    MIPS_SYS(sys_sysfs, 3)    /* 4135 */
    MIPS_SYS(sys_personality, 1)
    MIPS_SYS(sys_ni_syscall, 0)    /* for afs_syscall */
    MIPS_SYS(sys_setfsuid, 1)
    MIPS_SYS(sys_setfsgid, 1)
    MIPS_SYS(sys_llseek, 5)    /* 4140 */
    MIPS_SYS(sys_getdents, 3)
    MIPS_SYS(sys_select, 5)
    MIPS_SYS(sys_flock, 2)
    MIPS_SYS(sys_msync, 3)
    MIPS_SYS(sys_readv, 3)    /* 4145 */
    MIPS_SYS(sys_writev, 3)
    MIPS_SYS(sys_cacheflush, 3)
    MIPS_SYS(sys_cachectl, 3)
    MIPS_SYS(sys_sysmips, 4)
    MIPS_SYS(sys_ni_syscall, 0)    /* 4150 */
    MIPS_SYS(sys_getsid, 1)
    MIPS_SYS(sys_fdatasync, 0)
    MIPS_SYS(sys_sysctl, 1)
    MIPS_SYS(sys_mlock, 2)
    MIPS_SYS(sys_munlock, 2)    /* 4155 */
    MIPS_SYS(sys_mlockall, 1)
    MIPS_SYS(sys_munlockall, 0)
    MIPS_SYS(sys_sched_setparam, 2)
    MIPS_SYS(sys_sched_getparam, 2)
    MIPS_SYS(sys_sched_setscheduler, 3)    /* 4160 */
    MIPS_SYS(sys_sched_getscheduler, 1)
    MIPS_SYS(sys_sched_yield, 0)
    MIPS_SYS(sys_sched_get_priority_max, 1)
    MIPS_SYS(sys_sched_get_priority_min, 1)
    MIPS_SYS(sys_sched_rr_get_interval, 2)    /* 4165 */
    MIPS_SYS(sys_nanosleep, 2)
    MIPS_SYS(sys_mremap, 5)
    MIPS_SYS(sys_accept, 3)
    MIPS_SYS(sys_bind, 3)
    MIPS_SYS(sys_connect, 3)    /* 4170 */
    MIPS_SYS(sys_getpeername, 3)
    MIPS_SYS(sys_getsockname, 3)
    MIPS_SYS(sys_getsockopt, 5)
    MIPS_SYS(sys_listen, 2)
    MIPS_SYS(sys_recv, 4)    /* 4175 */
    MIPS_SYS(sys_recvfrom, 6)
    MIPS_SYS(sys_recvmsg, 3)
    MIPS_SYS(sys_send, 4)
    MIPS_SYS(sys_sendmsg, 3)
    MIPS_SYS(sys_sendto, 6)    /* 4180 */
    MIPS_SYS(sys_setsockopt, 5)
    MIPS_SYS(sys_shutdown, 2)
    MIPS_SYS(sys_socket, 3)
    MIPS_SYS(sys_socketpair, 4)
    MIPS_SYS(sys_setresuid, 3)    /* 4185 */
    MIPS_SYS(sys_getresuid, 3)
    MIPS_SYS(sys_ni_syscall, 0)    /* was sys_query_module */
    MIPS_SYS(sys_poll, 3)
    MIPS_SYS(sys_nfsservctl, 3)
    MIPS_SYS(sys_setresgid, 3)    /* 4190 */
    MIPS_SYS(sys_getresgid, 3)
    MIPS_SYS(sys_prctl, 5)
    MIPS_SYS(sys_rt_sigreturn, 0)
    MIPS_SYS(sys_rt_sigaction, 4)
    MIPS_SYS(sys_rt_sigprocmask, 4)    /* 4195 */
    MIPS_SYS(sys_rt_sigpending, 2)
    MIPS_SYS(sys_rt_sigtimedwait, 4)
    MIPS_SYS(sys_rt_sigqueueinfo, 3)
    MIPS_SYS(sys_rt_sigsuspend, 0)
    MIPS_SYS(sys_pread64, 6)    /* 4200 */
    MIPS_SYS(sys_pwrite64, 6)
    MIPS_SYS(sys_chown, 3)
    MIPS_SYS(sys_getcwd, 2)
    MIPS_SYS(sys_capget, 2)
    MIPS_SYS(sys_capset, 2)    /* 4205 */
    MIPS_SYS(sys_sigaltstack, 2)
    MIPS_SYS(sys_sendfile, 4)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_mmap2, 6)    /* 4210 */
    MIPS_SYS(sys_truncate64, 4)
    MIPS_SYS(sys_ftruncate64, 4)
    MIPS_SYS(sys_stat64, 2)
    MIPS_SYS(sys_lstat64, 2)
    MIPS_SYS(sys_fstat64, 2)    /* 4215 */
    MIPS_SYS(sys_pivot_root, 2)
    MIPS_SYS(sys_mincore, 3)
    MIPS_SYS(sys_madvise, 3)
    MIPS_SYS(sys_getdents64, 3)
    MIPS_SYS(sys_fcntl64, 3)    /* 4220 */
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_gettid, 0)
    MIPS_SYS(sys_readahead, 5)
    MIPS_SYS(sys_setxattr, 5)
    MIPS_SYS(sys_lsetxattr, 5)    /* 4225 */
    MIPS_SYS(sys_fsetxattr, 5)
    MIPS_SYS(sys_getxattr, 4)
    MIPS_SYS(sys_lgetxattr, 4)
    MIPS_SYS(sys_fgetxattr, 4)
    MIPS_SYS(sys_listxattr, 3)    /* 4230 */
    MIPS_SYS(sys_llistxattr, 3)
    MIPS_SYS(sys_flistxattr, 3)
    MIPS_SYS(sys_removexattr, 2)
    MIPS_SYS(sys_lremovexattr, 2)
    MIPS_SYS(sys_fremovexattr, 2)    /* 4235 */
    MIPS_SYS(sys_tkill, 2)
    MIPS_SYS(sys_sendfile64, 5)
    MIPS_SYS(sys_futex, 6)
    MIPS_SYS(sys_sched_setaffinity, 3)
    MIPS_SYS(sys_sched_getaffinity, 3)    /* 4240 */
    MIPS_SYS(sys_io_setup, 2)
    MIPS_SYS(sys_io_destroy, 1)
    MIPS_SYS(sys_io_getevents, 5)
    MIPS_SYS(sys_io_submit, 3)
    MIPS_SYS(sys_io_cancel, 3)    /* 4245 */
    MIPS_SYS(sys_exit_group, 1)
    MIPS_SYS(sys_lookup_dcookie, 3)
    MIPS_SYS(sys_epoll_create, 1)
    MIPS_SYS(sys_epoll_ctl, 4)
    MIPS_SYS(sys_epoll_wait, 3)    /* 4250 */
    MIPS_SYS(sys_remap_file_pages, 5)
    MIPS_SYS(sys_set_tid_address, 1)
    MIPS_SYS(sys_restart_syscall, 0)
    MIPS_SYS(sys_fadvise64_64, 7)
    MIPS_SYS(sys_statfs64, 3)    /* 4255 */
    MIPS_SYS(sys_fstatfs64, 2)
    MIPS_SYS(sys_timer_create, 3)
    MIPS_SYS(sys_timer_settime, 4)
    MIPS_SYS(sys_timer_gettime, 2)
    MIPS_SYS(sys_timer_getoverrun, 1)    /* 4260 */
    MIPS_SYS(sys_timer_delete, 1)
    MIPS_SYS(sys_clock_settime, 2)
    MIPS_SYS(sys_clock_gettime, 2)
    MIPS_SYS(sys_clock_getres, 2)
    MIPS_SYS(sys_clock_nanosleep, 4)    /* 4265 */
    MIPS_SYS(sys_tgkill, 3)
    MIPS_SYS(sys_utimes, 2)
    MIPS_SYS(sys_mbind, 4)
    MIPS_SYS(sys_ni_syscall, 0)    /* sys_get_mempolicy */
    MIPS_SYS(sys_ni_syscall, 0)    /* 4270 sys_set_mempolicy */
    MIPS_SYS(sys_mq_open, 4)
    MIPS_SYS(sys_mq_unlink, 1)
    MIPS_SYS(sys_mq_timedsend, 5)
    MIPS_SYS(sys_mq_timedreceive, 5)
    MIPS_SYS(sys_mq_notify, 2)    /* 4275 */
    MIPS_SYS(sys_mq_getsetattr, 3)
    MIPS_SYS(sys_ni_syscall, 0)    /* sys_vserver */
    MIPS_SYS(sys_waitid, 4)
    MIPS_SYS(sys_ni_syscall, 0)    /* available, was setaltroot */
    MIPS_SYS(sys_add_key, 5)
    MIPS_SYS(sys_request_key, 4)
    MIPS_SYS(sys_keyctl, 5)
    MIPS_SYS(sys_set_thread_area, 1)
    MIPS_SYS(sys_inotify_init, 0)
    MIPS_SYS(sys_inotify_add_watch, 3)    /* 4285 */
    MIPS_SYS(sys_inotify_rm_watch, 2)
    MIPS_SYS(sys_migrate_pages, 4)
    MIPS_SYS(sys_openat, 4)
    MIPS_SYS(sys_mkdirat, 3)
    MIPS_SYS(sys_mknodat, 4)    /* 4290 */
    MIPS_SYS(sys_fchownat, 5)
    MIPS_SYS(sys_futimesat, 3)
    MIPS_SYS(sys_fstatat64, 4)
    MIPS_SYS(sys_unlinkat, 3)
    MIPS_SYS(sys_renameat, 4)    /* 4295 */
    MIPS_SYS(sys_linkat, 5)
    MIPS_SYS(sys_symlinkat, 3)
    MIPS_SYS(sys_readlinkat, 4)
    MIPS_SYS(sys_fchmodat, 3)
    MIPS_SYS(sys_faccessat, 3)    /* 4300 */
    MIPS_SYS(sys_pselect6, 6)
    MIPS_SYS(sys_ppoll, 5)
    MIPS_SYS(sys_unshare, 1)
    MIPS_SYS(sys_splice, 6)
    MIPS_SYS(sys_sync_file_range, 7)    /* 4305 */
    MIPS_SYS(sys_tee, 4)
    MIPS_SYS(sys_vmsplice, 4)
    MIPS_SYS(sys_move_pages, 6)
    MIPS_SYS(sys_set_robust_list, 2)
    MIPS_SYS(sys_get_robust_list, 3)    /* 4310 */
    MIPS_SYS(sys_kexec_load, 4)
    MIPS_SYS(sys_getcpu, 3)
    MIPS_SYS(sys_epoll_pwait, 6)
    MIPS_SYS(sys_ioprio_set, 3)
    MIPS_SYS(sys_ioprio_get, 2)
    MIPS_SYS(sys_utimensat, 4)
    MIPS_SYS(sys_signalfd, 3)
    MIPS_SYS(sys_ni_syscall, 0)    /* was timerfd */
    MIPS_SYS(sys_eventfd, 1)
    MIPS_SYS(sys_fallocate, 6)    /* 4320 */
    MIPS_SYS(sys_timerfd_create, 2)
    MIPS_SYS(sys_timerfd_gettime, 2)
    MIPS_SYS(sys_timerfd_settime, 4)
    MIPS_SYS(sys_signalfd4, 4)
    MIPS_SYS(sys_eventfd2, 2)    /* 4325 */
    MIPS_SYS(sys_epoll_create1, 1)
    MIPS_SYS(sys_dup3, 3)
    MIPS_SYS(sys_pipe2, 2)
    MIPS_SYS(sys_inotify_init1, 1)
    MIPS_SYS(sys_preadv, 6)    /* 4330 */
    MIPS_SYS(sys_pwritev, 6)
    MIPS_SYS(sys_rt_tgsigqueueinfo, 4)
    MIPS_SYS(sys_perf_event_open, 5)
    MIPS_SYS(sys_accept4, 4)
    MIPS_SYS(sys_recvmmsg, 5)    /* 4335 */
    MIPS_SYS(sys_fanotify_init, 2)
    MIPS_SYS(sys_fanotify_mark, 6)
    MIPS_SYS(sys_prlimit64, 4)
    MIPS_SYS(sys_name_to_handle_at, 5)
    MIPS_SYS(sys_open_by_handle_at, 3)    /* 4340 */
    MIPS_SYS(sys_clock_adjtime, 2)
    MIPS_SYS(sys_syncfs, 1)
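
/*
 * Note (added, not a comment from the original file): for the o32 ABI this
 * table only records how many arguments each syscall takes.  cpu_loop()
 * below consults it to decide whether arguments 5..8 must be fetched from
 * the guest stack (at $sp + 16, 20, 24 and 28) before calling do_syscall().
 */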
static int do_store_exclusive(CPUMIPSState *env)
{
    target_ulong page_addr;

    page_addr = addr & TARGET_PAGE_MASK;

    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {

        reg = env->llreg & 0x1f;
        d = (env->llreg & 0x20) != 0;

        segv = get_user_s64(val, addr);
        segv = get_user_s32(val, addr);

        if (val != env->llval) {
            env->active_tc.gpr[reg] = 0;

            segv = put_user_u64(env->llnewval, addr);
            segv = put_user_u32(env->llnewval, addr);

            env->active_tc.gpr[reg] = 1;

    env->active_tc.PC += 4;
static int do_break(CPUMIPSState *env, target_siginfo_t *info,

        info->si_signo = TARGET_SIGFPE;
        info->si_code = (code == BRK_OVERFLOW) ? FPE_INTOVF : FPE_INTDIV;
        queue_signal(env, info->si_signo, QEMU_SI_FAULT, &*info);

        info->si_signo = TARGET_SIGTRAP;
        queue_signal(env, info->si_signo, QEMU_SI_FAULT, &*info);
void cpu_loop(CPUMIPSState *env)
    CPUState *cs = CPU(mips_env_get_cpu(env));
    target_siginfo_t info;
# ifdef TARGET_ABI_MIPSO32
    unsigned int syscall_num;

        trapnr = cpu_exec(cs);
            env->active_tc.PC += 4;
# ifdef TARGET_ABI_MIPSO32
            syscall_num = env->active_tc.gpr[2] - 4000;
            if (syscall_num >= sizeof(mips_syscall_args)) {
                ret = -TARGET_ENOSYS;
                abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;

                nb_args = mips_syscall_args[syscall_num];
                sp_reg = env->active_tc.gpr[29];
                /* these arguments are taken from the stack */
                    if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) {
                    if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) {
                    if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) {
                    if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) {
                ret = do_syscall(env, env->active_tc.gpr[2],
                                 env->active_tc.gpr[4],
                                 env->active_tc.gpr[5],
                                 env->active_tc.gpr[6],
                                 env->active_tc.gpr[7],
                                 arg5, arg6, arg7, arg8);
            ret = do_syscall(env, env->active_tc.gpr[2],
                             env->active_tc.gpr[4], env->active_tc.gpr[5],
                             env->active_tc.gpr[6], env->active_tc.gpr[7],
                             env->active_tc.gpr[8], env->active_tc.gpr[9],
                             env->active_tc.gpr[10], env->active_tc.gpr[11]);
            if (ret == -TARGET_ERESTARTSYS) {
                env->active_tc.PC -= 4;
            if (ret == -TARGET_QEMU_ESIGRETURN) {
                /* Returning from a successful sigreturn syscall.
                   Avoid clobbering register state.  */
            if ((abi_ulong)ret >= (abi_ulong)-1133) {
                env->active_tc.gpr[7] = 1; /* error flag */
                env->active_tc.gpr[7] = 0; /* error flag */
            env->active_tc.gpr[2] = ret;
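/*
 * MIPS o32 syscall return convention: v0 (gpr[2]) carries the result and
 * a3 (gpr[7]) is the success/error flag; any result in the last 1133
 * negative values is treated as an errno, which is why the code above
 * compares against (abi_ulong)-1133 before setting the error flag.
 */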
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->CP0_BadVAddr;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = TARGET_SIGILL;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                    info.si_signo = sig;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            if (do_store_exclusive(env)) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->active_tc.PC;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            /* The code below was inspired by the MIPS Linux kernel trap
             * handling code in arch/mips/kernel/traps.c.
             */
                abi_ulong trap_instr;

                if (env->hflags & MIPS_HFLAG_M16) {
                    if (env->insn_flags & ASE_MICROMIPS) {
                        /* microMIPS mode */
                        ret = get_user_u16(trap_instr, env->active_tc.PC);
                        if ((trap_instr >> 10) == 0x11) {
                            /* 16-bit instruction */
                            code = trap_instr & 0xf;
                            /* 32-bit instruction */
                            ret = get_user_u16(instr_lo,
                                               env->active_tc.PC + 2);
                            trap_instr = (trap_instr << 16) | instr_lo;
                            code = ((trap_instr >> 6) & ((1 << 20) - 1));
                            /* Unfortunately, microMIPS also suffers from
                               the old assembler bug...  */
                            if (code >= (1 << 10)) {
                        ret = get_user_u16(trap_instr, env->active_tc.PC);
                        code = (trap_instr >> 6) & 0x3f;
                    ret = get_user_u32(trap_instr, env->active_tc.PC);
                    /* As described in the original Linux kernel code, the
                     * below checks on 'code' are to work around an old
                     */
                    code = ((trap_instr >> 6) & ((1 << 20) - 1));
                    if (code >= (1 << 10)) {
                if (do_break(env, &info, code) != 0) {
                abi_ulong trap_instr;
                unsigned int code = 0;

                if (env->hflags & MIPS_HFLAG_M16) {
                    /* microMIPS mode */
                    ret = get_user_u16(instr[0], env->active_tc.PC) ||
                          get_user_u16(instr[1], env->active_tc.PC + 2);
                    trap_instr = (instr[0] << 16) | instr[1];
                    ret = get_user_u32(trap_instr, env->active_tc.PC);
                /* The immediate versions don't provide a code.  */
                if (!(trap_instr & 0xFC000000)) {
                    if (env->hflags & MIPS_HFLAG_M16) {
                        /* microMIPS mode */
                        code = ((trap_instr >> 12) & ((1 << 4) - 1));
                        code = ((trap_instr >> 6) & ((1 << 10) - 1));
                if (do_break(env, &info, code) != 0) {
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
        process_pending_signals(env);
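/*
 * Note that both the BREAK and TRAP cases above have to re-read the
 * faulting instruction from guest memory (16-bit or 32-bit, depending on
 * MIPS16/microMIPS mode) purely to recover the break/trap code that is
 * then handed to do_break().
 */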
#ifdef TARGET_OPENRISC

void cpu_loop(CPUOpenRISCState *env)
    CPUState *cs = CPU(openrisc_env_get_cpu(env));

        trapnr = cpu_exec(cs);
            qemu_log_mask(CPU_LOG_INT, "\nReset request, exit, pc is %#x\n", env->pc);
            qemu_log_mask(CPU_LOG_INT, "\nBus error, exit, pc is %#x\n", env->pc);
            gdbsig = TARGET_SIGBUS;
            cpu_dump_state(cs, stderr, fprintf, 0);
            gdbsig = TARGET_SIGSEGV;
            qemu_log_mask(CPU_LOG_INT, "\nTick time interrupt pc is %#x\n", env->pc);
            qemu_log_mask(CPU_LOG_INT, "\nAlignment pc is %#x\n", env->pc);
            gdbsig = TARGET_SIGBUS;
            qemu_log_mask(CPU_LOG_INT, "\nIllegal instruction pc is %#x\n", env->pc);
            gdbsig = TARGET_SIGILL;
            qemu_log_mask(CPU_LOG_INT, "\nExternal interrupt pc is %#x\n", env->pc);
            qemu_log_mask(CPU_LOG_INT, "\nTLB miss\n");
            qemu_log_mask(CPU_LOG_INT, "\nRange\n");
            gdbsig = TARGET_SIGSEGV;
            env->pc += 4;   /* 0xc00; */
            ret = do_syscall(env,
                             env->gpr[11], /* return value */
                             env->gpr[3],  /* r3 - r7 are params */
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
            qemu_log_mask(CPU_LOG_INT, "\nFloating point error\n");
            qemu_log_mask(CPU_LOG_INT, "\nTrap\n");
            gdbsig = TARGET_SIGTRAP;
            qemu_log_mask(CPU_LOG_INT, "\nNR\n");
            EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n",
            gdbsig = TARGET_SIGILL;
            gdb_handlesig(cs, gdbsig);
            if (gdbsig != TARGET_SIGTRAP) {
        process_pending_signals(env);

#endif /* TARGET_OPENRISC */
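/*
 * The SH4, CRIS and MicroBlaze loops below follow the same pattern as the
 * MIPS and OpenRISC ones above: run translated code with cpu_exec(),
 * decode the returned trap number, either forward it to do_syscall() or
 * queue the matching POSIX signal, and let process_pending_signals()
 * deliver anything pending before re-entering the guest.
 */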
void cpu_loop(CPUSH4State *env)
    CPUState *cs = CPU(sh_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);
            ret = do_syscall(env,
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->gregs[0] = ret;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                    info.si_signo = sig;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->tea;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
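/*
 * In the CRIS loop below the syscall result lands in guest register r10,
 * mirroring the SH4 loop above, which returns it in gregs[0].
 */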
void cpu_loop(CPUCRISState *env)
    CPUState *cs = CPU(cris_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->pregs[PR_EDA];
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            ret = do_syscall(env,
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[10] = ret;
                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                    info.si_signo = sig;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
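/*
 * The MicroBlaze loop below has one wrinkle the other targets lack: when a
 * hardware exception hits in a branch delay slot (D_FLAG set in iflags),
 * bit 12 of the exception status register is set and the PC is wound back
 * so the branch gets replayed after the signal is handled.
 */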
#ifdef TARGET_MICROBLAZE
void cpu_loop(CPUMBState *env)
    CPUState *cs = CPU(mb_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            /* Return address is 4 bytes after the call. */
            env->sregs[SR_PC] = env->regs[14];
            ret = do_syscall(env,
            if (ret == -TARGET_ERESTARTSYS) {
                /* Wind back to before the syscall. */
                env->sregs[SR_PC] -= 4;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
            /* All syscall exits result in guest r14 being equal to the
             * PC we return to, because the kernel syscall exit "rtbd" does
             * this. (This is true even for sigreturn(); note that r14 is
             * not a userspace-usable register, as the kernel may clobber it
             */
            env->regs[14] = env->sregs[SR_PC];
            env->regs[17] = env->sregs[SR_PC] + 4;
            if (env->iflags & D_FLAG) {
                env->sregs[SR_ESR] |= 1 << 12;
                env->sregs[SR_PC] -= 4;
                /* FIXME: if branch was immed, replay the imm as well.  */
            env->iflags &= ~(IMM_FLAG | D_FLAG);
            switch (env->sregs[SR_ESR] & 31) {
            case ESR_EC_DIVZERO:
                info.si_signo = TARGET_SIGFPE;
                info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                info.si_signo = TARGET_SIGFPE;
                if (env->sregs[SR_FSR] & FSR_IO) {
                    info.si_code = TARGET_FPE_FLTINV;
                if (env->sregs[SR_FSR] & FSR_DZ) {
                    info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                printf("Unhandled hw-exception: 0x%x\n",
                       env->sregs[SR_ESR] & ESR_EC_MASK);
                cpu_dump_state(cs, stderr, fprintf, 0);
                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                    info.si_signo = sig;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
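/*
 * The m68k loop below has two extra entry points besides plain Linux
 * syscalls: "simulator" syscalls dispatched through do_m68k_simcall()
 * when ts->sim_syscalls is set, and semihosting requests handled by
 * do_m68k_semihosting().
 */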
void cpu_loop(CPUM68KState *env)
    CPUState *cs = CPU(m68k_env_get_cpu(env));
    target_siginfo_t info;
    TaskState *ts = cs->opaque;

        trapnr = cpu_exec(cs);
            if (ts->sim_syscalls) {
                get_user_u16(nr, env->pc + 2);
                do_m68k_simcall(env, nr);
        case EXCP_HALT_INSN:
            /* Semihosting syscall.  */
            do_m68k_semihosting(env, env->dregs[0]);
        case EXCP_UNSUPPORTED:
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            ts->sim_syscalls = 0;
            ret = do_syscall(env,
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->dregs[0] = ret;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->mmu.ar;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                    info.si_signo = sig;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
        process_pending_signals(env);
#endif /* TARGET_M68K */
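/*
 * Like the MIPS version earlier in this file, the Alpha
 * do_store_exclusive() below emulates LDx_L/STx_C pairs: the store only
 * goes through when the value at the linked address still matches
 * lock_value, and a SIGSEGV with SEGV_MAPERR is queued when the load or
 * store at that address faults.
 */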
static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)
    target_ulong addr, val, tmp;
    target_siginfo_t info;

    addr = env->lock_addr;
    tmp = env->lock_st_addr;
    env->lock_addr = -1;
    env->lock_st_addr = 0;
        if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
        if (val == env->lock_value) {
            if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {
    info.si_signo = TARGET_SIGSEGV;
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, TARGET_SIGSEGV, QEMU_SI_FAULT, &info);
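/*
 * The Alpha loop below follows the kernel convention of returning the
 * (possibly negated) syscall result in v0 with a 0/1 error indicator in
 * a3, and it invalidates lock_addr on every exception so stale
 * load-locked state can never survive a signal.
 */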
void cpu_loop(CPUAlphaState *env)
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);
        /* All of the traps imply a transition through PALcode, which
           implies an REI instruction has been executed.  Which means
           that the intr_flag should be cleared.  */
            fprintf(stderr, "Reset requested. Exit\n");
            fprintf(stderr, "Machine check exception. Exit\n");
        case EXCP_SMP_INTERRUPT:
        case EXCP_CLK_INTERRUPT:
        case EXCP_DEV_INTERRUPT:
            fprintf(stderr, "External interrupt. Exit\n");
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
                            ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_FLTINV;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            /* No-op.  Linux simply re-enables the FPU.  */
            env->lock_addr = -1;
            switch (env->error_code) {
                info.si_signo = TARGET_SIGTRAP;
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                trapnr = env->ir[IR_V0];
                sysret = do_syscall(env, trapnr,
                                    env->ir[IR_A0], env->ir[IR_A1],
                                    env->ir[IR_A2], env->ir[IR_A3],
                                    env->ir[IR_A4], env->ir[IR_A5],
                if (sysret == -TARGET_ERESTARTSYS) {
                if (sysret == -TARGET_QEMU_ESIGRETURN) {
                /* Syscall writes 0 to V0 to bypass error check, similar
                   to how this is handled internal to Linux kernel.
                   (Ab)use trapnr temporarily as boolean indicating error.  */
                trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
                env->ir[IR_V0] = (trapnr ? -sysret : sysret);
                env->ir[IR_A3] = trapnr;
                /* ??? We can probably elide the code using page_unprotect
                   that is checking for self-modifying code.  Instead we
                   could simply call tb_flush here.  Until we work out the
                   changes required to turn off the extra write protection,
                   this can be a no-op.  */
                /* Handled in the translator for usermode.  */
                /* Handled in the translator for usermode.  */
                info.si_signo = TARGET_SIGFPE;
                switch (env->ir[IR_A0]) {
                case TARGET_GEN_INTOVF:
                    info.si_code = TARGET_FPE_INTOVF;
                case TARGET_GEN_INTDIV:
                    info.si_code = TARGET_FPE_INTDIV;
                case TARGET_GEN_FLTOVF:
                    info.si_code = TARGET_FPE_FLTOVF;
                case TARGET_GEN_FLTUND:
                    info.si_code = TARGET_FPE_FLTUND;
                case TARGET_GEN_FLTINV:
                    info.si_code = TARGET_FPE_FLTINV;
                case TARGET_GEN_FLTINE:
                    info.si_code = TARGET_FPE_FLTRES;
                case TARGET_GEN_ROPRAND:
                    info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
            if (info.si_signo) {
                env->lock_addr = -1;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
#endif /* TARGET_ALPHA */
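/*
 * In the s390x loop below the PSW address is advanced past the SVC
 * instruction (int_svc_ilen bytes) before the syscall runs, and wound
 * back by the same amount when do_syscall() asks for a restart with
 * -TARGET_ERESTARTSYS.
 */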
void cpu_loop(CPUS390XState *env)
    CPUState *cs = CPU(s390_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);
        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            n = env->int_svc_code;
                /* syscalls > 255 */
            env->psw.addr += env->int_svc_ilen;
            ret = do_syscall(env, n, env->regs[2], env->regs[3],
                             env->regs[4], env->regs[5],
                             env->regs[6], env->regs[7], 0, 0);
            if (ret == -TARGET_ERESTARTSYS) {
                env->psw.addr -= env->int_svc_ilen;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            n = TARGET_TRAP_BRKPT;
            n = env->int_pgm_code;
            case PGM_PRIVILEGED:
                sig = TARGET_SIGILL;
                n = TARGET_ILL_ILLOPC;
            case PGM_PROTECTION:
            case PGM_ADDRESSING:
                sig = TARGET_SIGSEGV;
                /* XXX: check env->error_code */
                n = TARGET_SEGV_MAPERR;
                addr = env->__excp_addr;
            case PGM_SPECIFICATION:
            case PGM_SPECIAL_OP:
                sig = TARGET_SIGILL;
                n = TARGET_ILL_ILLOPN;
            case PGM_FIXPT_OVERFLOW:
                sig = TARGET_SIGFPE;
                n = TARGET_FPE_INTOVF;
            case PGM_FIXPT_DIVIDE:
                sig = TARGET_SIGFPE;
                n = TARGET_FPE_INTDIV;
                n = (env->fpc >> 8) & 0xff;
                    /* compare-and-trap */
                    /* An IEEE exception, simulated or otherwise.  */
                        n = TARGET_FPE_FLTINV;
                    } else if (n & 0x40) {
                        n = TARGET_FPE_FLTDIV;
                    } else if (n & 0x20) {
                        n = TARGET_FPE_FLTOVF;
                    } else if (n & 0x10) {
                        n = TARGET_FPE_FLTUND;
                    } else if (n & 0x08) {
                        n = TARGET_FPE_FLTRES;
                        /* ??? Quantum exception; BFP, DFP error.  */
                    sig = TARGET_SIGFPE;
                fprintf(stderr, "Unhandled program exception: %#x\n", n);
                cpu_dump_state(cs, stderr, fprintf, 0);
                addr = env->psw.addr;
                info.si_signo = sig;
                info._sifields._sigfault._addr = addr;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
#endif /* TARGET_S390X */
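/*
 * The TileGX helpers below emulate the atomic exch/cmpexch and
 * fetch-and-op instructions in software: the operand is read from guest
 * memory, the new value is written back only when the operation allows
 * it, and the value originally read is returned in the destination
 * register via set_regval().
 */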
#ifdef TARGET_TILEGX

static void gen_sigill_reg(CPUTLGState *env)
    target_siginfo_t info;

    info.si_signo = TARGET_SIGILL;
    info.si_code = TARGET_ILL_PRVREG;
    info._sifields._sigfault._addr = env->pc;
    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

static void do_signal(CPUTLGState *env, int signo, int sigcode)
    target_siginfo_t info;

    info.si_signo = signo;
    info._sifields._sigfault._addr = env->pc;
    if (signo == TARGET_SIGSEGV) {
        /* The passed in sigcode is a dummy; check for a page mapping
           and pass either MAPERR or ACCERR.  */
        target_ulong addr = env->excaddr;
        info._sifields._sigfault._addr = addr;
        if (page_check_range(addr, 1, PAGE_VALID) < 0) {
            sigcode = TARGET_SEGV_MAPERR;
            sigcode = TARGET_SEGV_ACCERR;
    info.si_code = sigcode;
    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

static void gen_sigsegv_maperr(CPUTLGState *env, target_ulong addr)
    env->excaddr = addr;
    do_signal(env, TARGET_SIGSEGV, 0);

static void set_regval(CPUTLGState *env, uint8_t reg, uint64_t val)
    if (unlikely(reg >= TILEGX_R_COUNT)) {
            gen_sigill_reg(env);
            g_assert_not_reached();
    env->regs[reg] = val;

/*
 * Compare the 8-byte contents of the CmpValue SPR with the 8-byte value in
 * memory at the address held in the first source register. If the values are
 * not equal, then no memory operation is performed. If the values are equal,
 * the 8-byte quantity from the second source register is written into memory
 * at the address held in the first source register. In either case, the result
 * of the instruction is the value read from memory. The compare and write to
 * memory are atomic and thus can be used for synchronization purposes. This
 * instruction only operates for addresses aligned to a 8-byte boundary.
 * Unaligned memory access causes an Unaligned Data Reference interrupt.
 *
 * Functional Description (64-bit)
 *       uint64_t memVal = memoryReadDoubleWord (rf[SrcA]);
 *       rf[Dest] = memVal;
 *       if (memVal == SPR[CmpValueSPR])
 *           memoryWriteDoubleWord (rf[SrcA], rf[SrcB]);
 *
 * Functional Description (32-bit)
 *       uint64_t memVal = signExtend32 (memoryReadWord (rf[SrcA]));
 *       rf[Dest] = memVal;
 *       if (memVal == signExtend32 (SPR[CmpValueSPR]))
 *           memoryWriteWord (rf[SrcA], rf[SrcB]);
 *
 * This function also processes exch and exch4 which need not process SPR.
 */
static void do_exch(CPUTLGState *env, bool quad, bool cmp)
    target_long val, sprval;

    addr = env->atomic_srca;
    if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
        goto sigsegv_maperr;
        sprval = env->spregs[TILEGX_SPR_CMPEXCH];
        sprval = sextract64(env->spregs[TILEGX_SPR_CMPEXCH], 0, 32);
    if (!cmp || val == sprval) {
        target_long valb = env->atomic_srcb;
        if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
            goto sigsegv_maperr;
    set_regval(env, env->atomic_dstr, val);
    gen_sigsegv_maperr(env, addr);

static void do_fetch(CPUTLGState *env, int trapnr, bool quad)
    target_long val, valb;

    addr = env->atomic_srca;
    valb = env->atomic_srcb;
    if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
        goto sigsegv_maperr;
    case TILEGX_EXCP_OPCODE_FETCHADD:
    case TILEGX_EXCP_OPCODE_FETCHADD4:
    case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
    case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
        if ((int32_t)valb < 0) {
    case TILEGX_EXCP_OPCODE_FETCHAND:
    case TILEGX_EXCP_OPCODE_FETCHAND4:
    case TILEGX_EXCP_OPCODE_FETCHOR:
    case TILEGX_EXCP_OPCODE_FETCHOR4:
        g_assert_not_reached();
    if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
        goto sigsegv_maperr;
    set_regval(env, env->atomic_dstr, val);
    gen_sigsegv_maperr(env, addr);

void cpu_loop(CPUTLGState *env)
    CPUState *cs = CPU(tilegx_env_get_cpu(env));

        trapnr = cpu_exec(cs);
        case TILEGX_EXCP_SYSCALL:
            abi_ulong ret = do_syscall(env, env->regs[TILEGX_R_NR],
                                       env->regs[0], env->regs[1],
                                       env->regs[2], env->regs[3],
                                       env->regs[4], env->regs[5],
                                       env->regs[6], env->regs[7]);
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[TILEGX_R_RE] = ret;
                env->regs[TILEGX_R_ERR] = TILEGX_IS_ERRNO(ret) ? -ret : 0;
        case TILEGX_EXCP_OPCODE_EXCH:
            do_exch(env, true, false);
        case TILEGX_EXCP_OPCODE_EXCH4:
            do_exch(env, false, false);
        case TILEGX_EXCP_OPCODE_CMPEXCH:
            do_exch(env, true, true);
        case TILEGX_EXCP_OPCODE_CMPEXCH4:
            do_exch(env, false, true);
        case TILEGX_EXCP_OPCODE_FETCHADD:
        case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
        case TILEGX_EXCP_OPCODE_FETCHAND:
        case TILEGX_EXCP_OPCODE_FETCHOR:
            do_fetch(env, trapnr, true);
        case TILEGX_EXCP_OPCODE_FETCHADD4:
        case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
        case TILEGX_EXCP_OPCODE_FETCHAND4:
        case TILEGX_EXCP_OPCODE_FETCHOR4:
            do_fetch(env, trapnr, false);
        case TILEGX_EXCP_SIGNAL:
            do_signal(env, env->signo, env->sigcode);
        case TILEGX_EXCP_REG_IDN_ACCESS:
        case TILEGX_EXCP_REG_UDN_ACCESS:
            gen_sigill_reg(env);
            fprintf(stderr, "trapnr is %d[0x%x].\n", trapnr, trapnr);
            g_assert_not_reached();
        process_pending_signals(env);
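/*
 * Everything past this point is target-independent: task bookkeeping,
 * cpu_copy() used when new guest threads are created, the command-line
 * option table and its handlers, usage()/parse_args(), and main().
 */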
THREAD CPUState *thread_cpu;

void task_settid(TaskState *ts)
    if (ts->ts_tid == 0) {
        ts->ts_tid = (pid_t)syscall(SYS_gettid);

void stop_all_tasks(void)
    /*
     * We trust that when using NPTL, start_exclusive()
     * handles thread stopping correctly.
     */

/* Assumes contents are already zeroed.  */
void init_task_state(TaskState *ts)

CPUArchState *cpu_copy(CPUArchState *env)
    CPUState *cpu = ENV_GET_CPU(env);
    CPUState *new_cpu = cpu_init(cpu_model);
    CPUArchState *new_env = new_cpu->env_ptr;

    /* Reset non arch specific state */
    memcpy(new_env, env, sizeof(CPUArchState));

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_cpu->breakpoints);
    QTAILQ_INIT(&new_cpu->watchpoints);
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
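/*
 * Command-line handling: each handle_arg_*() callback below corresponds
 * to one entry in arg_table[], and the same option can also be set
 * through the matching QEMU_* environment variable (see parse_args()).
 */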
static void handle_arg_help(const char *arg)
    usage(EXIT_SUCCESS);

static void handle_arg_log(const char *arg)
    mask = qemu_str_to_log_mask(arg);
        qemu_print_log_usage(stdout);
    qemu_log_needs_buffers();

static void handle_arg_log_filename(const char *arg)
    qemu_set_log_filename(arg, &error_fatal);

static void handle_arg_set_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_setenv(envlist, token) != 0) {
            usage(EXIT_FAILURE);

static void handle_arg_unset_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_unsetenv(envlist, token) != 0) {
            usage(EXIT_FAILURE);

static void handle_arg_argv0(const char *arg)
    argv0 = strdup(arg);

static void handle_arg_stack_size(const char *arg)
    guest_stack_size = strtoul(arg, &p, 0);
    if (guest_stack_size == 0) {
        usage(EXIT_FAILURE);
        guest_stack_size *= 1024 * 1024;
    } else if (*p == 'k' || *p == 'K') {
        guest_stack_size *= 1024;

static void handle_arg_ld_prefix(const char *arg)
    interp_prefix = strdup(arg);

static void handle_arg_pagesize(const char *arg)
    qemu_host_page_size = atoi(arg);
    if (qemu_host_page_size == 0 ||
        (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
        fprintf(stderr, "page size must be a power of two\n");

static void handle_arg_randseed(const char *arg)
    unsigned long long seed;

    if (parse_uint_full(arg, &seed, 0) != 0 || seed > UINT_MAX) {
        fprintf(stderr, "Invalid seed number: %s\n", arg);

static void handle_arg_gdb(const char *arg)
    gdbstub_port = atoi(arg);

static void handle_arg_uname(const char *arg)
    qemu_uname_release = strdup(arg);

static void handle_arg_cpu(const char *arg)
    cpu_model = strdup(arg);
    if (cpu_model == NULL || is_help_option(cpu_model)) {
        /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
        cpu_list(stdout, &fprintf);

static void handle_arg_guest_base(const char *arg)
    guest_base = strtol(arg, NULL, 0);
    have_guest_base = 1;

static void handle_arg_reserved_va(const char *arg)
    reserved_va = strtoul(arg, &p, 0);
        unsigned long unshifted = reserved_va;
        reserved_va <<= shift;
        if (((reserved_va >> shift) != unshifted)
#if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
            || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
            fprintf(stderr, "Reserved virtual address too big\n");
        fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);

static void handle_arg_singlestep(const char *arg)

static void handle_arg_strace(const char *arg)

static void handle_arg_version(const char *arg)
    printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION
           ", " QEMU_COPYRIGHT "\n");

static char *trace_file;
static void handle_arg_trace(const char *arg)
    trace_file = trace_opt_parse(arg);

struct qemu_argument {
    void (*handle_opt)(const char *arg);
    const char *example;

static const struct qemu_argument arg_table[] = {
    {"h",          "",                 false, handle_arg_help,
     "",           "print this help"},
    {"help",       "",                 false, handle_arg_help,
    {"g",          "QEMU_GDB",         true,  handle_arg_gdb,
     "port",       "wait for gdb connection on 'port'"},
    {"L",          "QEMU_LD_PREFIX",   true,  handle_arg_ld_prefix,
     "path",       "set the elf interpreter prefix to 'path'"},
    {"s",          "QEMU_STACK_SIZE",  true,  handle_arg_stack_size,
     "size",       "set the stack size to 'size' bytes"},
    {"cpu",        "QEMU_CPU",         true,  handle_arg_cpu,
     "model",      "select CPU (-cpu help for list)"},
    {"E",          "QEMU_SET_ENV",     true,  handle_arg_set_env,
     "var=value",  "sets the target's environment variable (see below)"},
    {"U",          "QEMU_UNSET_ENV",   true,  handle_arg_unset_env,
     "var",        "unsets the target's environment variable (see below)"},
    {"0",          "QEMU_ARGV0",       true,  handle_arg_argv0,
     "argv0",      "forces target process argv[0] to be 'argv0'"},
    {"r",          "QEMU_UNAME",       true,  handle_arg_uname,
     "uname",      "set qemu uname release string to 'uname'"},
    {"B",          "QEMU_GUEST_BASE",  true,  handle_arg_guest_base,
     "address",    "set guest_base address to 'address'"},
    {"R",          "QEMU_RESERVED_VA", true,  handle_arg_reserved_va,
     "size",       "reserve 'size' bytes for guest virtual address space"},
    {"d",          "QEMU_LOG",         true,  handle_arg_log,
     "item[,...]", "enable logging of specified items "
                   "(use '-d help' for a list of items)"},
    {"D",          "QEMU_LOG_FILENAME", true, handle_arg_log_filename,
     "logfile",    "write logs to 'logfile' (default stderr)"},
    {"p",          "QEMU_PAGESIZE",    true,  handle_arg_pagesize,
     "pagesize",   "set the host page size to 'pagesize'"},
    {"singlestep", "QEMU_SINGLESTEP",  false, handle_arg_singlestep,
     "",           "run in singlestep mode"},
    {"strace",     "QEMU_STRACE",      false, handle_arg_strace,
     "",           "log system calls"},
    {"seed",       "QEMU_RAND_SEED",   true,  handle_arg_randseed,
     "",           "Seed for pseudo-random number generator"},
    {"trace",      "QEMU_TRACE",       true,  handle_arg_trace,
     "",           "[[enable=]<pattern>][,events=<file>][,file=<file>]"},
    {"version",    "QEMU_VERSION",     false, handle_arg_version,
     "",           "display version information and exit"},
    {NULL, NULL, false, NULL, NULL, NULL}
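/*
 * Typical invocations, for illustration only (the target binary, library
 * prefix path and variable names below are examples, not requirements):
 *
 *   qemu-arm -L /usr/arm-linux-gnueabihf ./hello
 *   QEMU_STRACE=1 qemu-arm -E LD_DEBUG=libs ./hello
 */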
static void usage(int exitcode)
    const struct qemu_argument *arginfo;

    printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n"
           "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n"
           "Options and associated environment variables:\n"

    /* Calculate column widths. We must always have at least enough space
     * for the column header.
     */
    maxarglen = strlen("Argument");
    maxenvlen = strlen("Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        int arglen = strlen(arginfo->argv);
        if (arginfo->has_arg) {
            arglen += strlen(arginfo->example) + 1;
        if (strlen(arginfo->env) > maxenvlen) {
            maxenvlen = strlen(arginfo->env);
        if (arglen > maxarglen) {

    printf("%-*s %-*s Description\n", maxarglen + 1, "Argument",
           maxenvlen, "Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->has_arg) {
            printf("-%s %-*s %-*s %s\n", arginfo->argv,
                   (int)(maxarglen - strlen(arginfo->argv) - 1),
                   arginfo->example, maxenvlen, arginfo->env, arginfo->help);
            printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv,
                   maxenvlen, arginfo->env,

           "QEMU_LD_PREFIX  = %s\n"
           "QEMU_STACK_SIZE = %ld bytes\n",

           "You can use -E and -U options or the QEMU_SET_ENV and\n"
           "QEMU_UNSET_ENV environment variables to set and unset\n"
           "environment variables for the target process.\n"
           "It is possible to provide several variables by separating them\n"
           "by commas in getsubopt(3) style. Additionally it is possible to\n"
           "provide the -E and -U options multiple times.\n"
           "The following lines are equivalent:\n"
           "    -E var1=val1 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
           "    -E var1=val1,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
           "    QEMU_SET_ENV=var1=val1,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
           "Note that if you provide several changes to a single variable\n"
           "the last change will stay in effect.\n");
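/*
 * parse_args() below first applies any QEMU_* environment variables
 * listed in arg_table[], then walks argv until the first word that is not
 * an option; that word becomes the guest program to load, and everything
 * after it is copied through to the guest's own argv untouched.
 */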
static int parse_args(int argc, char **argv)
    const struct qemu_argument *arginfo;

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->env == NULL) {
        r = getenv(arginfo->env);
            arginfo->handle_opt(r);

    if (optind >= argc) {
        if (!strcmp(r, "-")) {
        /* Treat --foo the same as -foo.  */

        for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
            if (!strcmp(r, arginfo->argv)) {
                if (arginfo->has_arg) {
                    if (optind >= argc) {
                        (void) fprintf(stderr,
                            "qemu: missing argument for option '%s'\n", r);
                    arginfo->handle_opt(argv[optind]);
                    arginfo->handle_opt(NULL);

        /* no option matched the current argv */
        if (arginfo->handle_opt == NULL) {
            (void) fprintf(stderr, "qemu: unknown option '%s'\n", r);

    if (optind >= argc) {
        (void) fprintf(stderr, "qemu: no user program specified\n");

    filename = argv[optind];
    exec_path = argv[optind];
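/*
 * main() below sets up the guest environment and CPU, loads the binary
 * with loader_exec(), copies the register state produced by the loader
 * into the per-target CPUArchState, and finally hands control to the
 * target's cpu_loop().
 */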
int main(int argc, char **argv, char **envp)
    struct target_pt_regs regs1, *regs = &regs1;
    struct image_info info1, *info = &info1;
    struct linux_binprm bprm;
    char **target_environ, **wrk;

    qemu_init_cpu_loop();
    module_call_init(MODULE_INIT_QOM);

    if ((envlist = envlist_create()) == NULL) {
        (void) fprintf(stderr, "Unable to allocate envlist\n");

    /* add current environment into the list */
    for (wrk = environ; *wrk != NULL; wrk++) {
        (void) envlist_setenv(envlist, *wrk);

    /* Read the stack limit from the kernel.  If it's "unlimited",
       then we can do little else besides use the default.  */
    if (getrlimit(RLIMIT_STACK, &lim) == 0
        && lim.rlim_cur != RLIM_INFINITY
        && lim.rlim_cur == (target_long)lim.rlim_cur) {
        guest_stack_size = lim.rlim_cur;

    qemu_add_opts(&qemu_trace_opts);

    optind = parse_args(argc, argv);

    if (!trace_init_backends()) {
    trace_init_file(trace_file);

    memset(regs, 0, sizeof(struct target_pt_regs));

    /* Zero out image_info */
    memset(info, 0, sizeof(struct image_info));

    memset(&bprm, 0, sizeof (bprm));

    /* Scan interp_prefix dir for replacement files. */
    init_paths(interp_prefix);
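/*
 * Pick a default CPU model when the user did not pass -cpu: the choice is
 * per target (e.g. "qemu64"/"qemu32" for x86, "or1200" for OpenRISC,
 * "POWER8" for ppc64), and cpu_init() has to run this early because the
 * guest address-space setup below depends on qemu_host_page_size.
 */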
    init_qemu_uname_release();

    if (cpu_model == NULL) {
#if defined(TARGET_I386)
#ifdef TARGET_X86_64
        cpu_model = "qemu64";
        cpu_model = "qemu32";
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_M68K)
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
        cpu_model = "TI UltraSparc II";
        cpu_model = "Fujitsu MB86904";
#elif defined(TARGET_MIPS)
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
#elif defined TARGET_OPENRISC
        cpu_model = "or1200";
#elif defined(TARGET_PPC)
# ifdef TARGET_PPC64
        cpu_model = "POWER8";
#elif defined TARGET_SH4
        cpu_model = TYPE_SH7785_CPU;

    /* NOTE: we need to init the CPU at this stage to get
       qemu_host_page_size */
    cpu = cpu_init(cpu_model);
        fprintf(stderr, "Unable to find CPU definition\n");

    if (getenv("QEMU_STRACE")) {
    if (getenv("QEMU_RAND_SEED")) {
        handle_arg_randseed(getenv("QEMU_RAND_SEED"));

    target_environ = envlist_to_environ(envlist, NULL);
    envlist_free(envlist);

    /*
     * Now that page sizes are configured in cpu_init() we can do
     * proper page alignment for guest_base.
     */
    guest_base = HOST_PAGE_ALIGN(guest_base);
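/*
 * When -B or -R was given (or a default reserved_va applies for this
 * host/target combination), init_guest_space() reserves the whole guest
 * virtual address range up front; a failure here usually means the host
 * address-space limits are too tight for the requested reservation.
 */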
    if (reserved_va || have_guest_base) {
        guest_base = init_guest_space(guest_base, reserved_va, 0,
        if (guest_base == (unsigned long)-1) {
            fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
                    "space for use as guest address space (check your virtual "
                    "memory ulimit setting or reserve less using -R option)\n",
            mmap_next_start = reserved_va;

    /*
     * Read in mmap_min_addr kernel parameter.  This value is used
     * when loading the ELF image to determine whether guest_base
     * is needed.  It is also used in mmap_find_vma.
     */
    if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
        if (fscanf(fp, "%lu", &tmp) == 1) {
            mmap_min_addr = tmp;
            qemu_log_mask(CPU_LOG_PAGE, "host mmap_min_addr=0x%lx\n", mmap_min_addr);

    /*
     * Prepare copy of argv vector for target.
     */
    target_argc = argc - optind;
    target_argv = calloc(target_argc + 1, sizeof (char *));
    if (target_argv == NULL) {
        (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");

    /*
     * If argv0 is specified (using '-0' switch) we replace
     * argv[0] pointer with the given one.
     */
    if (argv0 != NULL) {
        target_argv[i++] = strdup(argv0);
    for (; i < target_argc; i++) {
        target_argv[i] = strdup(argv[optind + i]);
    target_argv[target_argc] = NULL;
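/*
 * With the argument vector in place, everything is handed to
 * loader_exec(): it maps the ELF image (and its interpreter, if any) into
 * the reserved guest space and fills in both the initial register file
 * and the image_info layout that is logged below under CPU_LOG_PAGE.
 */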
    ts = g_new0(TaskState, 1);
    init_task_state(ts);
    /* build Task State */

    execfd = qemu_getauxval(AT_EXECFD);
        execfd = open(filename, O_RDONLY);
            printf("Error while loading %s: %s\n", filename, strerror(errno));
            _exit(EXIT_FAILURE);

    ret = loader_exec(execfd, filename, target_argv, target_environ, regs,
        printf("Error while loading %s: %s\n", filename, strerror(-ret));
        _exit(EXIT_FAILURE);

    for (wrk = target_environ; *wrk; wrk++) {
    free(target_environ);

    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        qemu_log("guest_base  0x%lx\n", guest_base);
        qemu_log("start_brk   0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
        qemu_log("end_code    0x" TARGET_ABI_FMT_lx "\n", info->end_code);
        qemu_log("start_code  0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("start_data  0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("end_data    0x" TARGET_ABI_FMT_lx "\n", info->end_data);
        qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("brk         0x" TARGET_ABI_FMT_lx "\n", info->brk);
        qemu_log("entry       0x" TARGET_ABI_FMT_lx "\n", info->entry);

    target_set_brk(info->brk);

    /* Now that we've loaded the binary, GUEST_BASE is fixed.  Delay
       generating the prologue until now so that the prologue can take
       the real value of GUEST_BASE into account.  */
    tcg_prologue_init(&tcg_ctx);
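/*
 * From here on the code is one large per-target #if chain that copies the
 * register state produced by the loader into the CPUArchState and applies
 * whatever extra architectural setup the target needs; the x86 case
 * (control registers, IDT/GDT and segment descriptors) is by far the most
 * involved.
 */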
#if defined(TARGET_I386)
    env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
    env->hflags |= HF_PE_MASK | HF_CPL_MASK;
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        env->cr[4] |= CR4_OSFXSR_MASK;
        env->hflags |= HF_OSFXSR_MASK;
#ifndef TARGET_ABI32
    /* enable 64 bit mode if possible */
    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
    env->cr[4] |= CR4_PAE_MASK;
    env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
    env->hflags |= HF_LMA_MASK;

    /* flags setup : we activate the IRQs by default as in user mode */
    env->eflags |= IF_MASK;

    /* linux register setup */
#ifndef TARGET_ABI32
    env->regs[R_EAX] = regs->rax;
    env->regs[R_EBX] = regs->rbx;
    env->regs[R_ECX] = regs->rcx;
    env->regs[R_EDX] = regs->rdx;
    env->regs[R_ESI] = regs->rsi;
    env->regs[R_EDI] = regs->rdi;
    env->regs[R_EBP] = regs->rbp;
    env->regs[R_ESP] = regs->rsp;
    env->eip = regs->rip;
    env->regs[R_EAX] = regs->eax;
    env->regs[R_EBX] = regs->ebx;
    env->regs[R_ECX] = regs->ecx;
    env->regs[R_EDX] = regs->edx;
    env->regs[R_ESI] = regs->esi;
    env->regs[R_EDI] = regs->edi;
    env->regs[R_EBP] = regs->ebp;
    env->regs[R_ESP] = regs->esp;
    env->eip = regs->eip;

    /* linux interrupt setup */
#ifndef TARGET_ABI32
    env->idt.limit = 511;
    env->idt.limit = 255;
    env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
                                PROT_READ|PROT_WRITE,
                                MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    idt_table = g2h(env->idt.base);

    /* linux segment setup */
        uint64_t *gdt_table;
        env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
        gdt_table = g2h(env->gdt.base);
        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
        /* 64 bit code segment */
        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
        write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_FS, __USER_DS);
    cpu_x86_load_seg(env, R_GS, __USER_DS);
    /* This hack makes Wine work... */
    env->segs[R_FS].selector = 0;
    cpu_x86_load_seg(env, R_DS, 0);
    cpu_x86_load_seg(env, R_ES, 0);
    cpu_x86_load_seg(env, R_FS, 0);
    cpu_x86_load_seg(env, R_GS, 0);
#elif defined(TARGET_AARCH64)
    if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
                "The selected ARM CPU does not support 64 bit mode\n");
    for (i = 0; i < 31; i++) {
        env->xregs[i] = regs->regs[i];
    env->xregs[31] = regs->sp;
#elif defined(TARGET_ARM)
    cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->uregs[i];
#ifdef TARGET_WORDS_BIGENDIAN
    if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
        && (info->elf_flags & EF_ARM_BE8)) {
        env->uncached_cpsr |= CPSR_E;
        env->cp15.sctlr_el[1] |= SCTLR_E0E;
        env->cp15.sctlr_el[1] |= SCTLR_B;
#elif defined(TARGET_UNICORE32)
    cpu_asr_write(env, regs->uregs[32], 0xffffffff);
    for (i = 0; i < 32; i++) {
        env->regs[i] = regs->uregs[i];
#elif defined(TARGET_SPARC)
    env->npc = regs->npc;
    for (i = 0; i < 8; i++)
        env->gregs[i] = regs->u_regs[i];
    for (i = 0; i < 8; i++)
        env->regwptr[i] = regs->u_regs[i + 8];
#elif defined(TARGET_PPC)
#if defined(TARGET_PPC64)
    int flag = (env->insns_flags2 & PPC2_BOOKE206) ? MSR_CM : MSR_SF;
#if defined(TARGET_ABI32)
    env->msr &= ~((target_ulong)1 << flag);
    env->msr |= (target_ulong)1 << flag;
    env->nip = regs->nip;
    for (i = 0; i < 32; i++) {
        env->gpr[i] = regs->gpr[i];
#elif defined(TARGET_M68K)
    env->dregs[0] = regs->d0;
    env->dregs[1] = regs->d1;
    env->dregs[2] = regs->d2;
    env->dregs[3] = regs->d3;
    env->dregs[4] = regs->d4;
    env->dregs[5] = regs->d5;
    env->dregs[6] = regs->d6;
    env->dregs[7] = regs->d7;
    env->aregs[0] = regs->a0;
    env->aregs[1] = regs->a1;
    env->aregs[2] = regs->a2;
    env->aregs[3] = regs->a3;
    env->aregs[4] = regs->a4;
    env->aregs[5] = regs->a5;
    env->aregs[6] = regs->a6;
    env->aregs[7] = regs->usp;
    ts->sim_syscalls = 1;
#elif defined(TARGET_MICROBLAZE)
    env->regs[0] = regs->r0;
    env->regs[1] = regs->r1;
    env->regs[2] = regs->r2;
    env->regs[3] = regs->r3;
    env->regs[4] = regs->r4;
    env->regs[5] = regs->r5;
    env->regs[6] = regs->r6;
    env->regs[7] = regs->r7;
    env->regs[8] = regs->r8;
    env->regs[9] = regs->r9;
    env->regs[10] = regs->r10;
    env->regs[11] = regs->r11;
    env->regs[12] = regs->r12;
    env->regs[13] = regs->r13;
    env->regs[14] = regs->r14;
    env->regs[15] = regs->r15;
    env->regs[16] = regs->r16;
    env->regs[17] = regs->r17;
    env->regs[18] = regs->r18;
    env->regs[19] = regs->r19;
    env->regs[20] = regs->r20;
    env->regs[21] = regs->r21;
    env->regs[22] = regs->r22;
    env->regs[23] = regs->r23;
    env->regs[24] = regs->r24;
    env->regs[25] = regs->r25;
    env->regs[26] = regs->r26;
    env->regs[27] = regs->r27;
    env->regs[28] = regs->r28;
    env->regs[29] = regs->r29;
    env->regs[30] = regs->r30;
    env->regs[31] = regs->r31;
    env->sregs[SR_PC] = regs->pc;
#elif defined(TARGET_MIPS)
    for (i = 0; i < 32; i++) {
        env->active_tc.gpr[i] = regs->regs[i];
    env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
    if (regs->cp0_epc & 1) {
        env->hflags |= MIPS_HFLAG_M16;
    if (((info->elf_flags & EF_MIPS_NAN2008) != 0) !=
        ((env->active_fpu.fcr31 & (1 << FCR31_NAN2008)) != 0)) {
        if ((env->active_fpu.fcr31_rw_bitmask &
             (1 << FCR31_NAN2008)) == 0) {
            fprintf(stderr, "ELF binary's NaN mode not supported by CPU\n");
        if ((info->elf_flags & EF_MIPS_NAN2008) != 0) {
            env->active_fpu.fcr31 |= (1 << FCR31_NAN2008);
            env->active_fpu.fcr31 &= ~(1 << FCR31_NAN2008);
        restore_snan_bit_mode(env);
#elif defined(TARGET_OPENRISC)
    for (i = 0; i < 32; i++) {
        env->gpr[i] = regs->gpr[i];
#elif defined(TARGET_SH4)
    for (i = 0; i < 16; i++) {
        env->gregs[i] = regs->regs[i];
#elif defined(TARGET_ALPHA)
    for (i = 0; i < 28; i++) {
        env->ir[i] = ((abi_ulong *)regs)[i];
    env->ir[IR_SP] = regs->usp;
#elif defined(TARGET_CRIS)
    env->regs[0] = regs->r0;
    env->regs[1] = regs->r1;
    env->regs[2] = regs->r2;
    env->regs[3] = regs->r3;
    env->regs[4] = regs->r4;
    env->regs[5] = regs->r5;
    env->regs[6] = regs->r6;
    env->regs[7] = regs->r7;
    env->regs[8] = regs->r8;
    env->regs[9] = regs->r9;
    env->regs[10] = regs->r10;
    env->regs[11] = regs->r11;
    env->regs[12] = regs->r12;
    env->regs[13] = regs->r13;
    env->regs[14] = info->start_stack;
    env->regs[15] = regs->acr;
    env->pc = regs->erp;
#elif defined(TARGET_S390X)
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->gprs[i];
    env->psw.mask = regs->psw.mask;
    env->psw.addr = regs->psw.addr;
#elif defined(TARGET_TILEGX)
    for (i = 0; i < TILEGX_R_COUNT; i++) {
        env->regs[i] = regs->regs[i];
    for (i = 0; i < TILEGX_SPR_COUNT; i++) {
#error unsupported target CPU

#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call.  */

        if (gdbserver_start(gdbstub_port) < 0) {
            fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
        gdb_handlesig(cpu, 0);
    trace_init_vcpu_events();