/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-version.h"
#include <sys/syscall.h>
#include <sys/resource.h>

#include "qapi/error.h"
#include "qemu/path.h"
#include "qemu/config-file.h"
#include "qemu/cutils.h"
#include "qemu/help_option.h"
#include "exec/exec-all.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
#include "trace/control.h"
#include "glib-compat.h"
static const char *filename;
static const char *argv0;
static int gdbstub_port;
static envlist_t *envlist;
static const char *cpu_model;
unsigned long mmap_min_addr;
unsigned long guest_base;
#define EXCP_DUMP(env, fmt, ...)                                        \
    CPUState *cs = ENV_GET_CPU(env);                                    \
    fprintf(stderr, fmt , ## __VA_ARGS__);                              \
    cpu_dump_state(cs, stderr, fprintf, 0);                             \
    if (qemu_log_separate()) {                                          \
        qemu_log(fmt, ## __VA_ARGS__);                                  \
        log_cpu_state(cs, 0);                                           \
    }
/*
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
 *
 * This way we will never overlap with our own libraries or binaries or stack
 * or anything else that QEMU maps.
 *
 * Many cpus reserve the high bit (or more than one for some 64-bit cpus)
 * of the address for the kernel.  Some cpus rely on this and user space
 * uses the high bit(s) for pointer tagging and the like.  For them, we
 * must preserve the expected address space.
 */
#ifndef MAX_RESERVED_VA
# if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
#  if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \
      (TARGET_LONG_BITS == 32 || defined(TARGET_ABI32))
/* There are a number of places where we assign reserved_va to a variable
   of type abi_ulong and expect it to fit.  Avoid the last page.  */
#   define MAX_RESERVED_VA  (0xfffffffful & TARGET_PAGE_MASK)
#  else
#   define MAX_RESERVED_VA  (1ul << TARGET_VIRT_ADDR_SPACE_BITS)
#  endif
# else
#  define MAX_RESERVED_VA  0
# endif
#endif
/* That said, reserving *too* much vm space via mmap can run into problems
   with rlimits, oom due to page table creation, etc.  We will still try it,
   if directed by the command-line option, but not by default.  */
#if HOST_LONG_BITS == 64 && TARGET_VIRT_ADDR_SPACE_BITS <= 32
unsigned long reserved_va = MAX_RESERVED_VA;
#else
unsigned long reserved_va;
#endif
static void usage(int exitcode);

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;

/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
   we allocate a bigger stack.  Need a better solution, for example
   by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;

void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}
#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)
{
    return -1;
}
#endif

/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* Make sure everything is in a consistent state for calling fork().  */
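/*
 * Note (added for readability): fork_start() takes the TB lock so it is
 * held consistently across fork(), and fork_end() below re-initialises the
 * lock and the CPU list in the child (or releases the lock in the parent).
 */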
void fork_start(void)
{
    qemu_mutex_lock(&tb_ctx.tb_lock);
}

void fork_end(int child)
{
    mmap_fork_end(child);
    if (child) {
        CPUState *cpu, *next_cpu;
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads.  */
        CPU_FOREACH_SAFE(cpu, next_cpu) {
            if (cpu != thread_cpu) {
                QTAILQ_REMOVE(&cpus, cpu, node);
            }
        }
        qemu_mutex_init(&tb_ctx.tb_lock);
        qemu_init_cpu_list();
        gdbserver_fork(thread_cpu);
    } else {
        qemu_mutex_unlock(&tb_ctx.tb_lock);
    }
}
/***********************************************************/
/* CPUX86 core interface */

uint64_t cpu_get_tsc(CPUX86State *env)
{
    return cpu_get_host_ticks();
}

static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);

static uint64_t *idt_table;

static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
{
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p[2] = tswap32(addr >> 32);
}

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
}
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
{
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
}

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate(idt_table + n, 0, dpl, 0, 0);
}

void cpu_loop(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_siginfo_t info;

    trapnr = cpu_exec(cs);
    process_queued_cpu_work(cs);

        /* linux syscall from int $0x80 */
        ret = do_syscall(env,
        if (ret == -TARGET_ERESTARTSYS) {
        } else if (ret != -TARGET_QEMU_ESIGRETURN) {
            env->regs[R_EAX] = ret;

        /* linux syscall from syscall instruction */
        ret = do_syscall(env,
        if (ret == -TARGET_ERESTARTSYS) {
        } else if (ret != -TARGET_QEMU_ESIGRETURN) {
            env->regs[R_EAX] = ret;
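        /*
         * Note on the two special return values checked above:
         * -TARGET_ERESTARTSYS means the syscall must be restarted after
         * signal handling (the other targets below back the PC up so the
         * syscall instruction is re-executed), while -TARGET_QEMU_ESIGRETURN
         * is returned by sigreturn-style syscalls that have already restored
         * the final register state, so EAX must not be overwritten with it.
         */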
        info.si_signo = TARGET_SIGBUS;
        info.si_code = TARGET_SI_KERNEL;
        info._sifields._sigfault._addr = 0;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

        /* XXX: potential problem if ABI32 */
#ifndef TARGET_X86_64
        if (env->eflags & VM_MASK) {
            handle_vm86_fault(env);
        info.si_signo = TARGET_SIGSEGV;
        info.si_code = TARGET_SI_KERNEL;
        info._sifields._sigfault._addr = 0;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

        info.si_signo = TARGET_SIGSEGV;
        if (!(env->error_code & 1))
            info.si_code = TARGET_SEGV_MAPERR;
        else
            info.si_code = TARGET_SEGV_ACCERR;
        info._sifields._sigfault._addr = env->cr[2];
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

#ifndef TARGET_X86_64
        if (env->eflags & VM_MASK) {
            handle_vm86_trap(env, trapnr);
        /* division by zero */
        info.si_signo = TARGET_SIGFPE;
        info.si_code = TARGET_FPE_INTDIV;
        info._sifields._sigfault._addr = env->eip;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

#ifndef TARGET_X86_64
        if (env->eflags & VM_MASK) {
            handle_vm86_trap(env, trapnr);
        info.si_signo = TARGET_SIGTRAP;
        if (trapnr == EXCP01_DB) {
            info.si_code = TARGET_TRAP_BRKPT;
            info._sifields._sigfault._addr = env->eip;
        } else {
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
        }
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

#ifndef TARGET_X86_64
        if (env->eflags & VM_MASK) {
            handle_vm86_trap(env, trapnr);
        info.si_signo = TARGET_SIGSEGV;
        info.si_code = TARGET_SI_KERNEL;
        info._sifields._sigfault._addr = 0;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

        info.si_signo = TARGET_SIGILL;
        info.si_code = TARGET_ILL_ILLOPN;
        info._sifields._sigfault._addr = env->eip;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

        /* just indicate that signals should be handled asap */

        sig = gdb_handlesig(cs, TARGET_SIGTRAP);
        info.si_code = TARGET_TRAP_BRKPT;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

        cpu_exec_step_atomic(cs);

        pc = env->segs[R_CS].base + env->eip;
        EXCP_DUMP(env, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
    process_pending_signals(env);
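/*
 * The accessor macros below wrap get_user/put_user for big-endian ARM
 * configurations: code fetches are byte-swapped when SCTLR.B indicates
 * byte-swapped code, and data accesses are swapped when the CPU is in
 * big-endian data mode, so the rest of cpu_loop() sees host-order values.
 */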
#define get_user_code_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \

#define get_user_code_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \

#define get_user_data_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \

#define get_user_data_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \

#define put_user_data_u32(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap32(__x);                         \
        }                                               \
        put_user_u32(__x, (gaddr));                     \
    })

#define put_user_data_u16(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap16(__x);                         \
        }                                               \
        put_user_u16(__x, (gaddr));                     \
    })

/* Commpage handling -- there is no commpage for AArch64 */

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 *
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
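/*
 * Guest code reaches these helpers by branching to fixed addresses in the
 * commpage (see the 0xffff0fxx case labels in do_kernel_trap() below); each
 * helper emulates the corresponding Linux kernel user helper.
 */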
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
    uint64_t oldval, newval, val;
    target_siginfo_t info;

    /* Based on the 32 bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations. However things like ldrex/strex are much harder so
       there's not much point trying. */

    cpsr = cpsr_read(env);

    if (get_user_u64(oldval, env->regs[0])) {
        env->exception.vaddress = env->regs[0];
    if (get_user_u64(newval, env->regs[1])) {
        env->exception.vaddress = env->regs[1];
    if (get_user_u64(val, addr)) {
        env->exception.vaddress = addr;
    if (put_user_u64(val, addr)) {
        env->exception.vaddress = addr;

    cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);

    /* We get the PC of the entry address - which is as good as anything,
       on a real kernel what you get depends on which mode it uses. */
    info.si_signo = TARGET_SIGSEGV;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->exception.vaddress;
    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
}

/* Handle a jump to the kernel code page.  */
static int do_kernel_trap(CPUARMState *env)
{
    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        /* ??? No-op. Will need to do better for SMP. */
    case 0xffff0fc0: /* __kernel_cmpxchg */
        /* XXX: This only works between threads, not between processes.
           It's probably possible to implement this with native host
           operations. However things like ldrex/strex are much harder so
           there's not much point trying. */
        cpsr = cpsr_read(env);
        /* FIXME: This should SEGV if the access fails. */
        if (get_user_u32(val, addr))
        if (val == env->regs[0]) {
            /* FIXME: Check for segfaults. */
            put_user_u32(val, addr);
        cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = cpu_get_tls(env);
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);

    /* Jump back to the caller.  */
    addr = env->regs[14];
    env->regs[15] = addr;
void cpu_loop(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    unsigned int n, insn;
    target_siginfo_t info;

    trapnr = cpu_exec(cs);
    process_queued_cpu_work(cs);

        TaskState *ts = cs->opaque;
        /* we handle the FPU emulation here, as Linux */
        /* we get the opcode */
        /* FIXME - what to do if get_user() fails? */
        get_user_code_u32(opcode, env->regs[15], env);

        rc = EmulateAll(opcode, &ts->fpa, env);
        if (rc == 0) { /* illegal instruction */
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->regs[15];
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        } else if (rc < 0) { /* FP exception */
            /* translate softfloat flags to FPSR flags */
            if (-rc & float_flag_invalid)
            if (-rc & float_flag_divbyzero)
            if (-rc & float_flag_overflow)
            if (-rc & float_flag_underflow)
            if (-rc & float_flag_inexact)

            FPSR fpsr = ts->fpa.fpsr;
            //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);

            if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
                info.si_signo = TARGET_SIGFPE;
                /* ordered by priority, least first */
                if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES;
                if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND;
                if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF;
                if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV;
                if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV;
                info._sifields._sigfault._addr = env->regs[15];
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

            /* accumulate unenabled exceptions */
            if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC))
            if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC))
            if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC))
            if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC))
            if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC))
        } else { /* everything OK */

        if (trapnr == EXCP_BKPT) {
            /* FIXME - what to do if get_user() fails? */
            get_user_code_u16(insn, env->regs[15], env);
            /* FIXME - what to do if get_user() fails? */
            get_user_code_u32(insn, env->regs[15], env);
            n = (insn & 0xf) | ((insn >> 4) & 0xff0);
            /* FIXME - what to do if get_user() fails? */
            get_user_code_u16(insn, env->regs[15] - 2, env);
            /* FIXME - what to do if get_user() fails? */
            get_user_code_u32(insn, env->regs[15] - 4, env);

        if (n == ARM_NR_cacheflush) {
        } else if (n == ARM_NR_semihosting
                   || n == ARM_NR_thumb_semihosting) {
            env->regs[0] = do_arm_semihosting (env);
        } else if (n == 0 || n >= ARM_SYSCALL_BASE || env->thumb) {
            if (env->thumb || n == 0) {
                n -= ARM_SYSCALL_BASE;
            if (n > ARM_NR_BASE) {
                case ARM_NR_cacheflush:
                    cpu_set_tls(env, env->regs[0]);
                case ARM_NR_breakpoint:
                    env->regs[15] -= env->thumb ? 2 : 4;
                    gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
                    env->regs[0] = -TARGET_ENOSYS;
                ret = do_syscall(env,
                if (ret == -TARGET_ERESTARTSYS) {
                    env->regs[15] -= env->thumb ? 2 : 4;
                } else if (ret != -TARGET_QEMU_ESIGRETURN) {

            env->regs[0] = do_arm_semihosting(env);
        /* just indicate that signals should be handled asap */
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            addr = env->exception.vaddress;
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = addr;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case EXCP_KERNEL_TRAP:
            if (do_kernel_trap(env))
            /* nothing to do here for user-mode, just resume guest code */
            cpu_exec_step_atomic(cs);
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
        process_pending_signals(env);
/* AArch64 main loop */
void cpu_loop(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    target_siginfo_t info;

    trapnr = cpu_exec(cs);
    process_queued_cpu_work(cs);

        ret = do_syscall(env,
        if (ret == -TARGET_ERESTARTSYS) {
        } else if (ret != -TARGET_QEMU_ESIGRETURN) {
        /* just indicate that signals should be handled asap */
        info.si_signo = TARGET_SIGILL;
        info.si_code = TARGET_ILL_ILLOPN;
        info._sifields._sigfault._addr = env->pc;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        info.si_signo = TARGET_SIGSEGV;
        /* XXX: check env->error_code */
        info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = env->exception.vaddress;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

        sig = gdb_handlesig(cs, TARGET_SIGTRAP);
        info.si_code = TARGET_TRAP_BRKPT;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

        env->xregs[0] = do_arm_semihosting(env);
        /* nothing to do here for user-mode, just resume guest code */
        cpu_exec_step_atomic(cs);
        EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
    process_pending_signals(env);
    /* Exception return on AArch64 always clears the exclusive monitor,
     * so any return to running guest code implies this.
     */
    env->exclusive_addr = -1;
#endif /* ndef TARGET_ABI32 */
#ifdef TARGET_UNICORE32

void cpu_loop(CPUUniCore32State *env)
{
    CPUState *cs = CPU(uc32_env_get_cpu(env));
    unsigned int n, insn;
    target_siginfo_t info;

    trapnr = cpu_exec(cs);
    process_queued_cpu_work(cs);

        get_user_u32(insn, env->regs[31] - 4);
        if (n >= UC32_SYSCALL_BASE) {
            n -= UC32_SYSCALL_BASE;
            if (n == UC32_SYSCALL_NR_set_tls) {
                cpu_set_tls(env, env->regs[0]);
                abi_long ret = do_syscall(env,
                if (ret == -TARGET_ERESTARTSYS) {
                } else if (ret != -TARGET_QEMU_ESIGRETURN) {
    case UC32_EXCP_DTRAP:
    case UC32_EXCP_ITRAP:
        info.si_signo = TARGET_SIGSEGV;
        /* XXX: check env->error_code */
        info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = env->cp0.c4_faultaddr;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        /* just indicate that signals should be handled asap */

        sig = gdb_handlesig(cs, TARGET_SIGTRAP);
        info.si_code = TARGET_TRAP_BRKPT;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

        cpu_exec_step_atomic(cs);
    process_pending_signals(env);

    EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
#define SPARC64_STACK_BIAS 2047
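/*
 * On 64-bit SPARC the ABI biases the stack pointer: %sp points
 * SPARC64_STACK_BIAS bytes below the actual register save area, which is
 * why the window spill/fill code below adds the bias before dereferencing.
 */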
/* WARNING: dealing with register windows _is_ complicated. More info
   can be found at http://www.sics.se/~psm/sparcstack.html */
static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
{
    index = (index + cwp * 16) % (16 * env->nwindows);
    /* wrap handling : if cwp is on the last window, then we use the
       registers 'after' the end */
    if (index < 8 && env->cwp == env->nwindows - 1)
        index += 16 * env->nwindows;
}

/* save the register window 'cwp1' */
static inline void save_window_offset(CPUSPARCState *env, int cwp1)
{
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
    sp_ptr += SPARC64_STACK_BIAS;
#if defined(DEBUG_WIN)
    printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",
    for (i = 0; i < 16; i++) {
        /* FIXME - what to do if put_user() fails? */
        put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);

static void save_window(CPUSPARCState *env)
{
#ifndef TARGET_SPARC64
    unsigned int new_wim;
    new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));

static void restore_window(CPUSPARCState *env)
{
#ifndef TARGET_SPARC64
    unsigned int new_wim;
    unsigned int i, cwp1;

#ifndef TARGET_SPARC64
    new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);

    /* restore the invalid window */
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
    sp_ptr += SPARC64_STACK_BIAS;
#if defined(DEBUG_WIN)
    printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n",
    for (i = 0; i < 16; i++) {
        /* FIXME - what to do if get_user() fails? */
        get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);
#ifdef TARGET_SPARC64
    if (env->cleanwin < env->nwindows - 1)

static void flush_windows(CPUSPARCState *env)
{
        /* if restore would invoke restore_window(), then we can stop */
        cwp1 = cpu_cwp_inc(env, env->cwp + offset);
#ifndef TARGET_SPARC64
        if (env->wim & (1 << cwp1))
        if (env->canrestore == 0)
        save_window_offset(env, cwp1);
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
#ifndef TARGET_SPARC64
    /* set wim so that restore will reload the registers */
    env->wim = 1 << cwp1;
#if defined(DEBUG_WIN)
    printf("flush_windows: nb=%d\n", offset - 1);
void cpu_loop (CPUSPARCState *env)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    target_siginfo_t info;

    trapnr = cpu_exec(cs);
    process_queued_cpu_work(cs);

    /* Compute PSR before exposing state.  */
    if (env->cc_op != CC_OP_FLAGS) {

#ifndef TARGET_SPARC64
        ret = do_syscall (env, env->gregs[1],
                          env->regwptr[0], env->regwptr[1],
                          env->regwptr[2], env->regwptr[3],
                          env->regwptr[4], env->regwptr[5],
        if (ret == -TARGET_ERESTARTSYS || ret == -TARGET_QEMU_ESIGRETURN) {
        if ((abi_ulong)ret >= (abi_ulong)(-515)) {
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
            env->xcc |= PSR_CARRY;
            env->psr |= PSR_CARRY;
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
            env->xcc &= ~PSR_CARRY;
            env->psr &= ~PSR_CARRY;
        env->regwptr[0] = ret;
        /* next instruction */
        env->npc = env->npc + 4;
    case 0x83: /* flush windows */
        /* next instruction */
        env->npc = env->npc + 4;
#ifndef TARGET_SPARC64
    case TT_WIN_OVF: /* window overflow */
    case TT_WIN_UNF: /* window underflow */
        restore_window(env);
        info.si_signo = TARGET_SIGSEGV;
        /* XXX: check env->error_code */
        info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = env->mmuregs[4];
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
    case TT_SPILL: /* window overflow */
    case TT_FILL: /* window underflow */
        restore_window(env);
        info.si_signo = TARGET_SIGSEGV;
        /* XXX: check env->error_code */
        info.si_code = TARGET_SEGV_MAPERR;
        if (trapnr == TT_DFAULT)
            info._sifields._sigfault._addr = env->dmmu.mmuregs[4];
        else
            info._sifields._sigfault._addr = cpu_tsptr(env)->tpc;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
#ifndef TARGET_ABI32
        sparc64_get_context(env);
        sparc64_set_context(env);
    case EXCP_INTERRUPT:
        /* just indicate that signals should be handled asap */
        info.si_signo = TARGET_SIGILL;
        info.si_code = TARGET_ILL_ILLOPC;
        info._sifields._sigfault._addr = env->pc;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

        sig = gdb_handlesig(cs, TARGET_SIGTRAP);
        info.si_signo = sig;
        info.si_code = TARGET_TRAP_BRKPT;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

        cpu_exec_step_atomic(cs);
        printf ("Unhandled trap: 0x%x\n", trapnr);
        cpu_dump_state(cs, stderr, fprintf, 0);
    process_pending_signals (env);
static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env)
{
    return cpu_get_host_ticks();
}

uint64_t cpu_ppc_load_tbl(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env);
}

uint32_t cpu_ppc_load_tbu(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env) >> 32;
}

uint64_t cpu_ppc_load_atbl(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env);
}

uint32_t cpu_ppc_load_atbu(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env) >> 32;
}

uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env)
    __attribute__ (( alias ("cpu_ppc_load_tbu") ));

uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env)
{
    return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
}

/* XXX: to be fixed */
int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)

int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
static int do_store_exclusive(CPUPPCState *env)
{
    target_ulong page_addr;
    target_ulong val, val2 __attribute__((unused)) = 0;

    addr = env->reserve_ea;
    page_addr = addr & TARGET_PAGE_MASK;
    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {
        int reg = env->reserve_info & 0x1f;
        int size = env->reserve_info >> 5;
        if (addr == env->reserve_addr) {
            case 1: segv = get_user_u8(val, addr); break;
            case 2: segv = get_user_u16(val, addr); break;
            case 4: segv = get_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
            case 8: segv = get_user_u64(val, addr); break;
                segv = get_user_u64(val, addr);
                segv = get_user_u64(val2, addr + 8);
            if (!segv && val == env->reserve_val) {
                val = env->gpr[reg];
                case 1: segv = put_user_u8(val, addr); break;
                case 2: segv = put_user_u16(val, addr); break;
                case 4: segv = put_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
                case 8: segv = put_user_u64(val, addr); break;
                    if (val2 == env->reserve_val2) {
                        val = env->gpr[reg + 1];
                        val2 = env->gpr[reg + 1];
                        segv = put_user_u64(val, addr);
                        segv = put_user_u64(val2, addr + 8);
    env->crf[0] = (stored << 1) | xer_so;
    env->reserve_addr = (target_ulong)-1;
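    /*
     * Whether or not the store succeeded, the reservation established by the
     * matching load-and-reserve is now gone: resetting reserve_addr makes a
     * later store-conditional fail until a fresh reservation is taken.
     */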
void cpu_loop(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_siginfo_t info;

    trapnr = cpu_exec(cs);
    process_queued_cpu_work(cs);

    case POWERPC_EXCP_NONE:
    case POWERPC_EXCP_CRITICAL: /* Critical input */
        cpu_abort(cs, "Critical interrupt while in user mode. "
    case POWERPC_EXCP_MCHECK: /* Machine check exception */
        cpu_abort(cs, "Machine check exception while in user mode. "
    case POWERPC_EXCP_DSI: /* Data storage exception */
        /* XXX: check this. Seems bugged */
        switch (env->error_code & 0xFF000000) {
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLADR;
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_ACCERR;
            /* Let's send a regular segfault... */
            EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = env->nip;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
    case POWERPC_EXCP_ISI: /* Instruction storage exception */
        /* XXX: check this */
        switch (env->error_code & 0xFF000000) {
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_ACCERR;
            /* Let's send a regular segfault... */
            EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = env->nip - 4;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
    case POWERPC_EXCP_EXTERNAL: /* External input */
        cpu_abort(cs, "External interrupt while in user mode. "
    case POWERPC_EXCP_ALIGN: /* Alignment exception */
        /* XXX: check this */
        info.si_signo = TARGET_SIGBUS;
        info.si_code = TARGET_BUS_ADRALN;
        info._sifields._sigfault._addr = env->nip;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
    case POWERPC_EXCP_PROGRAM: /* Program exception */
    case POWERPC_EXCP_HV_EMU: /* HV emulation */
        /* XXX: check this */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            info.si_signo = TARGET_SIGFPE;
            switch (env->error_code & 0xF) {
            case POWERPC_EXCP_FP_OX:
                info.si_code = TARGET_FPE_FLTOVF;
            case POWERPC_EXCP_FP_UX:
                info.si_code = TARGET_FPE_FLTUND;
            case POWERPC_EXCP_FP_ZX:
            case POWERPC_EXCP_FP_VXZDZ:
                info.si_code = TARGET_FPE_FLTDIV;
            case POWERPC_EXCP_FP_XX:
                info.si_code = TARGET_FPE_FLTRES;
            case POWERPC_EXCP_FP_VXSOFT:
                info.si_code = TARGET_FPE_FLTINV;
            case POWERPC_EXCP_FP_VXSNAN:
            case POWERPC_EXCP_FP_VXISI:
            case POWERPC_EXCP_FP_VXIDI:
            case POWERPC_EXCP_FP_VXIMZ:
            case POWERPC_EXCP_FP_VXVC:
            case POWERPC_EXCP_FP_VXSQRT:
            case POWERPC_EXCP_FP_VXCVI:
                info.si_code = TARGET_FPE_FLTSUB;
                EXCP_DUMP(env, "Unknown floating point exception (%02x)\n",
        case POWERPC_EXCP_INVAL:
            info.si_signo = TARGET_SIGILL;
            switch (env->error_code & 0xF) {
            case POWERPC_EXCP_INVAL_INVAL:
                info.si_code = TARGET_ILL_ILLOPC;
            case POWERPC_EXCP_INVAL_LSWX:
                info.si_code = TARGET_ILL_ILLOPN;
            case POWERPC_EXCP_INVAL_SPR:
                info.si_code = TARGET_ILL_PRVREG;
            case POWERPC_EXCP_INVAL_FP:
                info.si_code = TARGET_ILL_COPROC;
                EXCP_DUMP(env, "Unknown invalid operation (%02x)\n",
                          env->error_code & 0xF);
                info.si_code = TARGET_ILL_ILLADR;
        case POWERPC_EXCP_PRIV:
            info.si_signo = TARGET_SIGILL;
            switch (env->error_code & 0xF) {
            case POWERPC_EXCP_PRIV_OPC:
                info.si_code = TARGET_ILL_PRVOPC;
            case POWERPC_EXCP_PRIV_REG:
                info.si_code = TARGET_ILL_PRVREG;
                EXCP_DUMP(env, "Unknown privilege violation (%02x)\n",
                          env->error_code & 0xF);
                info.si_code = TARGET_ILL_PRVOPC;
        case POWERPC_EXCP_TRAP:
            cpu_abort(cs, "Tried to call a TRAP\n");
            /* Should not happen ! */
            cpu_abort(cs, "Unknown program exception (%02x)\n",
        info._sifields._sigfault._addr = env->nip;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
    case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
        info.si_signo = TARGET_SIGILL;
        info.si_code = TARGET_ILL_COPROC;
        info._sifields._sigfault._addr = env->nip;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
    case POWERPC_EXCP_SYSCALL: /* System call exception */
        cpu_abort(cs, "Syscall exception while in user mode. "
    case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
        info.si_signo = TARGET_SIGILL;
        info.si_code = TARGET_ILL_COPROC;
        info._sifields._sigfault._addr = env->nip;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
    case POWERPC_EXCP_DECR: /* Decrementer exception */
        cpu_abort(cs, "Decrementer interrupt while in user mode. "
    case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
        cpu_abort(cs, "Fix interval timer interrupt while in user mode. "
    case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
        cpu_abort(cs, "Watchdog timer interrupt while in user mode. "
    case POWERPC_EXCP_DTLB: /* Data TLB error */
        cpu_abort(cs, "Data TLB exception while in user mode. "
    case POWERPC_EXCP_ITLB: /* Instruction TLB error */
        cpu_abort(cs, "Instruction TLB exception while in user mode. "
    case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */
        info.si_signo = TARGET_SIGILL;
        info.si_code = TARGET_ILL_COPROC;
        info._sifields._sigfault._addr = env->nip;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
    case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */
        cpu_abort(cs, "Embedded floating-point data IRQ not handled\n");
    case POWERPC_EXCP_EFPRI: /* Embedded floating-point round IRQ */
        cpu_abort(cs, "Embedded floating-point round IRQ not handled\n");
    case POWERPC_EXCP_EPERFM: /* Embedded performance monitor IRQ */
        cpu_abort(cs, "Performance monitor exception not handled\n");
    case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
        cpu_abort(cs, "Doorbell interrupt while in user mode. "
    case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
        cpu_abort(cs, "Doorbell critical interrupt while in user mode. "
    case POWERPC_EXCP_RESET: /* System reset exception */
        cpu_abort(cs, "Reset interrupt while in user mode. "
    case POWERPC_EXCP_DSEG: /* Data segment exception */
        cpu_abort(cs, "Data segment exception while in user mode. "
    case POWERPC_EXCP_ISEG: /* Instruction segment exception */
        cpu_abort(cs, "Instruction segment exception "
                  "while in user mode. Aborting\n");
    /* PowerPC 64 with hypervisor mode support */
    case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
        cpu_abort(cs, "Hypervisor decrementer interrupt "
                  "while in user mode. Aborting\n");
    case POWERPC_EXCP_TRACE: /* Trace exception */
        /* we use this exception to emulate step-by-step execution mode. */
    /* PowerPC 64 with hypervisor mode support */
    case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
        cpu_abort(cs, "Hypervisor data storage exception "
                  "while in user mode. Aborting\n");
    case POWERPC_EXCP_HISI: /* Hypervisor instruction storage excp */
        cpu_abort(cs, "Hypervisor instruction storage exception "
                  "while in user mode. Aborting\n");
    case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
        cpu_abort(cs, "Hypervisor data segment exception "
                  "while in user mode. Aborting\n");
    case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment excp */
        cpu_abort(cs, "Hypervisor instruction segment exception "
                  "while in user mode. Aborting\n");
    case POWERPC_EXCP_VPU: /* Vector unavailable exception */
        info.si_signo = TARGET_SIGILL;
        info.si_code = TARGET_ILL_COPROC;
        info._sifields._sigfault._addr = env->nip;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
    case POWERPC_EXCP_PIT: /* Programmable interval timer IRQ */
        cpu_abort(cs, "Programmable interval timer interrupt "
                  "while in user mode. Aborting\n");
    case POWERPC_EXCP_IO: /* IO error exception */
        cpu_abort(cs, "IO error exception while in user mode. "
    case POWERPC_EXCP_RUNM: /* Run mode exception */
        cpu_abort(cs, "Run mode exception while in user mode. "
    case POWERPC_EXCP_EMUL: /* Emulation trap exception */
        cpu_abort(cs, "Emulation trap exception not handled\n");
    case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
        cpu_abort(cs, "Instruction fetch TLB exception "
                  "while in user-mode. Aborting");
    case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
        cpu_abort(cs, "Data load TLB exception while in user-mode. "
    case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
        cpu_abort(cs, "Data store TLB exception while in user-mode. "
    case POWERPC_EXCP_FPA: /* Floating-point assist exception */
        cpu_abort(cs, "Floating-point assist exception not handled\n");
    case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
        cpu_abort(cs, "Instruction address breakpoint exception "
    case POWERPC_EXCP_SMI: /* System management interrupt */
        cpu_abort(cs, "System management interrupt while in user mode. "
    case POWERPC_EXCP_THERM: /* Thermal interrupt */
        cpu_abort(cs, "Thermal interrupt interrupt while in user mode. "
    case POWERPC_EXCP_PERFM: /* Embedded performance monitor IRQ */
        cpu_abort(cs, "Performance monitor exception not handled\n");
    case POWERPC_EXCP_VPUA: /* Vector assist exception */
        cpu_abort(cs, "Vector assist exception not handled\n");
    case POWERPC_EXCP_SOFTP: /* Soft patch exception */
        cpu_abort(cs, "Soft patch exception not handled\n");
    case POWERPC_EXCP_MAINT: /* Maintenance exception */
        cpu_abort(cs, "Maintenance exception while in user mode. "
    case POWERPC_EXCP_STOP: /* stop translation */
        /* We did invalidate the instruction cache. Go on */
    case POWERPC_EXCP_BRANCH: /* branch instruction: */
        /* We just stopped because of a branch. Go on */
    case POWERPC_EXCP_SYSCALL_USER:
        /* system call in user-mode emulation */
        /* PPC ABI uses overflow flag in cr0 to signal an error */
        env->crf[0] &= ~0x1;
        ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4],
                         env->gpr[5], env->gpr[6], env->gpr[7],
        if (ret == -TARGET_ERESTARTSYS) {
        if (ret == (target_ulong)(-TARGET_QEMU_ESIGRETURN)) {
            /* Returning from a successful sigreturn syscall.
               Avoid corrupting register state.  */
        if (ret > (target_ulong)(-515)) {
    case POWERPC_EXCP_STCX:
        if (do_store_exclusive(env)) {
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

        sig = gdb_handlesig(cs, TARGET_SIGTRAP);
        info.si_signo = sig;
        info.si_code = TARGET_TRAP_BRKPT;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
    case EXCP_INTERRUPT:
        /* just indicate that signals should be handled asap */
        cpu_exec_step_atomic(cs);
        cpu_abort(cs, "Unknown exception 0x%x. Aborting\n", trapnr);
    process_pending_signals(env);
# ifdef TARGET_ABI_MIPSO32
#  define MIPS_SYS(name, args) args,
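/*
 * For the o32 ABI the table below records, for each syscall starting at
 * 4000, how many arguments it takes; cpu_loop() uses this count to decide
 * how many of the later arguments must be fetched from the guest stack.
 */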
1791 static const uint8_t mips_syscall_args
[] = {
1792 MIPS_SYS(sys_syscall
, 8) /* 4000 */
1793 MIPS_SYS(sys_exit
, 1)
1794 MIPS_SYS(sys_fork
, 0)
1795 MIPS_SYS(sys_read
, 3)
1796 MIPS_SYS(sys_write
, 3)
1797 MIPS_SYS(sys_open
, 3) /* 4005 */
1798 MIPS_SYS(sys_close
, 1)
1799 MIPS_SYS(sys_waitpid
, 3)
1800 MIPS_SYS(sys_creat
, 2)
1801 MIPS_SYS(sys_link
, 2)
1802 MIPS_SYS(sys_unlink
, 1) /* 4010 */
1803 MIPS_SYS(sys_execve
, 0)
1804 MIPS_SYS(sys_chdir
, 1)
1805 MIPS_SYS(sys_time
, 1)
1806 MIPS_SYS(sys_mknod
, 3)
1807 MIPS_SYS(sys_chmod
, 2) /* 4015 */
1808 MIPS_SYS(sys_lchown
, 3)
1809 MIPS_SYS(sys_ni_syscall
, 0)
1810 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_stat */
1811 MIPS_SYS(sys_lseek
, 3)
1812 MIPS_SYS(sys_getpid
, 0) /* 4020 */
1813 MIPS_SYS(sys_mount
, 5)
1814 MIPS_SYS(sys_umount
, 1)
1815 MIPS_SYS(sys_setuid
, 1)
1816 MIPS_SYS(sys_getuid
, 0)
1817 MIPS_SYS(sys_stime
, 1) /* 4025 */
1818 MIPS_SYS(sys_ptrace
, 4)
1819 MIPS_SYS(sys_alarm
, 1)
1820 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_fstat */
1821 MIPS_SYS(sys_pause
, 0)
1822 MIPS_SYS(sys_utime
, 2) /* 4030 */
1823 MIPS_SYS(sys_ni_syscall
, 0)
1824 MIPS_SYS(sys_ni_syscall
, 0)
1825 MIPS_SYS(sys_access
, 2)
1826 MIPS_SYS(sys_nice
, 1)
1827 MIPS_SYS(sys_ni_syscall
, 0) /* 4035 */
1828 MIPS_SYS(sys_sync
, 0)
1829 MIPS_SYS(sys_kill
, 2)
1830 MIPS_SYS(sys_rename
, 2)
1831 MIPS_SYS(sys_mkdir
, 2)
1832 MIPS_SYS(sys_rmdir
, 1) /* 4040 */
1833 MIPS_SYS(sys_dup
, 1)
1834 MIPS_SYS(sys_pipe
, 0)
1835 MIPS_SYS(sys_times
, 1)
1836 MIPS_SYS(sys_ni_syscall
, 0)
1837 MIPS_SYS(sys_brk
, 1) /* 4045 */
1838 MIPS_SYS(sys_setgid
, 1)
1839 MIPS_SYS(sys_getgid
, 0)
1840 MIPS_SYS(sys_ni_syscall
, 0) /* was signal(2) */
1841 MIPS_SYS(sys_geteuid
, 0)
1842 MIPS_SYS(sys_getegid
, 0) /* 4050 */
1843 MIPS_SYS(sys_acct
, 0)
1844 MIPS_SYS(sys_umount2
, 2)
1845 MIPS_SYS(sys_ni_syscall
, 0)
1846 MIPS_SYS(sys_ioctl
, 3)
1847 MIPS_SYS(sys_fcntl
, 3) /* 4055 */
1848 MIPS_SYS(sys_ni_syscall
, 2)
1849 MIPS_SYS(sys_setpgid
, 2)
1850 MIPS_SYS(sys_ni_syscall
, 0)
1851 MIPS_SYS(sys_olduname
, 1)
1852 MIPS_SYS(sys_umask
, 1) /* 4060 */
1853 MIPS_SYS(sys_chroot
, 1)
1854 MIPS_SYS(sys_ustat
, 2)
1855 MIPS_SYS(sys_dup2
, 2)
1856 MIPS_SYS(sys_getppid
, 0)
1857 MIPS_SYS(sys_getpgrp
, 0) /* 4065 */
1858 MIPS_SYS(sys_setsid
, 0)
1859 MIPS_SYS(sys_sigaction
, 3)
1860 MIPS_SYS(sys_sgetmask
, 0)
1861 MIPS_SYS(sys_ssetmask
, 1)
1862 MIPS_SYS(sys_setreuid
, 2) /* 4070 */
1863 MIPS_SYS(sys_setregid
, 2)
1864 MIPS_SYS(sys_sigsuspend
, 0)
1865 MIPS_SYS(sys_sigpending
, 1)
1866 MIPS_SYS(sys_sethostname
, 2)
1867 MIPS_SYS(sys_setrlimit
, 2) /* 4075 */
1868 MIPS_SYS(sys_getrlimit
, 2)
1869 MIPS_SYS(sys_getrusage
, 2)
1870 MIPS_SYS(sys_gettimeofday
, 2)
1871 MIPS_SYS(sys_settimeofday
, 2)
1872 MIPS_SYS(sys_getgroups
, 2) /* 4080 */
1873 MIPS_SYS(sys_setgroups
, 2)
1874 MIPS_SYS(sys_ni_syscall
, 0) /* old_select */
1875 MIPS_SYS(sys_symlink
, 2)
1876 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_lstat */
1877 MIPS_SYS(sys_readlink
, 3) /* 4085 */
1878 MIPS_SYS(sys_uselib
, 1)
1879 MIPS_SYS(sys_swapon
, 2)
1880 MIPS_SYS(sys_reboot
, 3)
1881 MIPS_SYS(old_readdir
, 3)
1882 MIPS_SYS(old_mmap
, 6) /* 4090 */
1883 MIPS_SYS(sys_munmap
, 2)
1884 MIPS_SYS(sys_truncate
, 2)
1885 MIPS_SYS(sys_ftruncate
, 2)
1886 MIPS_SYS(sys_fchmod
, 2)
1887 MIPS_SYS(sys_fchown
, 3) /* 4095 */
1888 MIPS_SYS(sys_getpriority
, 2)
1889 MIPS_SYS(sys_setpriority
, 3)
1890 MIPS_SYS(sys_ni_syscall
, 0)
1891 MIPS_SYS(sys_statfs
, 2)
1892 MIPS_SYS(sys_fstatfs
, 2) /* 4100 */
1893 MIPS_SYS(sys_ni_syscall
, 0) /* was ioperm(2) */
1894 MIPS_SYS(sys_socketcall
, 2)
1895 MIPS_SYS(sys_syslog
, 3)
1896 MIPS_SYS(sys_setitimer
, 3)
1897 MIPS_SYS(sys_getitimer
, 2) /* 4105 */
1898 MIPS_SYS(sys_newstat
, 2)
1899 MIPS_SYS(sys_newlstat
, 2)
1900 MIPS_SYS(sys_newfstat
, 2)
1901 MIPS_SYS(sys_uname
, 1)
1902 MIPS_SYS(sys_ni_syscall
, 0) /* 4110 was iopl(2) */
1903 MIPS_SYS(sys_vhangup
, 0)
1904 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_idle() */
1905 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_vm86 */
1906 MIPS_SYS(sys_wait4
, 4)
1907 MIPS_SYS(sys_swapoff
, 1) /* 4115 */
1908 MIPS_SYS(sys_sysinfo
, 1)
1909 MIPS_SYS(sys_ipc
, 6)
1910 MIPS_SYS(sys_fsync
, 1)
1911 MIPS_SYS(sys_sigreturn
, 0)
1912 MIPS_SYS(sys_clone
, 6) /* 4120 */
1913 MIPS_SYS(sys_setdomainname
, 2)
1914 MIPS_SYS(sys_newuname
, 1)
1915 MIPS_SYS(sys_ni_syscall
, 0) /* sys_modify_ldt */
1916 MIPS_SYS(sys_adjtimex
, 1)
1917 MIPS_SYS(sys_mprotect
, 3) /* 4125 */
1918 MIPS_SYS(sys_sigprocmask
, 3)
1919 MIPS_SYS(sys_ni_syscall
, 0) /* was create_module */
1920 MIPS_SYS(sys_init_module
, 5)
1921 MIPS_SYS(sys_delete_module
, 1)
1922 MIPS_SYS(sys_ni_syscall
, 0) /* 4130 was get_kernel_syms */
1923 MIPS_SYS(sys_quotactl
, 0)
1924 MIPS_SYS(sys_getpgid
, 1)
1925 MIPS_SYS(sys_fchdir
, 1)
1926 MIPS_SYS(sys_bdflush
, 2)
1927 MIPS_SYS(sys_sysfs
, 3) /* 4135 */
1928 MIPS_SYS(sys_personality
, 1)
1929 MIPS_SYS(sys_ni_syscall
, 0) /* for afs_syscall */
1930 MIPS_SYS(sys_setfsuid
, 1)
1931 MIPS_SYS(sys_setfsgid
, 1)
1932 MIPS_SYS(sys_llseek
, 5) /* 4140 */
1933 MIPS_SYS(sys_getdents
, 3)
1934 MIPS_SYS(sys_select
, 5)
1935 MIPS_SYS(sys_flock
, 2)
1936 MIPS_SYS(sys_msync
, 3)
1937 MIPS_SYS(sys_readv
, 3) /* 4145 */
1938 MIPS_SYS(sys_writev
, 3)
1939 MIPS_SYS(sys_cacheflush
, 3)
1940 MIPS_SYS(sys_cachectl
, 3)
1941 MIPS_SYS(sys_sysmips
, 4)
1942 MIPS_SYS(sys_ni_syscall
, 0) /* 4150 */
1943 MIPS_SYS(sys_getsid
, 1)
1944 MIPS_SYS(sys_fdatasync
, 0)
1945 MIPS_SYS(sys_sysctl
, 1)
1946 MIPS_SYS(sys_mlock
, 2)
1947 MIPS_SYS(sys_munlock
, 2) /* 4155 */
1948 MIPS_SYS(sys_mlockall
, 1)
1949 MIPS_SYS(sys_munlockall
, 0)
1950 MIPS_SYS(sys_sched_setparam
, 2)
1951 MIPS_SYS(sys_sched_getparam
, 2)
1952 MIPS_SYS(sys_sched_setscheduler
, 3) /* 4160 */
1953 MIPS_SYS(sys_sched_getscheduler
, 1)
1954 MIPS_SYS(sys_sched_yield
, 0)
1955 MIPS_SYS(sys_sched_get_priority_max
, 1)
1956 MIPS_SYS(sys_sched_get_priority_min
, 1)
1957 MIPS_SYS(sys_sched_rr_get_interval
, 2) /* 4165 */
1958 MIPS_SYS(sys_nanosleep
, 2)
1959 MIPS_SYS(sys_mremap
, 5)
1960 MIPS_SYS(sys_accept
, 3)
1961 MIPS_SYS(sys_bind
, 3)
1962 MIPS_SYS(sys_connect
, 3) /* 4170 */
1963 MIPS_SYS(sys_getpeername
, 3)
1964 MIPS_SYS(sys_getsockname
, 3)
1965 MIPS_SYS(sys_getsockopt
, 5)
1966 MIPS_SYS(sys_listen
, 2)
1967 MIPS_SYS(sys_recv
, 4) /* 4175 */
1968 MIPS_SYS(sys_recvfrom
, 6)
1969 MIPS_SYS(sys_recvmsg
, 3)
1970 MIPS_SYS(sys_send
, 4)
1971 MIPS_SYS(sys_sendmsg
, 3)
1972 MIPS_SYS(sys_sendto
, 6) /* 4180 */
1973 MIPS_SYS(sys_setsockopt
, 5)
1974 MIPS_SYS(sys_shutdown
, 2)
1975 MIPS_SYS(sys_socket
, 3)
1976 MIPS_SYS(sys_socketpair
, 4)
1977 MIPS_SYS(sys_setresuid
, 3) /* 4185 */
1978 MIPS_SYS(sys_getresuid
, 3)
1979 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_query_module */
1980 MIPS_SYS(sys_poll
, 3)
1981 MIPS_SYS(sys_nfsservctl
, 3)
1982 MIPS_SYS(sys_setresgid
, 3) /* 4190 */
1983 MIPS_SYS(sys_getresgid
, 3)
1984 MIPS_SYS(sys_prctl
, 5)
1985 MIPS_SYS(sys_rt_sigreturn
, 0)
1986 MIPS_SYS(sys_rt_sigaction
, 4)
1987 MIPS_SYS(sys_rt_sigprocmask
, 4) /* 4195 */
1988 MIPS_SYS(sys_rt_sigpending
, 2)
1989 MIPS_SYS(sys_rt_sigtimedwait
, 4)
1990 MIPS_SYS(sys_rt_sigqueueinfo
, 3)
1991 MIPS_SYS(sys_rt_sigsuspend
, 0)
1992 MIPS_SYS(sys_pread64
, 6) /* 4200 */
1993 MIPS_SYS(sys_pwrite64
, 6)
1994 MIPS_SYS(sys_chown
, 3)
1995 MIPS_SYS(sys_getcwd
, 2)
1996 MIPS_SYS(sys_capget
, 2)
1997 MIPS_SYS(sys_capset
, 2) /* 4205 */
1998 MIPS_SYS(sys_sigaltstack
, 2)
1999 MIPS_SYS(sys_sendfile
, 4)
2000 MIPS_SYS(sys_ni_syscall
, 0)
2001 MIPS_SYS(sys_ni_syscall
, 0)
2002 MIPS_SYS(sys_mmap2
, 6) /* 4210 */
2003 MIPS_SYS(sys_truncate64
, 4)
2004 MIPS_SYS(sys_ftruncate64
, 4)
2005 MIPS_SYS(sys_stat64
, 2)
2006 MIPS_SYS(sys_lstat64
, 2)
2007 MIPS_SYS(sys_fstat64
, 2) /* 4215 */
2008 MIPS_SYS(sys_pivot_root
, 2)
2009 MIPS_SYS(sys_mincore
, 3)
2010 MIPS_SYS(sys_madvise
, 3)
2011 MIPS_SYS(sys_getdents64
, 3)
2012 MIPS_SYS(sys_fcntl64
, 3) /* 4220 */
2013 MIPS_SYS(sys_ni_syscall
, 0)
2014 MIPS_SYS(sys_gettid
, 0)
2015 MIPS_SYS(sys_readahead
, 5)
2016 MIPS_SYS(sys_setxattr
, 5)
2017 MIPS_SYS(sys_lsetxattr
, 5) /* 4225 */
2018 MIPS_SYS(sys_fsetxattr
, 5)
2019 MIPS_SYS(sys_getxattr
, 4)
2020 MIPS_SYS(sys_lgetxattr
, 4)
2021 MIPS_SYS(sys_fgetxattr
, 4)
2022 MIPS_SYS(sys_listxattr
, 3) /* 4230 */
2023 MIPS_SYS(sys_llistxattr
, 3)
2024 MIPS_SYS(sys_flistxattr
, 3)
2025 MIPS_SYS(sys_removexattr
, 2)
2026 MIPS_SYS(sys_lremovexattr
, 2)
2027 MIPS_SYS(sys_fremovexattr
, 2) /* 4235 */
2028 MIPS_SYS(sys_tkill
, 2)
2029 MIPS_SYS(sys_sendfile64
, 5)
2030 MIPS_SYS(sys_futex
, 6)
2031 MIPS_SYS(sys_sched_setaffinity
, 3)
2032 MIPS_SYS(sys_sched_getaffinity
, 3) /* 4240 */
2033 MIPS_SYS(sys_io_setup
, 2)
2034 MIPS_SYS(sys_io_destroy
, 1)
2035 MIPS_SYS(sys_io_getevents
, 5)
2036 MIPS_SYS(sys_io_submit
, 3)
2037 MIPS_SYS(sys_io_cancel
, 3) /* 4245 */
2038 MIPS_SYS(sys_exit_group
, 1)
2039 MIPS_SYS(sys_lookup_dcookie
, 3)
2040 MIPS_SYS(sys_epoll_create
, 1)
2041 MIPS_SYS(sys_epoll_ctl
, 4)
2042 MIPS_SYS(sys_epoll_wait
, 3) /* 4250 */
2043 MIPS_SYS(sys_remap_file_pages
, 5)
2044 MIPS_SYS(sys_set_tid_address
, 1)
2045 MIPS_SYS(sys_restart_syscall
, 0)
2046 MIPS_SYS(sys_fadvise64_64
, 7)
2047 MIPS_SYS(sys_statfs64
, 3) /* 4255 */
2048 MIPS_SYS(sys_fstatfs64
, 2)
2049 MIPS_SYS(sys_timer_create
, 3)
2050 MIPS_SYS(sys_timer_settime
, 4)
2051 MIPS_SYS(sys_timer_gettime
, 2)
2052 MIPS_SYS(sys_timer_getoverrun
, 1) /* 4260 */
2053 MIPS_SYS(sys_timer_delete
, 1)
2054 MIPS_SYS(sys_clock_settime
, 2)
2055 MIPS_SYS(sys_clock_gettime
, 2)
2056 MIPS_SYS(sys_clock_getres
, 2)
2057 MIPS_SYS(sys_clock_nanosleep
, 4) /* 4265 */
2058 MIPS_SYS(sys_tgkill
, 3)
2059 MIPS_SYS(sys_utimes
, 2)
2060 MIPS_SYS(sys_mbind
, 4)
2061 MIPS_SYS(sys_ni_syscall
, 0) /* sys_get_mempolicy */
2062 MIPS_SYS(sys_ni_syscall
, 0) /* 4270 sys_set_mempolicy */
2063 MIPS_SYS(sys_mq_open
, 4)
2064 MIPS_SYS(sys_mq_unlink
, 1)
2065 MIPS_SYS(sys_mq_timedsend
, 5)
2066 MIPS_SYS(sys_mq_timedreceive
, 5)
2067 MIPS_SYS(sys_mq_notify
, 2) /* 4275 */
2068 MIPS_SYS(sys_mq_getsetattr
, 3)
2069 MIPS_SYS(sys_ni_syscall
, 0) /* sys_vserver */
2070 MIPS_SYS(sys_waitid
, 4)
2071 MIPS_SYS(sys_ni_syscall
, 0) /* available, was setaltroot */
2072 MIPS_SYS(sys_add_key
, 5)
2073 MIPS_SYS(sys_request_key
, 4)
2074 MIPS_SYS(sys_keyctl
, 5)
2075 MIPS_SYS(sys_set_thread_area
, 1)
2076 MIPS_SYS(sys_inotify_init
, 0)
2077 MIPS_SYS(sys_inotify_add_watch
, 3) /* 4285 */
2078 MIPS_SYS(sys_inotify_rm_watch
, 2)
2079 MIPS_SYS(sys_migrate_pages
, 4)
2080 MIPS_SYS(sys_openat
, 4)
2081 MIPS_SYS(sys_mkdirat
, 3)
2082 MIPS_SYS(sys_mknodat
, 4) /* 4290 */
2083 MIPS_SYS(sys_fchownat
, 5)
2084 MIPS_SYS(sys_futimesat
, 3)
2085 MIPS_SYS(sys_fstatat64
, 4)
2086 MIPS_SYS(sys_unlinkat
, 3)
2087 MIPS_SYS(sys_renameat
, 4) /* 4295 */
2088 MIPS_SYS(sys_linkat
, 5)
2089 MIPS_SYS(sys_symlinkat
, 3)
2090 MIPS_SYS(sys_readlinkat
, 4)
2091 MIPS_SYS(sys_fchmodat
, 3)
2092 MIPS_SYS(sys_faccessat
, 3) /* 4300 */
2093 MIPS_SYS(sys_pselect6
, 6)
2094 MIPS_SYS(sys_ppoll
, 5)
2095 MIPS_SYS(sys_unshare
, 1)
2096 MIPS_SYS(sys_splice
, 6)
2097 MIPS_SYS(sys_sync_file_range
, 7) /* 4305 */
2098 MIPS_SYS(sys_tee
, 4)
2099 MIPS_SYS(sys_vmsplice
, 4)
2100 MIPS_SYS(sys_move_pages
, 6)
2101 MIPS_SYS(sys_set_robust_list
, 2)
2102 MIPS_SYS(sys_get_robust_list
, 3) /* 4310 */
2103 MIPS_SYS(sys_kexec_load
, 4)
2104 MIPS_SYS(sys_getcpu
, 3)
2105 MIPS_SYS(sys_epoll_pwait
, 6)
2106 MIPS_SYS(sys_ioprio_set
, 3)
2107 MIPS_SYS(sys_ioprio_get
, 2)
2108 MIPS_SYS(sys_utimensat
, 4)
2109 MIPS_SYS(sys_signalfd
, 3)
2110 MIPS_SYS(sys_ni_syscall
, 0) /* was timerfd */
2111 MIPS_SYS(sys_eventfd
, 1)
2112 MIPS_SYS(sys_fallocate
, 6) /* 4320 */
2113 MIPS_SYS(sys_timerfd_create
, 2)
2114 MIPS_SYS(sys_timerfd_gettime
, 2)
2115 MIPS_SYS(sys_timerfd_settime
, 4)
2116 MIPS_SYS(sys_signalfd4
, 4)
2117 MIPS_SYS(sys_eventfd2
, 2) /* 4325 */
2118 MIPS_SYS(sys_epoll_create1
, 1)
2119 MIPS_SYS(sys_dup3
, 3)
2120 MIPS_SYS(sys_pipe2
, 2)
2121 MIPS_SYS(sys_inotify_init1
, 1)
2122 MIPS_SYS(sys_preadv
, 5) /* 4330 */
2123 MIPS_SYS(sys_pwritev
, 5)
2124 MIPS_SYS(sys_rt_tgsigqueueinfo
, 4)
2125 MIPS_SYS(sys_perf_event_open
, 5)
2126 MIPS_SYS(sys_accept4
, 4)
2127 MIPS_SYS(sys_recvmmsg
, 5) /* 4335 */
2128 MIPS_SYS(sys_fanotify_init
, 2)
2129 MIPS_SYS(sys_fanotify_mark
, 6)
2130 MIPS_SYS(sys_prlimit64
, 4)
2131 MIPS_SYS(sys_name_to_handle_at
, 5)
2132 MIPS_SYS(sys_open_by_handle_at
, 3) /* 4340 */
2133 MIPS_SYS(sys_clock_adjtime
, 2)
2134 MIPS_SYS(sys_syncfs
, 1)
2135 MIPS_SYS(sys_sendmmsg
, 4)
2136 MIPS_SYS(sys_setns
, 2)
2137 MIPS_SYS(sys_process_vm_readv
, 6) /* 345 */
2138 MIPS_SYS(sys_process_vm_writev
, 6)
2139 MIPS_SYS(sys_kcmp
, 5)
2140 MIPS_SYS(sys_finit_module
, 3)
2141 MIPS_SYS(sys_sched_setattr
, 2)
2142 MIPS_SYS(sys_sched_getattr
, 3) /* 350 */
2143 MIPS_SYS(sys_renameat2
, 5)
2144 MIPS_SYS(sys_seccomp
, 3)
2145 MIPS_SYS(sys_getrandom
, 3)
2146 MIPS_SYS(sys_memfd_create
, 2)
2147 MIPS_SYS(sys_bpf
, 3) /* 355 */
2148 MIPS_SYS(sys_execveat
, 5)
2149 MIPS_SYS(sys_userfaultfd
, 1)
2150 MIPS_SYS(sys_membarrier
, 2)
2151 MIPS_SYS(sys_mlock2
, 3)
2152 MIPS_SYS(sys_copy_file_range
, 6) /* 360 */
2153 MIPS_SYS(sys_preadv2
, 6)
2154 MIPS_SYS(sys_pwritev2
, 6)
static int do_store_exclusive(CPUMIPSState *env)
{
    target_ulong page_addr;

    page_addr = addr & TARGET_PAGE_MASK;
    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {
        reg = env->llreg & 0x1f;
        d = (env->llreg & 0x20) != 0;
            segv = get_user_s64(val, addr);
            segv = get_user_s32(val, addr);
        if (val != env->llval) {
            env->active_tc.gpr[reg] = 0;
                segv = put_user_u64(env->llnewval, addr);
                segv = put_user_u32(env->llnewval, addr);
                env->active_tc.gpr[reg] = 1;
    env->active_tc.PC += 4;
static int do_break(CPUMIPSState *env, target_siginfo_t *info,
        info->si_signo = TARGET_SIGFPE;
        info->si_code = (code == BRK_OVERFLOW) ? FPE_INTOVF : FPE_INTDIV;
        queue_signal(env, info->si_signo, QEMU_SI_FAULT, &*info);
        info->si_signo = TARGET_SIGTRAP;
        queue_signal(env, info->si_signo, QEMU_SI_FAULT, &*info);
void cpu_loop(CPUMIPSState *env)
{
    CPUState *cs = CPU(mips_env_get_cpu(env));
    target_siginfo_t info;
# ifdef TARGET_ABI_MIPSO32
    unsigned int syscall_num;

    trapnr = cpu_exec(cs);
    process_queued_cpu_work(cs);

        env->active_tc.PC += 4;
# ifdef TARGET_ABI_MIPSO32
        syscall_num = env->active_tc.gpr[2] - 4000;
        if (syscall_num >= sizeof(mips_syscall_args)) {
            ret = -TARGET_ENOSYS;
            abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;

            nb_args = mips_syscall_args[syscall_num];
            sp_reg = env->active_tc.gpr[29];
                /* these arguments are taken from the stack */
                if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) {
                if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) {
                if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) {
                if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) {
            ret = do_syscall(env, env->active_tc.gpr[2],
                             env->active_tc.gpr[4],
                             env->active_tc.gpr[5],
                             env->active_tc.gpr[6],
                             env->active_tc.gpr[7],
                             arg5, arg6, arg7, arg8);
        ret = do_syscall(env, env->active_tc.gpr[2],
                         env->active_tc.gpr[4], env->active_tc.gpr[5],
                         env->active_tc.gpr[6], env->active_tc.gpr[7],
                         env->active_tc.gpr[8], env->active_tc.gpr[9],
                         env->active_tc.gpr[10], env->active_tc.gpr[11]);
        if (ret == -TARGET_ERESTARTSYS) {
            env->active_tc.PC -= 4;
        if (ret == -TARGET_QEMU_ESIGRETURN) {
            /* Returning from a successful sigreturn syscall.
               Avoid clobbering register state.  */
        if ((abi_ulong)ret >= (abi_ulong)-1133) {
            env->active_tc.gpr[7] = 1; /* error flag */
            env->active_tc.gpr[7] = 0; /* error flag */
        env->active_tc.gpr[2] = ret;
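        /*
         * o32 error convention: a successful syscall clears a3 (gpr 7) and
         * leaves the result in v0 (gpr 2); on failure a3 is set to 1 and v0
         * reports the error, which is what the -1133 comparison above detects.
         */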
        info.si_signo = TARGET_SIGSEGV;
        /* XXX: check env->error_code */
        info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = env->CP0_BadVAddr;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

        info.si_signo = TARGET_SIGILL;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
    case EXCP_INTERRUPT:
        /* just indicate that signals should be handled asap */

        sig = gdb_handlesig(cs, TARGET_SIGTRAP);
        info.si_signo = sig;
        info.si_code = TARGET_TRAP_BRKPT;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

        if (do_store_exclusive(env)) {
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->active_tc.PC;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

        info.si_signo = TARGET_SIGILL;
        info.si_code = TARGET_ILL_ILLOPC;
        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

        /* The code below was inspired by the MIPS Linux kernel trap
         * handling code in arch/mips/kernel/traps.c.
         */
            abi_ulong trap_instr;

            if (env->hflags & MIPS_HFLAG_M16) {
                if (env->insn_flags & ASE_MICROMIPS) {
                    /* microMIPS mode */
                    ret = get_user_u16(trap_instr, env->active_tc.PC);
                    if ((trap_instr >> 10) == 0x11) {
                        /* 16-bit instruction */
                        code = trap_instr & 0xf;
                        /* 32-bit instruction */
                        ret = get_user_u16(instr_lo,
                                           env->active_tc.PC + 2);
                        trap_instr = (trap_instr << 16) | instr_lo;
                        code = ((trap_instr >> 6) & ((1 << 20) - 1));
                        /* Unfortunately, microMIPS also suffers from
                           the old assembler bug...  */
                        if (code >= (1 << 10)) {
                    ret = get_user_u16(trap_instr, env->active_tc.PC);
                    code = (trap_instr >> 6) & 0x3f;
                ret = get_user_u32(trap_instr, env->active_tc.PC);
                /* As described in the original Linux kernel code, the
                 * below checks on 'code' are to work around an old
                 */
                code = ((trap_instr >> 6) & ((1 << 20) - 1));
                if (code >= (1 << 10)) {
            if (do_break(env, &info, code) != 0) {

            abi_ulong trap_instr;
            unsigned int code = 0;

            if (env->hflags & MIPS_HFLAG_M16) {
                /* microMIPS mode */
                ret = get_user_u16(instr[0], env->active_tc.PC) ||
                      get_user_u16(instr[1], env->active_tc.PC + 2);
                trap_instr = (instr[0] << 16) | instr[1];
                ret = get_user_u32(trap_instr, env->active_tc.PC);

            /* The immediate versions don't provide a code.  */
            if (!(trap_instr & 0xFC000000)) {
                if (env->hflags & MIPS_HFLAG_M16) {
                    /* microMIPS mode */
                    code = ((trap_instr >> 12) & ((1 << 4) - 1));
                    code = ((trap_instr >> 6) & ((1 << 10) - 1));
            if (do_break(env, &info, code) != 0) {

        cpu_exec_step_atomic(cs);
        EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
    process_pending_signals(env);
void cpu_loop(CPUNios2State *env)
{
    CPUState *cs = ENV_GET_CPU(env);
    Nios2CPU *cpu = NIOS2_CPU(cs);
    target_siginfo_t info;
    int trapnr, gdbsig, ret;

        trapnr = cpu_exec(cs);

        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;

            if (env->regs[R_AT] == 0) {
                qemu_log_mask(CPU_LOG_INT, "\nSyscall\n");

                ret = do_syscall(env, env->regs[2],
                                 env->regs[4], env->regs[5], env->regs[6],
                                 env->regs[7], env->regs[8], env->regs[9],
                                 0, 0);

                if (env->regs[2] == 0) {    /* FIXME: syscall 0 workaround */
                    ret = 0;
                }

                env->regs[2] = abs(ret);
                /* Return value is 0..4096 */
                env->regs[7] = (ret > 0xfffffffffffff000ULL);
                env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
                env->regs[CR_STATUS] &= ~0x3;
                env->regs[R_EA] = env->regs[R_PC] + 4;
                env->regs[R_PC] += 4;
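            /*
             * Nios2 syscall return convention, as emulated above: r2 gets
             * the (positive) result or errno value, r7 acts as an error
             * flag, and estatus/ea/PC are updated the way the hardware
             * trap/eret sequence would leave them.
             */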
            } else {
                qemu_log_mask(CPU_LOG_INT, "\nTrap\n");

                env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
                env->regs[CR_STATUS] &= ~0x3;
                env->regs[R_EA] = env->regs[R_PC] + 4;
                env->regs[R_PC] = cpu->exception_addr;

                gdbsig = TARGET_SIGTRAP;
            }
            break;

            switch (env->regs[R_PC]) {
            /*case 0x1000:*/ /* TODO:__kuser_helper_version */
            case 0x1004: /* __kuser_cmpxchg */
                if (env->regs[4] & 0x3) {
                    goto kuser_fail;
                }
                ret = get_user_u32(env->regs[2], env->regs[4]);
                env->regs[2] -= env->regs[5];
                if (env->regs[2] == 0) {
                    put_user_u32(env->regs[6], env->regs[4]);
                }
                env->regs[R_PC] = env->regs[R_RA];
                break;
            /*case 0x1040:*/ /* TODO:__kuser_sigtramp */
            default:
            kuser_fail:
                info.si_signo = TARGET_SIGSEGV;
                /* TODO: check env->error_code */
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->regs[R_PC];
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;
            }
            break;

            EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n",
                      trapnr);
            gdbsig = TARGET_SIGILL;

        gdb_handlesig(cs, gdbsig);
        if (gdbsig != TARGET_SIGTRAP) {
            exit(EXIT_FAILURE);
        }

        process_pending_signals(env);

#endif /* TARGET_NIOS2 */
#ifdef TARGET_OPENRISC

void cpu_loop(CPUOpenRISCState *env)
{
    CPUState *cs = CPU(openrisc_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);
        process_queued_cpu_work(cs);

            env->pc += 4;   /* 0xc00; */
            ret = do_syscall(env,
                             cpu_get_gpr(env, 11), /* return value */
                             cpu_get_gpr(env, 3),  /* r3 - r7 are params */
                             cpu_get_gpr(env, 4),
                             cpu_get_gpr(env, 5),
                             cpu_get_gpr(env, 6),
                             cpu_get_gpr(env, 7),
                             cpu_get_gpr(env, 8), 0, 0);
            if (ret == -TARGET_ERESTARTSYS) {
                env->pc -= 4;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                cpu_set_gpr(env, 11, ret);
            }
            break;

            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            info.si_signo = TARGET_SIGFPE;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

        case EXCP_INTERRUPT:
            /* We processed the pending cpu work above.  */
            break;

            trapnr = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = trapnr;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            cpu_exec_step_atomic(cs);
            break;

            g_assert_not_reached();

        process_pending_signals(env);

#endif /* TARGET_OPENRISC */
void cpu_loop(CPUSH4State *env)
{
    CPUState *cs = CPU(sh_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);
        process_queued_cpu_work(cs);

            ret = do_syscall(env,

            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->gregs[0] = ret;
            }
            break;

        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;

            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->tea;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            cpu_exec_step_atomic(cs);
            break;

            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);

        process_pending_signals(env);
void cpu_loop(CPUCRISState *env)
{
    CPUState *cs = CPU(cris_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);
        process_queued_cpu_work(cs);

            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->pregs[PR_EDA];
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;

            ret = do_syscall(env,

            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[10] = ret;
            }
            break;

            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            cpu_exec_step_atomic(cs);
            break;

            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);

        process_pending_signals(env);
#ifdef TARGET_MICROBLAZE
void cpu_loop(CPUMBState *env)
{
    CPUState *cs = CPU(mb_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);
        process_queued_cpu_work(cs);

            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;

            /* Return address is 4 bytes after the call. */
            env->sregs[SR_PC] = env->regs[14];
            ret = do_syscall(env,

            if (ret == -TARGET_ERESTARTSYS) {
                /* Wind back to before the syscall. */
                env->sregs[SR_PC] -= 4;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
            }
            /* All syscall exits result in guest r14 being equal to the
             * PC we return to, because the kernel syscall exit "rtbd" does
             * this. (This is true even for sigreturn(); note that r14 is
             * not a userspace-usable register, as the kernel may clobber it.)
             */
            env->regs[14] = env->sregs[SR_PC];
            break;

            env->regs[17] = env->sregs[SR_PC] + 4;
            if (env->iflags & D_FLAG) {
                env->sregs[SR_ESR] |= 1 << 12;
                env->sregs[SR_PC] -= 4;
                /* FIXME: if branch was immed, replay the imm as well.  */
            }
            env->iflags &= ~(IMM_FLAG | D_FLAG);
            switch (env->sregs[SR_ESR] & 31) {
            case ESR_EC_DIVZERO:
                info.si_signo = TARGET_SIGFPE;
                info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;

                info.si_signo = TARGET_SIGFPE;
                if (env->sregs[SR_FSR] & FSR_IO) {
                    info.si_code = TARGET_FPE_FLTINV;
                }
                if (env->sregs[SR_FSR] & FSR_DZ) {
                    info.si_code = TARGET_FPE_FLTDIV;
                }
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;

                printf("Unhandled hw-exception: 0x%x\n",
                       env->sregs[SR_ESR] & ESR_EC_MASK);
                cpu_dump_state(cs, stderr, fprintf, 0);
            }
            break;

            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            cpu_exec_step_atomic(cs);
            break;

            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);

        process_pending_signals(env);
void cpu_loop(CPUM68KState *env)
{
    CPUState *cs = CPU(m68k_env_get_cpu(env));
    target_siginfo_t info;
    TaskState *ts = cs->opaque;

        trapnr = cpu_exec(cs);
        process_queued_cpu_work(cs);

            if (ts->sim_syscalls) {
                get_user_u16(nr, env->pc + 2);
                do_m68k_simcall(env, nr);
            }
            break;

        case EXCP_HALT_INSN:
            /* Semihosting syscall.  */
            do_m68k_semihosting(env, env->dregs[0]);
            break;

        case EXCP_UNSUPPORTED:
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_INTDIV;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            ts->sim_syscalls = 0;
            ret = do_syscall(env,

            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->dregs[0] = ret;
            }
            break;

        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;

            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->mmu.ar;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            cpu_exec_step_atomic(cs);
            break;

            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n",
                      trapnr);

        process_pending_signals(env);

#endif /* TARGET_M68K */
void cpu_loop(CPUAlphaState *env)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    target_siginfo_t info;

        bool arch_interrupt = true;

        trapnr = cpu_exec(cs);
        process_queued_cpu_work(cs);

            fprintf(stderr, "Reset requested. Exit\n");

            fprintf(stderr, "Machine check exception. Exit\n");

        case EXCP_SMP_INTERRUPT:
        case EXCP_CLK_INTERRUPT:
        case EXCP_DEV_INTERRUPT:
            fprintf(stderr, "External interrupt. Exit\n");

            info.si_signo = TARGET_SIGSEGV;
            info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
                            ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_FLTINV;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            /* No-op.  Linux simply re-enables the FPU.  */
            break;

            switch (env->error_code) {

                info.si_signo = TARGET_SIGTRAP;
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;

                info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;

                trapnr = env->ir[IR_V0];
                sysret = do_syscall(env, trapnr,
                                    env->ir[IR_A0], env->ir[IR_A1],
                                    env->ir[IR_A2], env->ir[IR_A3],
                                    env->ir[IR_A4], env->ir[IR_A5],
                                    0, 0);
                if (sysret == -TARGET_ERESTARTSYS) {
                }
                if (sysret == -TARGET_QEMU_ESIGRETURN) {
                }
                /* Syscall writes 0 to V0 to bypass error check, similar
                   to how this is handled internal to Linux kernel.
                   (Ab)use trapnr temporarily as boolean indicating error.  */
                trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
                env->ir[IR_V0] = (trapnr ? -sysret : sysret);
                env->ir[IR_A3] = trapnr;
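                /*
                 * Alpha Linux syscall convention: the (positive) result or
                 * errno goes in v0 and a3 is a success/failure flag, so the
                 * error case is computed explicitly above rather than being
                 * inferred from a negative v0.
                 */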
            /* ??? We can probably elide the code using page_unprotect
               that is checking for self-modifying code.  Instead we
               could simply call tb_flush here.  Until we work out the
               changes required to turn off the extra write protection,
               this can be a no-op.  */
            break;

            /* Handled in the translator for usermode.  */
            break;

            /* Handled in the translator for usermode.  */
            break;

            info.si_signo = TARGET_SIGFPE;
            switch (env->ir[IR_A0]) {
            case TARGET_GEN_INTOVF:
                info.si_code = TARGET_FPE_INTOVF;
                break;
            case TARGET_GEN_INTDIV:
                info.si_code = TARGET_FPE_INTDIV;
                break;
            case TARGET_GEN_FLTOVF:
                info.si_code = TARGET_FPE_FLTOVF;
                break;
            case TARGET_GEN_FLTUND:
                info.si_code = TARGET_FPE_FLTUND;
                break;
            case TARGET_GEN_FLTINV:
                info.si_code = TARGET_FPE_FLTINV;
                break;
            case TARGET_GEN_FLTINE:
                info.si_code = TARGET_FPE_FLTRES;
                break;
            case TARGET_GEN_ROPRAND:

                info.si_signo = TARGET_SIGTRAP;
            }
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
            if (info.si_signo) {
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            }
            arch_interrupt = false;
            break;

        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            break;

            cpu_exec_step_atomic(cs);
            arch_interrupt = false;
            break;

            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);

        process_pending_signals(env);

        /* Most of the traps imply a transition through PALcode, which
           implies an REI instruction has been executed.  Which means
           that RX and LOCK_ADDR should be cleared.  But there are a
           few exceptions for traps internal to QEMU.  */
        if (arch_interrupt) {
            env->flags &= ~ENV_FLAG_RX_FLAG;
            env->lock_addr = -1;
        }

#endif /* TARGET_ALPHA */
void cpu_loop(CPUS390XState *env)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);
        process_queued_cpu_work(cs);

        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            break;

            n = env->int_svc_code;
            /* syscalls > 255 */
            env->psw.addr += env->int_svc_ilen;
            ret = do_syscall(env, n, env->regs[2], env->regs[3],
                             env->regs[4], env->regs[5],
                             env->regs[6], env->regs[7], 0, 0);
            if (ret == -TARGET_ERESTARTSYS) {
                env->psw.addr -= env->int_svc_ilen;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
            }
            break;

            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            n = TARGET_TRAP_BRKPT;

            n = env->int_pgm_code;
            switch (n) {
            case PGM_PRIVILEGED:
                sig = TARGET_SIGILL;
                n = TARGET_ILL_ILLOPC;
                break;
            case PGM_PROTECTION:
            case PGM_ADDRESSING:
                sig = TARGET_SIGSEGV;
                /* XXX: check env->error_code */
                n = TARGET_SEGV_MAPERR;
                addr = env->__excp_addr;
                break;
            case PGM_SPECIFICATION:
            case PGM_SPECIAL_OP:
                sig = TARGET_SIGILL;
                n = TARGET_ILL_ILLOPN;
                break;
            case PGM_FIXPT_OVERFLOW:
                sig = TARGET_SIGFPE;
                n = TARGET_FPE_INTOVF;
                break;
            case PGM_FIXPT_DIVIDE:
                sig = TARGET_SIGFPE;
                n = TARGET_FPE_INTDIV;
                break;

                n = (env->fpc >> 8) & 0xff;
                /* compare-and-trap */

                /* An IEEE exception, simulated or otherwise.  */
                if (n & 0x80) {
                    n = TARGET_FPE_FLTINV;
                } else if (n & 0x40) {
                    n = TARGET_FPE_FLTDIV;
                } else if (n & 0x20) {
                    n = TARGET_FPE_FLTOVF;
                } else if (n & 0x10) {
                    n = TARGET_FPE_FLTUND;
                } else if (n & 0x08) {
                    n = TARGET_FPE_FLTRES;
                }
                /* ??? Quantum exception; BFP, DFP error.  */
                sig = TARGET_SIGFPE;
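                /*
                 * The value tested above is the data-exception code (DXC)
                 * taken from bits 8..15 of the FPC register; 0x80 flags an
                 * IEEE invalid operation, 0x40 divide-by-zero, 0x20
                 * overflow, 0x10 underflow and 0x08 inexact, matching the
                 * TARGET_FPE_* codes chosen above.
                 */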
                fprintf(stderr, "Unhandled program exception: %#x\n", n);
                cpu_dump_state(cs, stderr, fprintf, 0);

            addr = env->psw.addr;
            info.si_signo = sig;
            info._sifields._sigfault._addr = addr;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            cpu_exec_step_atomic(cs);
            break;

            fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);

        process_pending_signals(env);

#endif /* TARGET_S390X */
#ifdef TARGET_TILEGX

static void gen_sigill_reg(CPUTLGState *env)
{
    target_siginfo_t info;

    info.si_signo = TARGET_SIGILL;
    info.si_code = TARGET_ILL_PRVREG;
    info._sifields._sigfault._addr = env->pc;
    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
}

static void do_signal(CPUTLGState *env, int signo, int sigcode)
{
    target_siginfo_t info;

    info.si_signo = signo;
    info._sifields._sigfault._addr = env->pc;

    if (signo == TARGET_SIGSEGV) {
        /* The passed in sigcode is a dummy; check for a page mapping
           and pass either MAPERR or ACCERR.  */
        target_ulong addr = env->excaddr;
        info._sifields._sigfault._addr = addr;
        if (page_check_range(addr, 1, PAGE_VALID) < 0) {
            sigcode = TARGET_SEGV_MAPERR;
        } else {
            sigcode = TARGET_SEGV_ACCERR;
        }
    }
    info.si_code = sigcode;

    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
}

static void gen_sigsegv_maperr(CPUTLGState *env, target_ulong addr)
{
    env->excaddr = addr;
    do_signal(env, TARGET_SIGSEGV, 0);
}

static void set_regval(CPUTLGState *env, uint8_t reg, uint64_t val)
{
    if (unlikely(reg >= TILEGX_R_COUNT)) {
        gen_sigill_reg(env);
        g_assert_not_reached();
    }
    env->regs[reg] = val;
}

/*
 * Compare the 8-byte contents of the CmpValue SPR with the 8-byte value in
 * memory at the address held in the first source register. If the values are
 * not equal, then no memory operation is performed. If the values are equal,
 * the 8-byte quantity from the second source register is written into memory
 * at the address held in the first source register. In either case, the result
 * of the instruction is the value read from memory. The compare and write to
 * memory are atomic and thus can be used for synchronization purposes. This
 * instruction only operates for addresses aligned to a 8-byte boundary.
 * Unaligned memory access causes an Unaligned Data Reference interrupt.
 *
 * Functional Description (64-bit)
 *     uint64_t memVal = memoryReadDoubleWord (rf[SrcA]);
 *     rf[Dest] = memVal;
 *     if (memVal == SPR[CmpValueSPR])
 *         memoryWriteDoubleWord (rf[SrcA], rf[SrcB]);
 *
 * Functional Description (32-bit)
 *     uint64_t memVal = signExtend32 (memoryReadWord (rf[SrcA]));
 *     rf[Dest] = memVal;
 *     if (memVal == signExtend32 (SPR[CmpValueSPR]))
 *         memoryWriteWord (rf[SrcA], rf[SrcB]);
 *
 * This function also processes exch and exch4 which need not process SPR.
 */
static void do_exch(CPUTLGState *env, bool quad, bool cmp)
{
    target_long val, sprval;

    addr = env->atomic_srca;
    if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
        goto sigsegv_maperr;
    }

    if (quad) {
        sprval = env->spregs[TILEGX_SPR_CMPEXCH];
    } else {
        sprval = sextract64(env->spregs[TILEGX_SPR_CMPEXCH], 0, 32);
    }

    if (!cmp || val == sprval) {
        target_long valb = env->atomic_srcb;
        if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
            goto sigsegv_maperr;
        }
    }

    set_regval(env, env->atomic_dstr, val);
    return;

 sigsegv_maperr:
    gen_sigsegv_maperr(env, addr);
}

static void do_fetch(CPUTLGState *env, int trapnr, bool quad)
{
    target_long val, valb;

    addr = env->atomic_srca;
    valb = env->atomic_srcb;
    if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
        goto sigsegv_maperr;
    }

    switch (trapnr) {
    case TILEGX_EXCP_OPCODE_FETCHADD:
    case TILEGX_EXCP_OPCODE_FETCHADD4:

    case TILEGX_EXCP_OPCODE_FETCHADDGEZ:

    case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
        if ((int32_t)valb < 0) {
        }

    case TILEGX_EXCP_OPCODE_FETCHAND:
    case TILEGX_EXCP_OPCODE_FETCHAND4:

    case TILEGX_EXCP_OPCODE_FETCHOR:
    case TILEGX_EXCP_OPCODE_FETCHOR4:

    default:
        g_assert_not_reached();
    }

    if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
        goto sigsegv_maperr;
    }

    set_regval(env, env->atomic_dstr, val);
    return;

 sigsegv_maperr:
    gen_sigsegv_maperr(env, addr);
}

void cpu_loop(CPUTLGState *env)
{
    CPUState *cs = CPU(tilegx_env_get_cpu(env));

        trapnr = cpu_exec(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case TILEGX_EXCP_SYSCALL:
        {
            abi_ulong ret = do_syscall(env, env->regs[TILEGX_R_NR],
                                       env->regs[0], env->regs[1],
                                       env->regs[2], env->regs[3],
                                       env->regs[4], env->regs[5],
                                       env->regs[6], env->regs[7]);
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[TILEGX_R_RE] = ret;
                env->regs[TILEGX_R_ERR] = TILEGX_IS_ERRNO(ret) ? -ret : 0;
            }
            break;
        }
        case TILEGX_EXCP_OPCODE_EXCH:
            do_exch(env, true, false);
            break;
        case TILEGX_EXCP_OPCODE_EXCH4:
            do_exch(env, false, false);
            break;
        case TILEGX_EXCP_OPCODE_CMPEXCH:
            do_exch(env, true, true);
            break;
        case TILEGX_EXCP_OPCODE_CMPEXCH4:
            do_exch(env, false, true);
            break;
        case TILEGX_EXCP_OPCODE_FETCHADD:
        case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
        case TILEGX_EXCP_OPCODE_FETCHAND:
        case TILEGX_EXCP_OPCODE_FETCHOR:
            do_fetch(env, trapnr, true);
            break;
        case TILEGX_EXCP_OPCODE_FETCHADD4:
        case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
        case TILEGX_EXCP_OPCODE_FETCHAND4:
        case TILEGX_EXCP_OPCODE_FETCHOR4:
            do_fetch(env, trapnr, false);
            break;
        case TILEGX_EXCP_SIGNAL:
            do_signal(env, env->signo, env->sigcode);
            break;
        case TILEGX_EXCP_REG_IDN_ACCESS:
        case TILEGX_EXCP_REG_UDN_ACCESS:
            gen_sigill_reg(env);
            break;

            cpu_exec_step_atomic(cs);
            break;
        default:
            fprintf(stderr, "trapnr is %d[0x%x].\n", trapnr, trapnr);
            g_assert_not_reached();
        }
        process_pending_signals(env);
static abi_ulong hppa_lws(CPUHPPAState *env)
{
    uint32_t which = env->gr[20];
    abi_ulong addr = env->gr[26];
    abi_ulong old = env->gr[25];
    abi_ulong new = env->gr[24];
    abi_ulong size, ret;

    switch (which) {
    default:
        return -TARGET_ENOSYS;

    case 0: /* elf32 atomic 32bit cmpxchg */
        if ((addr & 3) || !access_ok(VERIFY_WRITE, addr, 4)) {
            return -TARGET_EFAULT;
        }
        ret = atomic_cmpxchg((uint32_t *)g2h(addr), old, new);
        break;

    case 2: /* elf32 atomic "new" cmpxchg */
            return -TARGET_ENOSYS;

        if (((addr | old | new) & ((1 << size) - 1))
            || !access_ok(VERIFY_WRITE, addr, 1 << size)
            || !access_ok(VERIFY_READ, old, 1 << size)
            || !access_ok(VERIFY_READ, new, 1 << size)) {
            return -TARGET_EFAULT;
        }

        /* Note that below we use host-endian loads so that the cmpxchg
           can be host-endian as well.  */
            old = *(uint8_t *)g2h(old);
            new = *(uint8_t *)g2h(new);
            ret = atomic_cmpxchg((uint8_t *)g2h(addr), old, new);

            old = *(uint16_t *)g2h(old);
            new = *(uint16_t *)g2h(new);
            ret = atomic_cmpxchg((uint16_t *)g2h(addr), old, new);

            old = *(uint32_t *)g2h(old);
            new = *(uint32_t *)g2h(new);
            ret = atomic_cmpxchg((uint32_t *)g2h(addr), old, new);

        {
            uint64_t o64, n64, r64;
            o64 = *(uint64_t *)g2h(old);
            n64 = *(uint64_t *)g2h(new);
#ifdef CONFIG_ATOMIC64
            r64 = atomic_cmpxchg__nocheck((uint64_t *)g2h(addr), o64, n64);
#else
            r64 = *(uint64_t *)g2h(addr);
            if (r64 == o64) {
                *(uint64_t *)g2h(addr) = n64;
            }
#endif
        }
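
/*
 * hppa_lws() above emulates the parisc "light-weight syscall" gateway that
 * Linux exposes for userspace atomics: gr20 selects the operation and
 * gr26/gr25/gr24 carry the address, old and new values; the result is
 * handed back in gr21 by the EXCP_SYSCALL_LWS case in cpu_loop() below.
 */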
void cpu_loop(CPUHPPAState *env)
{
    CPUState *cs = CPU(hppa_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_exec(cs);
        process_queued_cpu_work(cs);

            ret = do_syscall(env, env->gr[20],
                             env->gr[26], env->gr[25],
                             env->gr[24], env->gr[23],
                             env->gr[22], env->gr[21], 0, 0);
            switch (ret) {
            default:
                /* We arrived here by faking the gateway page.  Return.  */
                env->iaoq_f = env->gr[31];
                env->iaoq_b = env->gr[31] + 4;
                break;
            case -TARGET_ERESTARTSYS:
            case -TARGET_QEMU_ESIGRETURN:
                break;
            }
            break;
        case EXCP_SYSCALL_LWS:
            env->gr[21] = hppa_lws(env);
            /* We arrived here by faking the gateway page.  Return.  */
            env->iaoq_f = env->gr[31];
            env->iaoq_b = env->gr[31] + 4;
            break;

            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_ACCERR;
            info._sifields._sigfault._addr = env->ior;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->iaoq_f;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            info.si_signo = TARGET_SIGFPE;
            info._sifields._sigfault._addr = env->iaoq_f;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;

            trapnr = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = trapnr;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, trapnr, QEMU_SI_FAULT, &info);
            break;

        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;

            g_assert_not_reached();

        process_pending_signals(env);

#endif /* TARGET_HPPA */
THREAD CPUState *thread_cpu;

bool qemu_cpu_is_self(CPUState *cpu)
{
    return thread_cpu == cpu;
}

void qemu_cpu_kick(CPUState *cpu)
{
}

void task_settid(TaskState *ts)
{
    if (ts->ts_tid == 0) {
        ts->ts_tid = (pid_t)syscall(SYS_gettid);
    }
}

void stop_all_tasks(void)
{
    /*
     * We trust that when using NPTL, start_exclusive()
     * handles thread stopping correctly.
     */
}

/* Assumes contents are already zeroed.  */
void init_task_state(TaskState *ts)
{
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUState *new_cpu = cpu_init(cpu_model);
    CPUArchState *new_env = new_cpu->env_ptr;

    /* Reset non arch specific state */

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_cpu->breakpoints);
    QTAILQ_INIT(&new_cpu->watchpoints);
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
    }
static void handle_arg_help(const char *arg)
{
    usage(EXIT_SUCCESS);
}

static void handle_arg_log(const char *arg)
{
    mask = qemu_str_to_log_mask(arg);
    if (!mask) {
        qemu_print_log_usage(stdout);
    }
    qemu_log_needs_buffers();
}

static void handle_arg_dfilter(const char *arg)
{
    qemu_set_dfilter_ranges(arg, NULL);
}

static void handle_arg_log_filename(const char *arg)
{
    qemu_set_log_filename(arg, &error_fatal);
}

static void handle_arg_set_env(const char *arg)
{
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_setenv(envlist, token) != 0) {
            usage(EXIT_FAILURE);
        }
    }
}

static void handle_arg_unset_env(const char *arg)
{
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_unsetenv(envlist, token) != 0) {
            usage(EXIT_FAILURE);
        }
    }
}

static void handle_arg_argv0(const char *arg)
{
    argv0 = strdup(arg);
}

static void handle_arg_stack_size(const char *arg)
{
    guest_stack_size = strtoul(arg, &p, 0);
    if (guest_stack_size == 0) {
        usage(EXIT_FAILURE);
    }

    if (*p == 'M') {
        guest_stack_size *= 1024 * 1024;
    } else if (*p == 'k' || *p == 'K') {
        guest_stack_size *= 1024;
    }
}

static void handle_arg_ld_prefix(const char *arg)
{
    interp_prefix = strdup(arg);
}

static void handle_arg_pagesize(const char *arg)
{
    qemu_host_page_size = atoi(arg);
    if (qemu_host_page_size == 0 ||
        (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
        fprintf(stderr, "page size must be a power of two\n");
    }
}

static void handle_arg_randseed(const char *arg)
{
    unsigned long long seed;

    if (parse_uint_full(arg, &seed, 0) != 0 || seed > UINT_MAX) {
        fprintf(stderr, "Invalid seed number: %s\n", arg);
    }
}

static void handle_arg_gdb(const char *arg)
{
    gdbstub_port = atoi(arg);
}

static void handle_arg_uname(const char *arg)
{
    qemu_uname_release = strdup(arg);
}

static void handle_arg_cpu(const char *arg)
{
    cpu_model = strdup(arg);
    if (cpu_model == NULL || is_help_option(cpu_model)) {
        /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
        cpu_list(stdout, &fprintf);
#endif
    }
}

static void handle_arg_guest_base(const char *arg)
{
    guest_base = strtol(arg, NULL, 0);
    have_guest_base = 1;
}

static void handle_arg_reserved_va(const char *arg)
{
    reserved_va = strtoul(arg, &p, 0);

    unsigned long unshifted = reserved_va;

    reserved_va <<= shift;
    if (reserved_va >> shift != unshifted
        || (MAX_RESERVED_VA && reserved_va > MAX_RESERVED_VA)) {
        fprintf(stderr, "Reserved virtual address too big\n");
    }

    fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
}
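
/*
 * Note for the handler above: in the full option parser the 'shift' used
 * here is derived from an optional size suffix on the -R argument (k/M/G
 * style multipliers); the unshifted copy exists only so that overflow of
 * the shifted value can be detected.  The suffix parsing itself is assumed
 * here and not shown.
 */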
static void handle_arg_singlestep(const char *arg)
{
}

static void handle_arg_strace(const char *arg)
{
}

static void handle_arg_version(const char *arg)
{
    printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION
           "\n" QEMU_COPYRIGHT "\n");
}

static char *trace_file;
static void handle_arg_trace(const char *arg)
{
    trace_file = trace_opt_parse(arg);
}

struct qemu_argument {
    void (*handle_opt)(const char *arg);
    const char *example;
};

static const struct qemu_argument arg_table[] = {
    {"h", "", false, handle_arg_help, "", "print this help"},
    {"help", "", false, handle_arg_help,
    {"g", "QEMU_GDB", true, handle_arg_gdb, "port", "wait gdb connection to 'port'"},
    {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix, "path", "set the elf interpreter prefix to 'path'"},
    {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size, "size", "set the stack size to 'size' bytes"},
    {"cpu", "QEMU_CPU", true, handle_arg_cpu, "model", "select CPU (-cpu help for list)"},
    {"E", "QEMU_SET_ENV", true, handle_arg_set_env, "var=value", "sets targets environment variable (see below)"},
    {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env, "var", "unsets targets environment variable (see below)"},
    {"0", "QEMU_ARGV0", true, handle_arg_argv0, "argv0", "forces target process argv[0] to be 'argv0'"},
    {"r", "QEMU_UNAME", true, handle_arg_uname, "uname", "set qemu uname release string to 'uname'"},
    {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base, "address", "set guest_base address to 'address'"},
    {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va, "size", "reserve 'size' bytes for guest virtual address space"},
    {"d", "QEMU_LOG", true, handle_arg_log, "item[,...]", "enable logging of specified items (use '-d help' for a list of items)"},
    {"dfilter", "QEMU_DFILTER", true, handle_arg_dfilter, "range[,...]", "filter logging based on address range"},
    {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename, "logfile", "write logs to 'logfile' (default stderr)"},
    {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize, "pagesize", "set the host page size to 'pagesize'"},
    {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep, "", "run in singlestep mode"},
    {"strace", "QEMU_STRACE", false, handle_arg_strace, "", "log system calls"},
    {"seed", "QEMU_RAND_SEED", true, handle_arg_randseed, "", "Seed for pseudo-random number generator"},
    {"trace", "QEMU_TRACE", true, handle_arg_trace, "", "[[enable=]<pattern>][,events=<file>][,file=<file>]"},
    {"version", "QEMU_VERSION", false, handle_arg_version, "", "display version information and exit"},
    {NULL, NULL, false, NULL, NULL, NULL}
};
static void usage(int exitcode)
{
    const struct qemu_argument *arginfo;

    printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n"
           "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n"
           "\n"
           "Options and associated environment variables:\n"
           "\n");

    /* Calculate column widths. We must always have at least enough space
     * for the column header.
     */
    maxarglen = strlen("Argument");
    maxenvlen = strlen("Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        int arglen = strlen(arginfo->argv);
        if (arginfo->has_arg) {
            arglen += strlen(arginfo->example) + 1;
        }
        if (strlen(arginfo->env) > maxenvlen) {
            maxenvlen = strlen(arginfo->env);
        }
        if (arglen > maxarglen) {
        }
    }

    printf("%-*s %-*s Description\n", maxarglen + 1, "Argument",
           maxenvlen, "Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->has_arg) {
            printf("-%s %-*s %-*s %s\n", arginfo->argv,
                   (int)(maxarglen - strlen(arginfo->argv) - 1),
                   arginfo->example, maxenvlen, arginfo->env, arginfo->help);
        } else {
            printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv,
                   maxenvlen, arginfo->env,
        }
    }

           "QEMU_LD_PREFIX  = %s\n"
           "QEMU_STACK_SIZE = %ld byte\n",

           "You can use -E and -U options or the QEMU_SET_ENV and\n"
           "QEMU_UNSET_ENV environment variables to set and unset\n"
           "environment variables for the target process.\n"
           "It is possible to provide several variables by separating them\n"
           "by commas in getsubopt(3) style. Additionally it is possible to\n"
           "provide the -E and -U options multiple times.\n"
           "The following lines are equivalent:\n"
           "    -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
           "    -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
           "    QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
           "Note that if you provide several changes to a single variable\n"
           "the last change will stay in effect.\n"
           "\n"
           QEMU_HELP_BOTTOM "\n");
static int parse_args(int argc, char **argv)
{
    const struct qemu_argument *arginfo;

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->env == NULL) {
            continue;
        }
        r = getenv(arginfo->env);
        if (r != NULL) {
            arginfo->handle_opt(r);
        }
    }

        if (optind >= argc) {
        }

        if (!strcmp(r, "-")) {
        }
        /* Treat --foo the same as -foo.  */

        for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
            if (!strcmp(r, arginfo->argv)) {
                if (arginfo->has_arg) {
                    if (optind >= argc) {
                        (void) fprintf(stderr,
                            "qemu: missing argument for option '%s'\n", r);
                    }
                    arginfo->handle_opt(argv[optind]);
                } else {
                    arginfo->handle_opt(NULL);
                }
            }
        }

        /* no option matched the current argv */
        if (arginfo->handle_opt == NULL) {
            (void) fprintf(stderr, "qemu: unknown option '%s'\n", r);
        }

    if (optind >= argc) {
        (void) fprintf(stderr, "qemu: no user program specified\n");
    }

    filename = argv[optind];
    exec_path = argv[optind];
int main(int argc, char **argv, char **envp)
{
    struct target_pt_regs regs1, *regs = &regs1;
    struct image_info info1, *info = &info1;
    struct linux_binprm bprm;
    char **target_environ, **wrk;

    module_call_init(MODULE_INIT_TRACE);
    qemu_init_cpu_list();
    module_call_init(MODULE_INIT_QOM);

    envlist = envlist_create();

    /* add current environment into the list */
    for (wrk = environ; *wrk != NULL; wrk++) {
        (void) envlist_setenv(envlist, *wrk);
    }

    /* Read the stack limit from the kernel.  If it's "unlimited",
       then we can do little else besides use the default.  */
    struct rlimit lim;
    if (getrlimit(RLIMIT_STACK, &lim) == 0
        && lim.rlim_cur != RLIM_INFINITY
        && lim.rlim_cur == (target_long)lim.rlim_cur) {
        guest_stack_size = lim.rlim_cur;
    }
);
4280 optind
= parse_args(argc
, argv
);
4282 if (!trace_init_backends()) {
4285 trace_init_file(trace_file
);
4288 memset(regs
, 0, sizeof(struct target_pt_regs
));
4290 /* Zero out image_info */
4291 memset(info
, 0, sizeof(struct image_info
));
4293 memset(&bprm
, 0, sizeof (bprm
));
4295 /* Scan interp_prefix dir for replacement files. */
4296 init_paths(interp_prefix
);
4298 init_qemu_uname_release();
4300 if (cpu_model
== NULL
) {
4301 #if defined(TARGET_I386)
4302 #ifdef TARGET_X86_64
4303 cpu_model
= "qemu64";
4305 cpu_model
= "qemu32";
4307 #elif defined(TARGET_ARM)
4309 #elif defined(TARGET_UNICORE32)
4311 #elif defined(TARGET_M68K)
4313 #elif defined(TARGET_SPARC)
4314 #ifdef TARGET_SPARC64
4315 cpu_model
= "TI UltraSparc II";
4317 cpu_model
= "Fujitsu MB86904";
4319 #elif defined(TARGET_MIPS)
4320 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
4325 #elif defined TARGET_OPENRISC
4326 cpu_model
= "or1200";
4327 #elif defined(TARGET_PPC)
4328 # ifdef TARGET_PPC64
4329 cpu_model
= "POWER8";
4333 #elif defined TARGET_SH4
4334 cpu_model
= "sh7785";
4335 #elif defined TARGET_S390X
4342 /* NOTE: we need to init the CPU at this stage to get
4343 qemu_host_page_size */
4344 cpu
= cpu_init(cpu_model
);
4350 if (getenv("QEMU_STRACE")) {
4354 if (getenv("QEMU_RAND_SEED")) {
4355 handle_arg_randseed(getenv("QEMU_RAND_SEED"));
4358 target_environ
= envlist_to_environ(envlist
, NULL
);
4359 envlist_free(envlist
);
4362 * Now that page sizes are configured in cpu_init() we can do
4363 * proper page alignment for guest_base.
4365 guest_base
= HOST_PAGE_ALIGN(guest_base
);
4367 if (reserved_va
|| have_guest_base
) {
4368 guest_base
= init_guest_space(guest_base
, reserved_va
, 0,
4370 if (guest_base
== (unsigned long)-1) {
4371 fprintf(stderr
, "Unable to reserve 0x%lx bytes of virtual address "
4372 "space for use as guest address space (check your virtual "
4373 "memory ulimit setting or reserve less using -R option)\n",
4379 mmap_next_start
= reserved_va
;
4384 * Read in mmap_min_addr kernel parameter. This value is used
4385 * When loading the ELF image to determine whether guest_base
4386 * is needed. It is also used in mmap_find_vma.
4391 if ((fp
= fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL
) {
4393 if (fscanf(fp
, "%lu", &tmp
) == 1) {
4394 mmap_min_addr
= tmp
;
4395 qemu_log_mask(CPU_LOG_PAGE
, "host mmap_min_addr=0x%lx\n", mmap_min_addr
);
4402 * Prepare copy of argv vector for target.
4404 target_argc
= argc
- optind
;
4405 target_argv
= calloc(target_argc
+ 1, sizeof (char *));
4406 if (target_argv
== NULL
) {
4407 (void) fprintf(stderr
, "Unable to allocate memory for target_argv\n");
4412 * If argv0 is specified (using '-0' switch) we replace
4413 * argv[0] pointer with the given one.
4416 if (argv0
!= NULL
) {
4417 target_argv
[i
++] = strdup(argv0
);
4419 for (; i
< target_argc
; i
++) {
4420 target_argv
[i
] = strdup(argv
[optind
+ i
]);
4422 target_argv
[target_argc
] = NULL
;
4424 ts
= g_new0(TaskState
, 1);
4425 init_task_state(ts
);
4426 /* build Task State */
4432 execfd
= qemu_getauxval(AT_EXECFD
);
4434 execfd
= open(filename
, O_RDONLY
);
4436 printf("Error while loading %s: %s\n", filename
, strerror(errno
));
4437 _exit(EXIT_FAILURE
);
4441 ret
= loader_exec(execfd
, filename
, target_argv
, target_environ
, regs
,
4444 printf("Error while loading %s: %s\n", filename
, strerror(-ret
));
4445 _exit(EXIT_FAILURE
);
4448 for (wrk
= target_environ
; *wrk
; wrk
++) {
4452 g_free(target_environ
);
4454 if (qemu_loglevel_mask(CPU_LOG_PAGE
)) {
4455 qemu_log("guest_base 0x%lx\n", guest_base
);
4458 qemu_log("start_brk 0x" TARGET_ABI_FMT_lx
"\n", info
->start_brk
);
4459 qemu_log("end_code 0x" TARGET_ABI_FMT_lx
"\n", info
->end_code
);
4460 qemu_log("start_code 0x" TARGET_ABI_FMT_lx
"\n", info
->start_code
);
4461 qemu_log("start_data 0x" TARGET_ABI_FMT_lx
"\n", info
->start_data
);
4462 qemu_log("end_data 0x" TARGET_ABI_FMT_lx
"\n", info
->end_data
);
4463 qemu_log("start_stack 0x" TARGET_ABI_FMT_lx
"\n", info
->start_stack
);
4464 qemu_log("brk 0x" TARGET_ABI_FMT_lx
"\n", info
->brk
);
4465 qemu_log("entry 0x" TARGET_ABI_FMT_lx
"\n", info
->entry
);
4466 qemu_log("argv_start 0x" TARGET_ABI_FMT_lx
"\n", info
->arg_start
);
4467 qemu_log("env_start 0x" TARGET_ABI_FMT_lx
"\n",
4468 info
->arg_end
+ (abi_ulong
)sizeof(abi_ulong
));
4469 qemu_log("auxv_start 0x" TARGET_ABI_FMT_lx
"\n", info
->saved_auxv
);
4472 target_set_brk(info
->brk
);
    /* Now that we've loaded the binary, GUEST_BASE is fixed.  Delay
       generating the prologue until now so that the prologue can take
       the real value of GUEST_BASE into account.  */
    tcg_prologue_init(tcg_ctx);

#if defined(TARGET_I386)
    env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
    env->hflags |= HF_PE_MASK | HF_CPL_MASK;
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        env->cr[4] |= CR4_OSFXSR_MASK;
        env->hflags |= HF_OSFXSR_MASK;
    }
#ifndef TARGET_ABI32
    /* enable 64 bit mode if possible */
    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
    }
    env->cr[4] |= CR4_PAE_MASK;
    env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
    env->hflags |= HF_LMA_MASK;
#endif

    /* flags setup : we activate the IRQs by default as in user mode */
    env->eflags |= IF_MASK;

    /* linux register setup */
#ifndef TARGET_ABI32
    env->regs[R_EAX] = regs->rax;
    env->regs[R_EBX] = regs->rbx;
    env->regs[R_ECX] = regs->rcx;
    env->regs[R_EDX] = regs->rdx;
    env->regs[R_ESI] = regs->rsi;
    env->regs[R_EDI] = regs->rdi;
    env->regs[R_EBP] = regs->rbp;
    env->regs[R_ESP] = regs->rsp;
    env->eip = regs->rip;
#else
    env->regs[R_EAX] = regs->eax;
    env->regs[R_EBX] = regs->ebx;
    env->regs[R_ECX] = regs->ecx;
    env->regs[R_EDX] = regs->edx;
    env->regs[R_ESI] = regs->esi;
    env->regs[R_EDI] = regs->edi;
    env->regs[R_EBP] = regs->ebp;
    env->regs[R_ESP] = regs->esp;
    env->eip = regs->eip;
#endif

    /* linux interrupt setup */
#ifndef TARGET_ABI32
    env->idt.limit = 511;
#else
    env->idt.limit = 255;
#endif
    env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
                                PROT_READ|PROT_WRITE,
                                MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    idt_table = g2h(env->idt.base);
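
    /*
     * Even in user-mode emulation the x86 CPU model expects valid IDT and
     * GDT descriptors to exist: the IDT base/limit mapped above and the GDT
     * entries written below provide the flat __USER_CS/__USER_DS code and
     * data segments that cpu_x86_load_seg() installs for the guest.
     */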
    /* linux segment setup */
    uint64_t *gdt_table;
    env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
                                PROT_READ|PROT_WRITE,
                                MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
    gdt_table = g2h(env->gdt.base);

    write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
             DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
             (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));

    /* 64 bit code segment */
    write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
             DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
             (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));

    write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
             DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
             (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));

    cpu_x86_load_seg(env, R_CS, __USER_CS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_FS, __USER_DS);
    cpu_x86_load_seg(env, R_GS, __USER_DS);
    /* This hack makes Wine work... */
    env->segs[R_FS].selector = 0;

    cpu_x86_load_seg(env, R_DS, 0);
    cpu_x86_load_seg(env, R_ES, 0);
    cpu_x86_load_seg(env, R_FS, 0);
    cpu_x86_load_seg(env, R_GS, 0);

#elif defined(TARGET_AARCH64)
    if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
        fprintf(stderr,
                "The selected ARM CPU does not support 64 bit mode\n");
    }

    for (i = 0; i < 31; i++) {
        env->xregs[i] = regs->regs[i];
    }
    env->xregs[31] = regs->sp;

#elif defined(TARGET_ARM)
    cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->uregs[i];
    }
#ifdef TARGET_WORDS_BIGENDIAN
    if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
        && (info->elf_flags & EF_ARM_BE8)) {
        env->uncached_cpsr |= CPSR_E;
        env->cp15.sctlr_el[1] |= SCTLR_E0E;
    } else {
        env->cp15.sctlr_el[1] |= SCTLR_B;
    }
#endif

#elif defined(TARGET_UNICORE32)
    cpu_asr_write(env, regs->uregs[32], 0xffffffff);
    for (i = 0; i < 32; i++) {
        env->regs[i] = regs->uregs[i];
    }

#elif defined(TARGET_SPARC)
    env->npc = regs->npc;
    for (i = 0; i < 8; i++)
        env->gregs[i] = regs->u_regs[i];
    for (i = 0; i < 8; i++)
        env->regwptr[i] = regs->u_regs[i + 8];

#elif defined(TARGET_PPC)
#if defined(TARGET_PPC64)
    int flag = (env->insns_flags2 & PPC2_BOOKE206) ? MSR_CM : MSR_SF;
#if defined(TARGET_ABI32)
    env->msr &= ~((target_ulong)1 << flag);
#else
    env->msr |= (target_ulong)1 << flag;
#endif
#endif
    env->nip = regs->nip;
    for (i = 0; i < 32; i++) {
        env->gpr[i] = regs->gpr[i];
    }

#elif defined(TARGET_M68K)
    env->dregs[0] = regs->d0;
    env->dregs[1] = regs->d1;
    env->dregs[2] = regs->d2;
    env->dregs[3] = regs->d3;
    env->dregs[4] = regs->d4;
    env->dregs[5] = regs->d5;
    env->dregs[6] = regs->d6;
    env->dregs[7] = regs->d7;
    env->aregs[0] = regs->a0;
    env->aregs[1] = regs->a1;
    env->aregs[2] = regs->a2;
    env->aregs[3] = regs->a3;
    env->aregs[4] = regs->a4;
    env->aregs[5] = regs->a5;
    env->aregs[6] = regs->a6;
    env->aregs[7] = regs->usp;
    ts->sim_syscalls = 1;

#elif defined(TARGET_MICROBLAZE)
    env->regs[0] = regs->r0;
    env->regs[1] = regs->r1;
    env->regs[2] = regs->r2;
    env->regs[3] = regs->r3;
    env->regs[4] = regs->r4;
    env->regs[5] = regs->r5;
    env->regs[6] = regs->r6;
    env->regs[7] = regs->r7;
    env->regs[8] = regs->r8;
    env->regs[9] = regs->r9;
    env->regs[10] = regs->r10;
    env->regs[11] = regs->r11;
    env->regs[12] = regs->r12;
    env->regs[13] = regs->r13;
    env->regs[14] = regs->r14;
    env->regs[15] = regs->r15;
    env->regs[16] = regs->r16;
    env->regs[17] = regs->r17;
    env->regs[18] = regs->r18;
    env->regs[19] = regs->r19;
    env->regs[20] = regs->r20;
    env->regs[21] = regs->r21;
    env->regs[22] = regs->r22;
    env->regs[23] = regs->r23;
    env->regs[24] = regs->r24;
    env->regs[25] = regs->r25;
    env->regs[26] = regs->r26;
    env->regs[27] = regs->r27;
    env->regs[28] = regs->r28;
    env->regs[29] = regs->r29;
    env->regs[30] = regs->r30;
    env->regs[31] = regs->r31;
    env->sregs[SR_PC] = regs->pc;

#elif defined(TARGET_MIPS)
    for (i = 0; i < 32; i++) {
        env->active_tc.gpr[i] = regs->regs[i];
    }
    env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
    if (regs->cp0_epc & 1) {
        env->hflags |= MIPS_HFLAG_M16;
    }
    if (((info->elf_flags & EF_MIPS_NAN2008) != 0) !=
        ((env->active_fpu.fcr31 & (1 << FCR31_NAN2008)) != 0)) {
        if ((env->active_fpu.fcr31_rw_bitmask &
             (1 << FCR31_NAN2008)) == 0) {
            fprintf(stderr, "ELF binary's NaN mode not supported by CPU\n");
        }
        if ((info->elf_flags & EF_MIPS_NAN2008) != 0) {
            env->active_fpu.fcr31 |= (1 << FCR31_NAN2008);
        } else {
            env->active_fpu.fcr31 &= ~(1 << FCR31_NAN2008);
        }
        restore_snan_bit_mode(env);
    }
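
    /*
     * EF_MIPS_NAN2008 in the ELF header says the binary was built for the
     * IEEE 754-2008 NaN encoding; the check above refuses to run it unless
     * the CPU's FCR31 NAN2008 bit can be changed to match, and
     * restore_snan_bit_mode() then updates the FPU's NaN handling for the
     * newly selected mode.
     */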
#elif defined(TARGET_NIOS2)
    env->regs[1] = regs->r1;
    env->regs[2] = regs->r2;
    env->regs[3] = regs->r3;
    env->regs[4] = regs->r4;
    env->regs[5] = regs->r5;
    env->regs[6] = regs->r6;
    env->regs[7] = regs->r7;
    env->regs[8] = regs->r8;
    env->regs[9] = regs->r9;
    env->regs[10] = regs->r10;
    env->regs[11] = regs->r11;
    env->regs[12] = regs->r12;
    env->regs[13] = regs->r13;
    env->regs[14] = regs->r14;
    env->regs[15] = regs->r15;
    /* TODO: unsigned long orig_r2; */
    env->regs[R_RA] = regs->ra;
    env->regs[R_FP] = regs->fp;
    env->regs[R_SP] = regs->sp;
    env->regs[R_GP] = regs->gp;
    env->regs[CR_ESTATUS] = regs->estatus;
    env->regs[R_EA] = regs->ea;
    /* TODO: unsigned long orig_r7; */

    /* Emulate eret when starting thread. */
    env->regs[R_PC] = regs->ea;

#elif defined(TARGET_OPENRISC)
    for (i = 0; i < 32; i++) {
        cpu_set_gpr(env, i, regs->gpr[i]);
    }
    cpu_set_sr(env, regs->sr);

#elif defined(TARGET_SH4)
    for (i = 0; i < 16; i++) {
        env->gregs[i] = regs->regs[i];
    }

#elif defined(TARGET_ALPHA)
    for (i = 0; i < 28; i++) {
        env->ir[i] = ((abi_ulong *)regs)[i];
    }
    env->ir[IR_SP] = regs->usp;

#elif defined(TARGET_CRIS)
    env->regs[0] = regs->r0;
    env->regs[1] = regs->r1;
    env->regs[2] = regs->r2;
    env->regs[3] = regs->r3;
    env->regs[4] = regs->r4;
    env->regs[5] = regs->r5;
    env->regs[6] = regs->r6;
    env->regs[7] = regs->r7;
    env->regs[8] = regs->r8;
    env->regs[9] = regs->r9;
    env->regs[10] = regs->r10;
    env->regs[11] = regs->r11;
    env->regs[12] = regs->r12;
    env->regs[13] = regs->r13;
    env->regs[14] = info->start_stack;
    env->regs[15] = regs->acr;
    env->pc = regs->erp;

#elif defined(TARGET_S390X)
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->gprs[i];
    }
    env->psw.mask = regs->psw.mask;
    env->psw.addr = regs->psw.addr;

#elif defined(TARGET_TILEGX)
    for (i = 0; i < TILEGX_R_COUNT; i++) {
        env->regs[i] = regs->regs[i];
    }
    for (i = 0; i < TILEGX_SPR_COUNT; i++) {
    }

#elif defined(TARGET_HPPA)
    for (i = 1; i < 32; i++) {
        env->gr[i] = regs->gr[i];
    }
    env->iaoq_f = regs->iaoq[0];
    env->iaoq_b = regs->iaoq[1];

#else
#error unsupported target CPU
#endif

#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call.  */
#endif

    if (gdbserver_start(gdbstub_port) < 0) {
        fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
                gdbstub_port);
    }
    gdb_handlesig(cpu, 0);