 *  Copyright (c) 2003-2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-version.h"
#include <sys/syscall.h>
#include <sys/resource.h>

#include "qapi/error.h"
#include "qemu/path.h"
#include "qemu/config-file.h"
#include "qemu/cutils.h"
#include "qemu/help_option.h"
#include "exec/exec-all.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
#include "trace/control.h"
static const char *filename;
static const char *argv0;
static int gdbstub_port;
static envlist_t *envlist;
static const char *cpu_model;
unsigned long mmap_min_addr;
unsigned long guest_base;
#define EXCP_DUMP(env, fmt, ...)                                        \
do {                                                                    \
    CPUState *cs = ENV_GET_CPU(env);                                    \
    fprintf(stderr, fmt , ## __VA_ARGS__);                              \
    cpu_dump_state(cs, stderr, fprintf, 0);                             \
    if (qemu_log_separate()) {                                          \
        qemu_log(fmt, ## __VA_ARGS__);                                  \
        log_cpu_state(cs, 0);                                           \
    }                                                                   \
} while (0)
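
/*
 * Usage sketch (hypothetical message shown): EXCP_DUMP(env, "trap %d\n", n)
 * prints the message plus a full register dump to stderr and, when a
 * separate log file is active, repeats both into the QEMU log.
 */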
/*
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
 *
 * This way we will never overlap with our own libraries or binaries or stack
 * or anything else that QEMU maps.
 *
 * Many cpus reserve the high bit (or more than one for some 64-bit cpus)
 * of the address for the kernel.  Some cpus rely on this and user space
 * uses the high bit(s) for pointer tagging and the like.  For them, we
 * must preserve the expected address space.
 */
#ifndef MAX_RESERVED_VA
# if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
#  if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \
      (TARGET_LONG_BITS == 32 || defined(TARGET_ABI32))
/* There are a number of places where we assign reserved_va to a variable
   of type abi_ulong and expect it to fit.  Avoid the last page.  */
#   define MAX_RESERVED_VA  (0xfffffffful & TARGET_PAGE_MASK)
#  else
#   define MAX_RESERVED_VA  (1ul << TARGET_VIRT_ADDR_SPACE_BITS)
#  endif
# else
#  define MAX_RESERVED_VA  0
# endif
#endif
/* That said, reserving *too* much vm space via mmap can run into problems
   with rlimits, oom due to page table creation, etc.  We will still try it,
   if directed by the command-line option, but not by default.  */
#if HOST_LONG_BITS == 64 && TARGET_VIRT_ADDR_SPACE_BITS <= 32
unsigned long reserved_va = MAX_RESERVED_VA;
#else
unsigned long reserved_va;
#endif
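
/*
 * Note: reserved_va can also be set explicitly on the command line (the
 * -R option); a non-zero value makes the mmap layer keep every guest
 * mapping inside one pre-reserved window above guest_base.
 */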
static void usage(int exitcode);

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;

/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
   we allocate a bigger stack. Need a better solution, for example
   by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;
void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}
#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)
{
    return -1;
}
#endif

/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* Make sure everything is in a consistent state for calling fork().  */
void fork_start(void)
{
    qemu_mutex_lock(&tb_ctx.tb_lock);
}

void fork_end(int child)
{
    mmap_fork_end(child);
    if (child) {
        CPUState *cpu, *next_cpu;
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads.  */
        CPU_FOREACH_SAFE(cpu, next_cpu) {
            if (cpu != thread_cpu) {
                QTAILQ_REMOVE(&cpus, cpu, node);
            }
        }
        qemu_mutex_init(&tb_ctx.tb_lock);
        qemu_init_cpu_list();
        gdbserver_fork(thread_cpu);
        /* qemu_init_cpu_list() takes care of reinitializing the
         * exclusive state, so we don't need to end_exclusive() here.
         */
    } else {
        qemu_mutex_unlock(&tb_ctx.tb_lock);
    }
}
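
/*
 * Note on the asymmetry above: the parent simply drops the TB lock taken in
 * fork_start(), while the child re-creates it; after fork() only the forking
 * thread survives, so re-initialising the mutex is the simplest way to bring
 * it back to a known state.
 */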
/***********************************************************/
/* CPUX86 core interface */

uint64_t cpu_get_tsc(CPUX86State *env)
{
    return cpu_get_host_ticks();
}
static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
                     int flags)
{
    unsigned int e1, e2;
    uint32_t *p;

    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
    e2 |= flags << 8;
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}

static uint64_t *idt_table;

#ifdef TARGET_X86_64
static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;

    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
    p[2] = tswap32(addr >> 32);
    p[3] = 0;
}
/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
}
#else
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;

    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}
/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate(idt_table + n, 0, dpl, 0, 0);
}
#endif
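
/*
 * For reference, e1/e2 above are the low and high 32-bit words of an x86
 * descriptor-table entry: e2 carries the gate type in bits 8-11, the DPL in
 * bits 13-14 and the present bit (0x8000).  As the comment says, only the
 * DPL matters here, since user-mode emulation never performs real
 * descriptor-based dispatch through these tables.
 */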
void cpu_loop(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    int trapnr;
    abi_ulong pc;
    abi_ulong ret;
    target_siginfo_t info;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case 0x80:
            /* linux syscall from int $0x80 */
            ret = do_syscall(env,
                             env->regs[R_EAX],
                             env->regs[R_EBX],
                             env->regs[R_ECX],
                             env->regs[R_EDX],
                             env->regs[R_ESI],
                             env->regs[R_EDI],
                             env->regs[R_EBP],
                             0, 0);
            if (ret == -TARGET_ERESTARTSYS) {
                env->eip -= 2;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[R_EAX] = ret;
            }
            break;
        case EXCP_SYSCALL:
            /* linux syscall from syscall instruction */
            ret = do_syscall(env,
                             env->regs[R_EAX],
                             env->regs[R_EDI],
                             env->regs[R_ESI],
                             env->regs[R_EDX],
                             env->regs[10],
                             env->regs[8],
                             env->regs[9],
                             0, 0);
            if (ret == -TARGET_ERESTARTSYS) {
                env->eip -= 2;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[R_EAX] = ret;
            }
            break;
        case EXCP0B_NOSEG:
        case EXCP0C_STACK:
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP0D_GPF:
            /* XXX: potential problem if ABI32 */
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_fault(env);
                break;
            }
#endif
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP0E_PAGE:
            info.si_signo = TARGET_SIGSEGV;
            if (!(env->error_code & 1))
                info.si_code = TARGET_SEGV_MAPERR;
            else
                info.si_code = TARGET_SEGV_ACCERR;
            info._sifields._sigfault._addr = env->cr[2];
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP00_DIVZ:
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
                break;
            }
#endif
            /* division by zero */
            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_INTDIV;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP01_DB:
        case EXCP03_INT3:
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
                break;
            }
#endif
            info.si_signo = TARGET_SIGTRAP;
            if (trapnr == EXCP01_DB) {
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->eip;
            } else {
                info.si_code = TARGET_SI_KERNEL;
                info._sifields._sigfault._addr = 0;
            }
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP04_INTO:
        case EXCP05_BOUND:
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
                break;
            }
#endif
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP06_ILLOP:
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_DEBUG:
            {
                int sig;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                if (sig) {
                    info.si_signo = sig;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                }
            }
            break;
        case EXCP_ATOMIC:
            cpu_exec_step_atomic(cs);
            break;
        default:
            pc = env->segs[R_CS].base + env->eip;
            EXCP_DUMP(env, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
                      (long)pc, trapnr);
            abort();
        }
        process_pending_signals(env);
    }
}
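
/*
 * A pattern shared by every cpu_loop() in this file: faults are not delivered
 * immediately.  queue_signal() only records the target siginfo; the guest
 * signal frame is actually built by process_pending_signals() at the bottom
 * of the loop, once the CPU state is consistent again.
 */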
#define get_user_code_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_code_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define put_user_data_u32(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap32(__x);                         \
        }                                               \
        put_user_u32(__x, (gaddr));                     \
    })

#define put_user_data_u16(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap16(__x);                         \
        }                                               \
        put_user_u16(__x, (gaddr));                     \
    })
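
/*
 * Why code and data accessors differ above: on ARM BE8-style configurations
 * instructions and data can have different endianness, so code fetches swap
 * based on bswap_code(arm_sctlr_b(env)) while data accesses swap based on
 * arm_cpu_bswap_data(env).  Example use (see the SWI decoder below): the
 * trapping instruction is re-read with get_user_code_u16/u32.
 */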
/* Commpage handling -- there is no commpage for AArch64 */

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 *
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
    uint64_t oldval, newval, val;
    uint32_t addr, cpsr;
    target_siginfo_t info;

    /* Based on the 32 bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations. However things like ldrex/strex are much harder so
       there's not much point trying. */
    cpsr = cpsr_read(env);
    addr = env->regs[2];

    if (get_user_u64(oldval, env->regs[0])) {
        env->exception.vaddress = env->regs[0];
        goto segv;
    }

    if (get_user_u64(newval, env->regs[1])) {
        env->exception.vaddress = env->regs[1];
        goto segv;
    }

    if (get_user_u64(val, addr)) {
        env->exception.vaddress = addr;
        goto segv;
    }

    if (val == oldval) {
        val = newval;

        if (put_user_u64(val, addr)) {
            env->exception.vaddress = addr;
            goto segv;
        }

        env->regs[0] = 0;
        cpsr |= CPSR_C;
    } else {
        env->regs[0] = -1;
        cpsr &= ~CPSR_C;
    }
    cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
    return;

segv:
    /* We get the PC of the entry address - which is as good as anything,
       on a real kernel what you get depends on which mode it uses. */
    info.si_signo = TARGET_SIGSEGV;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->exception.vaddress;
    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
}
/* Handle a jump to the kernel code page.  */
static int
do_kernel_trap(CPUARMState *env)
{
    uint32_t addr;
    uint32_t cpsr;
    uint32_t val;

    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        /* ??? No-op. Will need to do better for SMP.  */
        break;
    case 0xffff0fc0: /* __kernel_cmpxchg */
         /* XXX: This only works between threads, not between processes.
            It's probably possible to implement this with native host
            operations. However things like ldrex/strex are much harder so
            there's not much point trying.  */
        cpsr = cpsr_read(env);
        addr = env->regs[2];
        /* FIXME: This should SEGV if the access fails.  */
        if (get_user_u32(val, addr))
            val = ~env->regs[0];
        if (val == env->regs[0]) {
            val = env->regs[1];
            /* FIXME: Check for segfaults.  */
            put_user_u32(val, addr);
            env->regs[0] = 0;
            cpsr |= CPSR_C;
        } else {
            env->regs[0] = -1;
            cpsr &= ~CPSR_C;
        }
        cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
        break;
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = cpu_get_tls(env);
        break;
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);
        break;
    default:
        return 1;
    }
    /* Jump back to the caller.  */
    addr = env->regs[14];
    if (addr & 1) {
        env->thumb = 1;
        addr &= ~1;
    }
    env->regs[15] = addr;

    return 0;
}
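
/*
 * The magic r15 values above are the fixed entry points of the Linux ARM
 * "commpage" helpers (0xffff0fxx); the guest jumps to them directly, so
 * user-mode emulation intercepts the jump instead of mapping real code
 * at those addresses.
 */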
void cpu_loop(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int trapnr;
    unsigned int n, insn;
    target_siginfo_t info;
    uint32_t addr;
    abi_ulong ret;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case EXCP_UDEF:
            {
                TaskState *ts = cs->opaque;
                uint32_t opcode;
                int rc;

                /* we handle the FPU emulation here, as Linux */
                /* we get the opcode */
                /* FIXME - what to do if get_user() fails? */
                get_user_code_u32(opcode, env->regs[15], env);

                rc = EmulateAll(opcode, &ts->fpa, env);
                if (rc == 0) { /* illegal instruction */
                    info.si_signo = TARGET_SIGILL;
                    info.si_code = TARGET_ILL_ILLOPN;
                    info._sifields._sigfault._addr = env->regs[15];
                    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                } else if (rc < 0) { /* FP exception */
                    int arm_fpe = 0;

                    /* translate softfloat flags to FPSR flags */
                    if (-rc & float_flag_invalid)
                        arm_fpe |= BIT_IOC;
                    if (-rc & float_flag_divbyzero)
                        arm_fpe |= BIT_DZC;
                    if (-rc & float_flag_overflow)
                        arm_fpe |= BIT_OFC;
                    if (-rc & float_flag_underflow)
                        arm_fpe |= BIT_UFC;
                    if (-rc & float_flag_inexact)
                        arm_fpe |= BIT_IXC;

                    FPSR fpsr = ts->fpa.fpsr;
                    //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);

                    if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
                        info.si_signo = TARGET_SIGFPE;

                        /* ordered by priority, least first */
                        if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES;
                        if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND;
                        if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF;
                        if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV;
                        if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV;

                        info._sifields._sigfault._addr = env->regs[15];
                        queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                    }

                    /* accumulate unenabled exceptions */
                    if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC))
                        fpsr |= BIT_IXC;
                    if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC))
                        fpsr |= BIT_UFC;
                    if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC))
                        fpsr |= BIT_OFC;
                    if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC))
                        fpsr |= BIT_DZC;
                    if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC))
                        fpsr |= BIT_IOC;
                    ts->fpa.fpsr = fpsr;
                } else { /* everything OK */
                }
            }
            break;
        case EXCP_SWI:
        case EXCP_BKPT:
            {
                /* system call */
                if (trapnr == EXCP_BKPT) {
                    if (env->thumb) {
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u16(insn, env->regs[15], env);
                        n = insn & 0xff;
                        env->regs[15] += 2;
                    } else {
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u32(insn, env->regs[15], env);
                        n = (insn & 0xf) | ((insn >> 4) & 0xff0);
                        env->regs[15] += 4;
                    }
                } else {
                    if (env->thumb) {
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u16(insn, env->regs[15] - 2, env);
                        n = insn & 0xff;
                    } else {
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u32(insn, env->regs[15] - 4, env);
                        n = insn & 0xffffff;
                    }
                }

                if (n == ARM_NR_cacheflush) {
                    /* nop */
                } else if (n == ARM_NR_semihosting
                           || n == ARM_NR_thumb_semihosting) {
                    env->regs[0] = do_arm_semihosting (env);
                } else if (n == 0 || n >= ARM_SYSCALL_BASE || env->thumb) {
                    /* linux syscall */
                    if (env->thumb || n == 0) {
                        n = env->regs[7];
                    } else {
                        n -= ARM_SYSCALL_BASE;
                    }
                    if ( n > ARM_NR_BASE) {
                        switch (n) {
                        case ARM_NR_cacheflush:
                            /* nop */
                            break;
                        case ARM_NR_set_tls:
                            cpu_set_tls(env, env->regs[0]);
                            env->regs[0] = 0;
                            break;
                        case ARM_NR_breakpoint:
                            env->regs[15] -= env->thumb ? 2 : 4;
                            goto excp_debug;
                        default:
                            gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
                                     n);
                            env->regs[0] = -TARGET_ENOSYS;
                            break;
                        }
                    } else {
                        ret = do_syscall(env,
                                         n,
                                         env->regs[0],
                                         env->regs[1],
                                         env->regs[2],
                                         env->regs[3],
                                         env->regs[4],
                                         env->regs[5],
                                         0, 0);
                        if (ret == -TARGET_ERESTARTSYS) {
                            env->regs[15] -= env->thumb ? 2 : 4;
                        } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                            env->regs[0] = ret;
                        }
                    }
                } else {
                    /* semihosting call */
                    env->regs[0] = do_arm_semihosting(env);
                }
            }
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            addr = env->exception.vaddress;
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = addr;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_DEBUG:
        excp_debug:
            {
                int sig;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                if (sig) {
                    info.si_signo = sig;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                }
            }
            break;
        case EXCP_KERNEL_TRAP:
            if (do_kernel_trap(env))
                goto error;
            break;
        case EXCP_YIELD:
            /* nothing to do here for user-mode, just resume guest code */
            break;
        case EXCP_ATOMIC:
            cpu_exec_step_atomic(cs);
            break;
        default:
        error:
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
            abort();
        }
        process_pending_signals(env);
    }
}
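
/*
 * Syscall number convention handled above: for EABI (and Thumb) the number
 * comes from r7, while the old OABI encodes it in the swi immediate as
 * ARM_SYSCALL_BASE + n; numbers above ARM_NR_BASE select the ARM private
 * calls (cacheflush, set_tls, breakpoint) that are handled inline.
 */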
/* AArch64 main loop */
void cpu_loop(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int trapnr, sig;
    abi_long ret;
    target_siginfo_t info;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case EXCP_SWI:
            ret = do_syscall(env,
                             env->xregs[8],
                             env->xregs[0],
                             env->xregs[1],
                             env->xregs[2],
                             env->xregs[3],
                             env->xregs[4],
                             env->xregs[5],
                             0, 0);
            if (ret == -TARGET_ERESTARTSYS) {
                env->pc -= 4;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->xregs[0] = ret;
            }
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_UDEF:
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->exception.vaddress;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_DEBUG:
        case EXCP_BKPT:
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            if (sig) {
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            }
            break;
        case EXCP_SEMIHOST:
            env->xregs[0] = do_arm_semihosting(env);
            break;
        case EXCP_YIELD:
            /* nothing to do here for user-mode, just resume guest code */
            break;
        case EXCP_ATOMIC:
            cpu_exec_step_atomic(cs);
            break;
        default:
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
            abort();
        }
        process_pending_signals(env);
        /* Exception return on AArch64 always clears the exclusive monitor,
         * so any return to running guest code implies this.
         */
        env->exclusive_addr = -1;
    }
}
#endif /* ndef TARGET_ABI32 */
#ifdef TARGET_UNICORE32

void cpu_loop(CPUUniCore32State *env)
{
    CPUState *cs = CPU(uc32_env_get_cpu(env));
    int trapnr;
    unsigned int n, insn;
    target_siginfo_t info;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case UC32_EXCP_PRIV:
            {
                /* system call */
                get_user_u32(insn, env->regs[31] - 4);
                n = insn & 0xffffff;

                if (n >= UC32_SYSCALL_BASE) {
                    /* linux syscall */
                    n -= UC32_SYSCALL_BASE;
                    if (n == UC32_SYSCALL_NR_set_tls) {
                        cpu_set_tls(env, env->regs[0]);
                        env->regs[0] = 0;
                    } else {
                        abi_long ret = do_syscall(env,
                                                  n,
                                                  env->regs[0],
                                                  env->regs[1],
                                                  env->regs[2],
                                                  env->regs[3],
                                                  env->regs[4],
                                                  env->regs[5],
                                                  0, 0);
                        if (ret == -TARGET_ERESTARTSYS) {
                            env->regs[31] -= 4;
                        } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                            env->regs[0] = ret;
                        }
                    }
                } else {
                    goto error;
                }
            }
            break;
        case UC32_EXCP_DTRAP:
        case UC32_EXCP_ITRAP:
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->cp0.c4_faultaddr;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_DEBUG:
            {
                int sig;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                if (sig) {
                    info.si_signo = sig;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                }
            }
            break;
        case EXCP_ATOMIC:
            cpu_exec_step_atomic(cs);
            break;
        default:
            goto error;
        }
        process_pending_signals(env);
    }

error:
    EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
    abort();
}
#endif
#define SPARC64_STACK_BIAS 2047

/* WARNING: dealing with register windows _is_ complicated. More info
   can be found at http://www.sics.se/~psm/sparcstack.html */
static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
{
    index = (index + cwp * 16) % (16 * env->nwindows);
    /* wrap handling : if cwp is on the last window, then we use the
       registers 'after' the end */
    if (index < 8 && env->cwp == env->nwindows - 1)
        index += 16 * env->nwindows;
    return index;
}
/* save the register window 'cwp1' */
static inline void save_window_offset(CPUSPARCState *env, int cwp1)
{
    unsigned int i;
    abi_ulong sp_ptr;

    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
    if (sp_ptr & 3)
        sp_ptr += SPARC64_STACK_BIAS;
#endif
#if defined(DEBUG_WIN)
    printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",
           sp_ptr, cwp1);
#endif
    for (i = 0; i < 16; i++) {
        /* FIXME - what to do if put_user() fails? */
        put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);
    }
}
static void save_window(CPUSPARCState *env)
{
#ifndef TARGET_SPARC64
    unsigned int new_wim;
    new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
    env->wim = new_wim;
#else
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
    env->cansave++;
    env->canrestore--;
#endif
}
static void restore_window(CPUSPARCState *env)
{
#ifndef TARGET_SPARC64
    unsigned int new_wim;
#endif
    unsigned int i, cwp1;
    abi_ulong sp_ptr;

#ifndef TARGET_SPARC64
    new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);
#endif

    /* restore the invalid window */
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
    if (sp_ptr & 3)
        sp_ptr += SPARC64_STACK_BIAS;
#endif
#if defined(DEBUG_WIN)
    printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n",
           sp_ptr, cwp1);
#endif
    for (i = 0; i < 16; i++) {
        /* FIXME - what to do if get_user() fails? */
        get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);
    }
#ifdef TARGET_SPARC64
    env->canrestore++;
    if (env->cleanwin < env->nwindows - 1)
        env->cleanwin++;
    env->cansave--;
#else
    env->wim = new_wim;
#endif
}
static void flush_windows(CPUSPARCState *env)
{
    int offset, cwp1;

    offset = 1;
    for (;;) {
        /* if restore would invoke restore_window(), then we can stop */
        cwp1 = cpu_cwp_inc(env, env->cwp + offset);
#ifndef TARGET_SPARC64
        if (env->wim & (1 << cwp1))
            break;
#else
        if (env->canrestore == 0)
            break;
        env->cansave++;
        env->canrestore--;
#endif
        save_window_offset(env, cwp1);
        offset++;
    }
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
#ifndef TARGET_SPARC64
    /* set wim so that restore will reload the registers */
    env->wim = 1 << cwp1;
#endif
#if defined(DEBUG_WIN)
    printf("flush_windows: nb=%d\n", offset - 1);
#endif
}
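
/*
 * Layout assumed by the window helpers above: each register window spills
 * its 16 local/in registers to the 16 abi_ulong slots starting at that
 * window's %sp (plus SPARC64_STACK_BIAS on 64-bit), which is the save area
 * the SPARC ABI reserves at the top of every stack frame.
 */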
void cpu_loop (CPUSPARCState *env)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    int trapnr;
    abi_long ret;
    target_siginfo_t info;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        /* Compute PSR before exposing state.  */
        if (env->cc_op != CC_OP_FLAGS) {
            cpu_get_psr(env);
        }

        switch (trapnr) {
#ifndef TARGET_SPARC64
        case 0x88:
        case 0x90:
#else
        case 0x110:
        case 0x16d:
#endif
            ret = do_syscall (env, env->gregs[1],
                              env->regwptr[0], env->regwptr[1],
                              env->regwptr[2], env->regwptr[3],
                              env->regwptr[4], env->regwptr[5],
                              0, 0);
            if (ret == -TARGET_ERESTARTSYS || ret == -TARGET_QEMU_ESIGRETURN) {
                break;
            }
            if ((abi_ulong)ret >= (abi_ulong)(-515)) {
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
                env->xcc |= PSR_CARRY;
#else
                env->psr |= PSR_CARRY;
#endif
                ret = -ret;
            } else {
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
                env->xcc &= ~PSR_CARRY;
#else
                env->psr &= ~PSR_CARRY;
#endif
            }
            env->regwptr[0] = ret;
            /* next instruction */
            env->pc = env->npc;
            env->npc = env->npc + 4;
            break;
        case 0x83: /* flush windows */
            flush_windows(env);
            /* next instruction */
            env->pc = env->npc;
            env->npc = env->npc + 4;
            break;
#ifndef TARGET_SPARC64
        case TT_WIN_OVF: /* window overflow */
            save_window(env);
            break;
        case TT_WIN_UNF: /* window underflow */
            restore_window(env);
            break;
        case TT_TFAULT:
        case TT_DFAULT:
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->mmuregs[4];
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
#else
        case TT_SPILL: /* window overflow */
            save_window(env);
            break;
        case TT_FILL: /* window underflow */
            restore_window(env);
            break;
        case TT_TFAULT:
        case TT_DFAULT:
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            if (trapnr == TT_DFAULT)
                info._sifields._sigfault._addr = env->dmmu.mmuregs[4];
            else
                info._sifields._sigfault._addr = cpu_tsptr(env)->tpc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
#ifndef TARGET_ABI32
        case 0x16e:
            flush_windows(env);
            sparc64_get_context(env);
            break;
        case 0x16f:
            flush_windows(env);
            sparc64_set_context(env);
            break;
#endif
#endif
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case TT_ILL_INSN:
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_DEBUG:
            {
                int sig;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                if (sig) {
                    info.si_signo = sig;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                }
            }
            break;
        case EXCP_ATOMIC:
            cpu_exec_step_atomic(cs);
            break;
        default:
            printf ("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
            exit(EXIT_FAILURE);
        }
        process_pending_signals (env);
    }
}
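
/*
 * SPARC syscall return convention implemented above: values in the
 * -515..-1 range are errors, reported by setting the carry bit in
 * %psr/%xcc and returning the positive errno in %o0; success clears
 * carry and returns the value unchanged.
 */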
static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env)
{
    return cpu_get_host_ticks();
}

uint64_t cpu_ppc_load_tbl(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env);
}

uint32_t cpu_ppc_load_tbu(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env) >> 32;
}

uint64_t cpu_ppc_load_atbl(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env);
}

uint32_t cpu_ppc_load_atbu(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env) >> 32;
}

uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env)
    __attribute__ (( alias ("cpu_ppc_load_tbu") ));

uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env)
{
    return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
}
/* XXX: to be fixed */
int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
{
    return -1;
}

int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
{
    return -1;
}
static int do_store_exclusive(CPUPPCState *env)
{
    target_ulong addr;
    target_ulong page_addr;
    target_ulong val, val2 __attribute__((unused)) = 0;
    int flags;
    int segv = 0;

    addr = env->reserve_ea;
    page_addr = addr & TARGET_PAGE_MASK;
    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {
        segv = 1;
    } else {
        int reg = env->reserve_info & 0x1f;
        int size = env->reserve_info >> 5;
        int stored = 0;

        if (addr == env->reserve_addr) {
            switch (size) {
            case 1: segv = get_user_u8(val, addr); break;
            case 2: segv = get_user_u16(val, addr); break;
            case 4: segv = get_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
            case 8: segv = get_user_u64(val, addr); break;
            case 16:
                segv = get_user_u64(val, addr);
                if (!segv) {
                    segv = get_user_u64(val2, addr + 8);
                }
                break;
#endif
            default: abort();
            }
            if (!segv && val == env->reserve_val) {
                val = env->gpr[reg];
                switch (size) {
                case 1: segv = put_user_u8(val, addr); break;
                case 2: segv = put_user_u16(val, addr); break;
                case 4: segv = put_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
                case 8: segv = put_user_u64(val, addr); break;
                case 16:
                    if (val2 == env->reserve_val2) {
                        if (msr_le) {
                            val2 = val;
                            val = env->gpr[reg+1];
                        } else {
                            val2 = env->gpr[reg+1];
                        }
                        segv = put_user_u64(val, addr);
                        if (!segv) {
                            segv = put_user_u64(val2, addr + 8);
                        }
                    }
                    break;
#endif
                default: abort();
                }
                if (!segv) {
                    stored = 1;
                }
            }
        }
        env->crf[0] = (stored << 1) | xer_so;
        env->reserve_addr = (target_ulong)-1;
    }
    return segv;
}
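
/*
 * Note: env->reserve_addr is the address armed by a previous lwarx/ldarx;
 * resetting it to (target_ulong)-1 above drops the reservation so a retried
 * stwcx./stdcx. cannot succeed spuriously.  cr0 gets its EQ bit set (plus
 * XER.SO) only when the store actually happened, matching hardware.
 */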
void cpu_loop(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_siginfo_t info;
    int trapnr;
    target_ulong ret;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case POWERPC_EXCP_NONE:
            /* Just go on */
            break;
        case POWERPC_EXCP_CRITICAL: /* Critical input */
            cpu_abort(cs, "Critical interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_MCHECK:   /* Machine check exception */
            cpu_abort(cs, "Machine check exception while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_DSI:      /* Data storage exception */
            /* XXX: check this. Seems bugged */
            switch (env->error_code & 0xFF000000) {
            case 0x40000000:
            case 0x42000000:
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                break;
            case 0x04000000:
                info.si_signo = TARGET_SIGILL;
                info.si_code = TARGET_ILL_ILLADR;
                break;
            case 0x08000000:
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_ACCERR;
                break;
            default:
                /* Let's send a regular segfault... */
                EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
                          env->error_code);
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                break;
            }
            info._sifields._sigfault._addr = env->spr[SPR_DAR];
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case POWERPC_EXCP_ISI:      /* Instruction storage exception */
            /* XXX: check this */
            switch (env->error_code & 0xFF000000) {
            case 0x40000000:
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                break;
            case 0x10000000:
            case 0x08000000:
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_ACCERR;
                break;
            default:
                /* Let's send a regular segfault... */
                EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
                          env->error_code);
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                break;
            }
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case POWERPC_EXCP_EXTERNAL: /* External input */
            cpu_abort(cs, "External interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_ALIGN:    /* Alignment exception */
            /* XXX: check this */
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case POWERPC_EXCP_PROGRAM:  /* Program exception */
        case POWERPC_EXCP_HV_EMU:   /* HV emulation */
            /* XXX: check this */
            switch (env->error_code & ~0xF) {
            case POWERPC_EXCP_FP:
                info.si_signo = TARGET_SIGFPE;
                switch (env->error_code & 0xF) {
                case POWERPC_EXCP_FP_OX:
                    info.si_code = TARGET_FPE_FLTOVF;
                    break;
                case POWERPC_EXCP_FP_UX:
                    info.si_code = TARGET_FPE_FLTUND;
                    break;
                case POWERPC_EXCP_FP_ZX:
                case POWERPC_EXCP_FP_VXZDZ:
                    info.si_code = TARGET_FPE_FLTDIV;
                    break;
                case POWERPC_EXCP_FP_XX:
                    info.si_code = TARGET_FPE_FLTRES;
                    break;
                case POWERPC_EXCP_FP_VXSOFT:
                    info.si_code = TARGET_FPE_FLTINV;
                    break;
                case POWERPC_EXCP_FP_VXSNAN:
                case POWERPC_EXCP_FP_VXISI:
                case POWERPC_EXCP_FP_VXIDI:
                case POWERPC_EXCP_FP_VXIMZ:
                case POWERPC_EXCP_FP_VXVC:
                case POWERPC_EXCP_FP_VXSQRT:
                case POWERPC_EXCP_FP_VXCVI:
                    info.si_code = TARGET_FPE_FLTSUB;
                    break;
                default:
                    EXCP_DUMP(env, "Unknown floating point exception (%02x)\n",
                              env->error_code);
                    break;
                }
                break;
            case POWERPC_EXCP_INVAL:
                info.si_signo = TARGET_SIGILL;
                switch (env->error_code & 0xF) {
                case POWERPC_EXCP_INVAL_INVAL:
                    info.si_code = TARGET_ILL_ILLOPC;
                    break;
                case POWERPC_EXCP_INVAL_LSWX:
                    info.si_code = TARGET_ILL_ILLOPN;
                    break;
                case POWERPC_EXCP_INVAL_SPR:
                    info.si_code = TARGET_ILL_PRVREG;
                    break;
                case POWERPC_EXCP_INVAL_FP:
                    info.si_code = TARGET_ILL_COPROC;
                    break;
                default:
                    EXCP_DUMP(env, "Unknown invalid operation (%02x)\n",
                              env->error_code & 0xF);
                    info.si_code = TARGET_ILL_ILLADR;
                    break;
                }
                break;
            case POWERPC_EXCP_PRIV:
                info.si_signo = TARGET_SIGILL;
                switch (env->error_code & 0xF) {
                case POWERPC_EXCP_PRIV_OPC:
                    info.si_code = TARGET_ILL_PRVOPC;
                    break;
                case POWERPC_EXCP_PRIV_REG:
                    info.si_code = TARGET_ILL_PRVREG;
                    break;
                default:
                    EXCP_DUMP(env, "Unknown privilege violation (%02x)\n",
                              env->error_code & 0xF);
                    info.si_code = TARGET_ILL_PRVOPC;
                    break;
                }
                break;
            case POWERPC_EXCP_TRAP:
                cpu_abort(cs, "Tried to call a TRAP\n");
                break;
            default:
                /* Should not happen ! */
                cpu_abort(cs, "Unknown program exception (%02x)\n",
                          env->error_code);
                break;
            }
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case POWERPC_EXCP_FPU:      /* Floating-point unavailable exception */
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case POWERPC_EXCP_SYSCALL:  /* System call exception */
            cpu_abort(cs, "Syscall exception while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_APU:      /* Auxiliary processor unavailable */
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case POWERPC_EXCP_DECR:     /* Decrementer exception */
            cpu_abort(cs, "Decrementer interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_FIT:      /* Fixed-interval timer interrupt */
            cpu_abort(cs, "Fix interval timer interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_WDT:      /* Watchdog timer interrupt */
            cpu_abort(cs, "Watchdog timer interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_DTLB:     /* Data TLB error */
            cpu_abort(cs, "Data TLB exception while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_ITLB:     /* Instruction TLB error */
            cpu_abort(cs, "Instruction TLB exception while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_SPEU:     /* SPE/embedded floating-point unavail. */
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case POWERPC_EXCP_EFPDI:    /* Embedded floating-point data IRQ */
            cpu_abort(cs, "Embedded floating-point data IRQ not handled\n");
            break;
        case POWERPC_EXCP_EFPRI:    /* Embedded floating-point round IRQ */
            cpu_abort(cs, "Embedded floating-point round IRQ not handled\n");
            break;
        case POWERPC_EXCP_EPERFM:   /* Embedded performance monitor IRQ */
            cpu_abort(cs, "Performance monitor exception not handled\n");
            break;
        case POWERPC_EXCP_DOORI:    /* Embedded doorbell interrupt */
            cpu_abort(cs, "Doorbell interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_DOORCI:   /* Embedded doorbell critical interrupt */
            cpu_abort(cs, "Doorbell critical interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_RESET:    /* System reset exception */
            cpu_abort(cs, "Reset interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_DSEG:     /* Data segment exception */
            cpu_abort(cs, "Data segment exception while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_ISEG:     /* Instruction segment exception */
            cpu_abort(cs, "Instruction segment exception "
                      "while in user mode. Aborting\n");
            break;
        /* PowerPC 64 with hypervisor mode support */
        case POWERPC_EXCP_HDECR:    /* Hypervisor decrementer exception */
            cpu_abort(cs, "Hypervisor decrementer interrupt "
                      "while in user mode. Aborting\n");
            break;
        case POWERPC_EXCP_TRACE:    /* Trace exception */
            /*
             * we use this exception to emulate step-by-step execution mode.
             */
            break;
        /* PowerPC 64 with hypervisor mode support */
        case POWERPC_EXCP_HDSI:     /* Hypervisor data storage exception */
            cpu_abort(cs, "Hypervisor data storage exception "
                      "while in user mode. Aborting\n");
            break;
        case POWERPC_EXCP_HISI:     /* Hypervisor instruction storage excp */
            cpu_abort(cs, "Hypervisor instruction storage exception "
                      "while in user mode. Aborting\n");
            break;
        case POWERPC_EXCP_HDSEG:    /* Hypervisor data segment exception */
            cpu_abort(cs, "Hypervisor data segment exception "
                      "while in user mode. Aborting\n");
            break;
        case POWERPC_EXCP_HISEG:    /* Hypervisor instruction segment excp */
            cpu_abort(cs, "Hypervisor instruction segment exception "
                      "while in user mode. Aborting\n");
            break;
        case POWERPC_EXCP_VPU:      /* Vector unavailable exception */
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case POWERPC_EXCP_PIT:      /* Programmable interval timer IRQ */
            cpu_abort(cs, "Programmable interval timer interrupt "
                      "while in user mode. Aborting\n");
            break;
        case POWERPC_EXCP_IO:       /* IO error exception */
            cpu_abort(cs, "IO error exception while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_RUNM:     /* Run mode exception */
            cpu_abort(cs, "Run mode exception while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_EMUL:     /* Emulation trap exception */
            cpu_abort(cs, "Emulation trap exception not handled\n");
            break;
        case POWERPC_EXCP_IFTLB:    /* Instruction fetch TLB error */
            cpu_abort(cs, "Instruction fetch TLB exception "
                      "while in user-mode. Aborting");
            break;
        case POWERPC_EXCP_DLTLB:    /* Data load TLB miss */
            cpu_abort(cs, "Data load TLB exception while in user-mode. "
                      "Aborting");
            break;
        case POWERPC_EXCP_DSTLB:    /* Data store TLB miss */
            cpu_abort(cs, "Data store TLB exception while in user-mode. "
                      "Aborting");
            break;
        case POWERPC_EXCP_FPA:      /* Floating-point assist exception */
            cpu_abort(cs, "Floating-point assist exception not handled\n");
            break;
        case POWERPC_EXCP_IABR:     /* Instruction address breakpoint */
            cpu_abort(cs, "Instruction address breakpoint exception "
                      "not handled\n");
            break;
        case POWERPC_EXCP_SMI:      /* System management interrupt */
            cpu_abort(cs, "System management interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_THERM:    /* Thermal interrupt */
            cpu_abort(cs, "Thermal interrupt interrupt while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_PERFM:    /* Embedded performance monitor IRQ */
            cpu_abort(cs, "Performance monitor exception not handled\n");
            break;
        case POWERPC_EXCP_VPUA:     /* Vector assist exception */
            cpu_abort(cs, "Vector assist exception not handled\n");
            break;
        case POWERPC_EXCP_SOFTP:    /* Soft patch exception */
            cpu_abort(cs, "Soft patch exception not handled\n");
            break;
        case POWERPC_EXCP_MAINT:    /* Maintenance exception */
            cpu_abort(cs, "Maintenance exception while in user mode. "
                      "Aborting\n");
            break;
        case POWERPC_EXCP_STOP:     /* stop translation */
            /* We did invalidate the instruction cache. Go on */
            break;
        case POWERPC_EXCP_BRANCH:   /* branch instruction: */
            /* We just stopped because of a branch. Go on */
            break;
        case POWERPC_EXCP_SYSCALL_USER:
            /* system call in user-mode emulation */
            /* WARNING:
             * PPC ABI uses overflow flag in cr0 to signal an error
             * in syscalls.
             */
            env->crf[0] &= ~0x1;
            ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4],
                             env->gpr[5], env->gpr[6], env->gpr[7],
                             env->gpr[8], 0, 0);
            if (ret == -TARGET_ERESTARTSYS) {
                break;
            }
            if (ret == (target_ulong)(-TARGET_QEMU_ESIGRETURN)) {
                /* Returning from a successful sigreturn syscall.
                   Avoid corrupting register state.  */
                break;
            }
            if (ret > (target_ulong)(-515)) {
                env->crf[0] |= 0x1;
                ret = -ret;
            }
            env->gpr[3] = ret;
            break;
        case POWERPC_EXCP_STCX:
            if (do_store_exclusive(env)) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->nip;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            }
            break;
        case EXCP_DEBUG:
            {
                int sig;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                if (sig) {
                    info.si_signo = sig;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                }
            }
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_ATOMIC:
            cpu_exec_step_atomic(cs);
            break;
        default:
            cpu_abort(cs, "Unknown exception 0x%x. Aborting\n", trapnr);
            break;
        }
        process_pending_signals(env);
    }
}
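
/*
 * PPC syscall error convention used above: failure is reported by setting
 * cr0.SO (the 0x1 bit of crf[0]) and returning the positive errno in r3,
 * so the loop clears that bit before do_syscall() and sets it again for
 * return values in the -515..-1 range.
 */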
# ifdef TARGET_ABI_MIPSO32
#  define MIPS_SYS(name, args) args,
static const uint8_t mips_syscall_args[] = {
        MIPS_SYS(sys_syscall, 8)        /* 4000 */
        MIPS_SYS(sys_exit, 1)
        MIPS_SYS(sys_fork, 0)
        MIPS_SYS(sys_read, 3)
        MIPS_SYS(sys_write, 3)
        MIPS_SYS(sys_open, 3)           /* 4005 */
        MIPS_SYS(sys_close, 1)
        MIPS_SYS(sys_waitpid, 3)
        MIPS_SYS(sys_creat, 2)
        MIPS_SYS(sys_link, 2)
        MIPS_SYS(sys_unlink, 1)         /* 4010 */
        MIPS_SYS(sys_execve, 0)
        MIPS_SYS(sys_chdir, 1)
        MIPS_SYS(sys_time, 1)
        MIPS_SYS(sys_mknod, 3)
        MIPS_SYS(sys_chmod, 2)          /* 4015 */
        MIPS_SYS(sys_lchown, 3)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_ni_syscall, 0)     /* was sys_stat */
        MIPS_SYS(sys_lseek, 3)
        MIPS_SYS(sys_getpid, 0)         /* 4020 */
        MIPS_SYS(sys_mount, 5)
        MIPS_SYS(sys_umount, 1)
        MIPS_SYS(sys_setuid, 1)
        MIPS_SYS(sys_getuid, 0)
        MIPS_SYS(sys_stime, 1)          /* 4025 */
        MIPS_SYS(sys_ptrace, 4)
        MIPS_SYS(sys_alarm, 1)
        MIPS_SYS(sys_ni_syscall, 0)     /* was sys_fstat */
        MIPS_SYS(sys_pause, 0)
        MIPS_SYS(sys_utime, 2)          /* 4030 */
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_access, 2)
        MIPS_SYS(sys_nice, 1)
        MIPS_SYS(sys_ni_syscall, 0)     /* 4035 */
        MIPS_SYS(sys_sync, 0)
        MIPS_SYS(sys_kill, 2)
        MIPS_SYS(sys_rename, 2)
        MIPS_SYS(sys_mkdir, 2)
        MIPS_SYS(sys_rmdir, 1)          /* 4040 */
        MIPS_SYS(sys_dup, 1)
        MIPS_SYS(sys_pipe, 0)
        MIPS_SYS(sys_times, 1)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_brk, 1)            /* 4045 */
        MIPS_SYS(sys_setgid, 1)
        MIPS_SYS(sys_getgid, 0)
        MIPS_SYS(sys_ni_syscall, 0)     /* was signal(2) */
        MIPS_SYS(sys_geteuid, 0)
        MIPS_SYS(sys_getegid, 0)        /* 4050 */
        MIPS_SYS(sys_acct, 0)
        MIPS_SYS(sys_umount2, 2)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_ioctl, 3)
        MIPS_SYS(sys_fcntl, 3)          /* 4055 */
        MIPS_SYS(sys_ni_syscall, 2)
        MIPS_SYS(sys_setpgid, 2)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_olduname, 1)
        MIPS_SYS(sys_umask, 1)          /* 4060 */
        MIPS_SYS(sys_chroot, 1)
        MIPS_SYS(sys_ustat, 2)
        MIPS_SYS(sys_dup2, 2)
        MIPS_SYS(sys_getppid, 0)
        MIPS_SYS(sys_getpgrp, 0)        /* 4065 */
        MIPS_SYS(sys_setsid, 0)
        MIPS_SYS(sys_sigaction, 3)
        MIPS_SYS(sys_sgetmask, 0)
        MIPS_SYS(sys_ssetmask, 1)
        MIPS_SYS(sys_setreuid, 2)       /* 4070 */
        MIPS_SYS(sys_setregid, 2)
        MIPS_SYS(sys_sigsuspend, 0)
        MIPS_SYS(sys_sigpending, 1)
        MIPS_SYS(sys_sethostname, 2)
        MIPS_SYS(sys_setrlimit, 2)      /* 4075 */
        MIPS_SYS(sys_getrlimit, 2)
        MIPS_SYS(sys_getrusage, 2)
        MIPS_SYS(sys_gettimeofday, 2)
        MIPS_SYS(sys_settimeofday, 2)
        MIPS_SYS(sys_getgroups, 2)      /* 4080 */
        MIPS_SYS(sys_setgroups, 2)
        MIPS_SYS(sys_ni_syscall, 0)     /* old_select */
        MIPS_SYS(sys_symlink, 2)
        MIPS_SYS(sys_ni_syscall, 0)     /* was sys_lstat */
        MIPS_SYS(sys_readlink, 3)       /* 4085 */
        MIPS_SYS(sys_uselib, 1)
        MIPS_SYS(sys_swapon, 2)
        MIPS_SYS(sys_reboot, 3)
        MIPS_SYS(old_readdir, 3)
        MIPS_SYS(old_mmap, 6)           /* 4090 */
        MIPS_SYS(sys_munmap, 2)
        MIPS_SYS(sys_truncate, 2)
        MIPS_SYS(sys_ftruncate, 2)
        MIPS_SYS(sys_fchmod, 2)
        MIPS_SYS(sys_fchown, 3)         /* 4095 */
        MIPS_SYS(sys_getpriority, 2)
        MIPS_SYS(sys_setpriority, 3)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_statfs, 2)
        MIPS_SYS(sys_fstatfs, 2)        /* 4100 */
        MIPS_SYS(sys_ni_syscall, 0)     /* was ioperm(2) */
        MIPS_SYS(sys_socketcall, 2)
        MIPS_SYS(sys_syslog, 3)
        MIPS_SYS(sys_setitimer, 3)
        MIPS_SYS(sys_getitimer, 2)      /* 4105 */
        MIPS_SYS(sys_newstat, 2)
        MIPS_SYS(sys_newlstat, 2)
        MIPS_SYS(sys_newfstat, 2)
        MIPS_SYS(sys_uname, 1)
        MIPS_SYS(sys_ni_syscall, 0)     /* 4110 was iopl(2) */
        MIPS_SYS(sys_vhangup, 0)
        MIPS_SYS(sys_ni_syscall, 0)     /* was sys_idle() */
        MIPS_SYS(sys_ni_syscall, 0)     /* was sys_vm86 */
        MIPS_SYS(sys_wait4, 4)
        MIPS_SYS(sys_swapoff, 1)        /* 4115 */
        MIPS_SYS(sys_sysinfo, 1)
        MIPS_SYS(sys_ipc, 6)
        MIPS_SYS(sys_fsync, 1)
        MIPS_SYS(sys_sigreturn, 0)
        MIPS_SYS(sys_clone, 6)          /* 4120 */
        MIPS_SYS(sys_setdomainname, 2)
        MIPS_SYS(sys_newuname, 1)
        MIPS_SYS(sys_ni_syscall, 0)     /* sys_modify_ldt */
        MIPS_SYS(sys_adjtimex, 1)
        MIPS_SYS(sys_mprotect, 3)       /* 4125 */
        MIPS_SYS(sys_sigprocmask, 3)
        MIPS_SYS(sys_ni_syscall, 0)     /* was create_module */
        MIPS_SYS(sys_init_module, 5)
        MIPS_SYS(sys_delete_module, 1)
        MIPS_SYS(sys_ni_syscall, 0)     /* 4130 was get_kernel_syms */
        MIPS_SYS(sys_quotactl, 0)
        MIPS_SYS(sys_getpgid, 1)
        MIPS_SYS(sys_fchdir, 1)
        MIPS_SYS(sys_bdflush, 2)
        MIPS_SYS(sys_sysfs, 3)          /* 4135 */
        MIPS_SYS(sys_personality, 1)
        MIPS_SYS(sys_ni_syscall, 0)     /* for afs_syscall */
        MIPS_SYS(sys_setfsuid, 1)
        MIPS_SYS(sys_setfsgid, 1)
        MIPS_SYS(sys_llseek, 5)         /* 4140 */
        MIPS_SYS(sys_getdents, 3)
        MIPS_SYS(sys_select, 5)
        MIPS_SYS(sys_flock, 2)
        MIPS_SYS(sys_msync, 3)
        MIPS_SYS(sys_readv, 3)          /* 4145 */
        MIPS_SYS(sys_writev, 3)
        MIPS_SYS(sys_cacheflush, 3)
        MIPS_SYS(sys_cachectl, 3)
        MIPS_SYS(sys_sysmips, 4)
        MIPS_SYS(sys_ni_syscall, 0)     /* 4150 */
        MIPS_SYS(sys_getsid, 1)
        MIPS_SYS(sys_fdatasync, 0)
        MIPS_SYS(sys_sysctl, 1)
        MIPS_SYS(sys_mlock, 2)
        MIPS_SYS(sys_munlock, 2)        /* 4155 */
        MIPS_SYS(sys_mlockall, 1)
        MIPS_SYS(sys_munlockall, 0)
        MIPS_SYS(sys_sched_setparam, 2)
        MIPS_SYS(sys_sched_getparam, 2)
        MIPS_SYS(sys_sched_setscheduler, 3)     /* 4160 */
        MIPS_SYS(sys_sched_getscheduler, 1)
        MIPS_SYS(sys_sched_yield, 0)
        MIPS_SYS(sys_sched_get_priority_max, 1)
        MIPS_SYS(sys_sched_get_priority_min, 1)
        MIPS_SYS(sys_sched_rr_get_interval, 2)  /* 4165 */
        MIPS_SYS(sys_nanosleep, 2)
        MIPS_SYS(sys_mremap, 5)
        MIPS_SYS(sys_accept, 3)
        MIPS_SYS(sys_bind, 3)
        MIPS_SYS(sys_connect, 3)        /* 4170 */
        MIPS_SYS(sys_getpeername, 3)
        MIPS_SYS(sys_getsockname, 3)
        MIPS_SYS(sys_getsockopt, 5)
        MIPS_SYS(sys_listen, 2)
        MIPS_SYS(sys_recv, 4)           /* 4175 */
        MIPS_SYS(sys_recvfrom, 6)
        MIPS_SYS(sys_recvmsg, 3)
        MIPS_SYS(sys_send, 4)
        MIPS_SYS(sys_sendmsg, 3)
        MIPS_SYS(sys_sendto, 6)         /* 4180 */
        MIPS_SYS(sys_setsockopt, 5)
        MIPS_SYS(sys_shutdown, 2)
        MIPS_SYS(sys_socket, 3)
        MIPS_SYS(sys_socketpair, 4)
        MIPS_SYS(sys_setresuid, 3)      /* 4185 */
        MIPS_SYS(sys_getresuid, 3)
        MIPS_SYS(sys_ni_syscall, 0)     /* was sys_query_module */
        MIPS_SYS(sys_poll, 3)
        MIPS_SYS(sys_nfsservctl, 3)
        MIPS_SYS(sys_setresgid, 3)      /* 4190 */
        MIPS_SYS(sys_getresgid, 3)
        MIPS_SYS(sys_prctl, 5)
        MIPS_SYS(sys_rt_sigreturn, 0)
        MIPS_SYS(sys_rt_sigaction, 4)
        MIPS_SYS(sys_rt_sigprocmask, 4) /* 4195 */
        MIPS_SYS(sys_rt_sigpending, 2)
        MIPS_SYS(sys_rt_sigtimedwait, 4)
        MIPS_SYS(sys_rt_sigqueueinfo, 3)
        MIPS_SYS(sys_rt_sigsuspend, 0)
        MIPS_SYS(sys_pread64, 6)        /* 4200 */
        MIPS_SYS(sys_pwrite64, 6)
        MIPS_SYS(sys_chown, 3)
        MIPS_SYS(sys_getcwd, 2)
        MIPS_SYS(sys_capget, 2)
        MIPS_SYS(sys_capset, 2)         /* 4205 */
        MIPS_SYS(sys_sigaltstack, 2)
        MIPS_SYS(sys_sendfile, 4)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_mmap2, 6)          /* 4210 */
        MIPS_SYS(sys_truncate64, 4)
        MIPS_SYS(sys_ftruncate64, 4)
        MIPS_SYS(sys_stat64, 2)
        MIPS_SYS(sys_lstat64, 2)
        MIPS_SYS(sys_fstat64, 2)        /* 4215 */
        MIPS_SYS(sys_pivot_root, 2)
        MIPS_SYS(sys_mincore, 3)
        MIPS_SYS(sys_madvise, 3)
        MIPS_SYS(sys_getdents64, 3)
        MIPS_SYS(sys_fcntl64, 3)        /* 4220 */
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_gettid, 0)
        MIPS_SYS(sys_readahead, 5)
        MIPS_SYS(sys_setxattr, 5)
        MIPS_SYS(sys_lsetxattr, 5)      /* 4225 */
        MIPS_SYS(sys_fsetxattr, 5)
        MIPS_SYS(sys_getxattr, 4)
        MIPS_SYS(sys_lgetxattr, 4)
        MIPS_SYS(sys_fgetxattr, 4)
        MIPS_SYS(sys_listxattr, 3)      /* 4230 */
        MIPS_SYS(sys_llistxattr, 3)
        MIPS_SYS(sys_flistxattr, 3)
        MIPS_SYS(sys_removexattr, 2)
        MIPS_SYS(sys_lremovexattr, 2)
        MIPS_SYS(sys_fremovexattr, 2)   /* 4235 */
        MIPS_SYS(sys_tkill, 2)
        MIPS_SYS(sys_sendfile64, 5)
        MIPS_SYS(sys_futex, 6)
        MIPS_SYS(sys_sched_setaffinity, 3)
        MIPS_SYS(sys_sched_getaffinity, 3)      /* 4240 */
        MIPS_SYS(sys_io_setup, 2)
        MIPS_SYS(sys_io_destroy, 1)
        MIPS_SYS(sys_io_getevents, 5)
        MIPS_SYS(sys_io_submit, 3)
        MIPS_SYS(sys_io_cancel, 3)      /* 4245 */
        MIPS_SYS(sys_exit_group, 1)
        MIPS_SYS(sys_lookup_dcookie, 3)
        MIPS_SYS(sys_epoll_create, 1)
        MIPS_SYS(sys_epoll_ctl, 4)
        MIPS_SYS(sys_epoll_wait, 3)     /* 4250 */
        MIPS_SYS(sys_remap_file_pages, 5)
        MIPS_SYS(sys_set_tid_address, 1)
        MIPS_SYS(sys_restart_syscall, 0)
        MIPS_SYS(sys_fadvise64_64, 7)
        MIPS_SYS(sys_statfs64, 3)       /* 4255 */
        MIPS_SYS(sys_fstatfs64, 2)
        MIPS_SYS(sys_timer_create, 3)
        MIPS_SYS(sys_timer_settime, 4)
        MIPS_SYS(sys_timer_gettime, 2)
        MIPS_SYS(sys_timer_getoverrun, 1)       /* 4260 */
        MIPS_SYS(sys_timer_delete, 1)
        MIPS_SYS(sys_clock_settime, 2)
        MIPS_SYS(sys_clock_gettime, 2)
        MIPS_SYS(sys_clock_getres, 2)
        MIPS_SYS(sys_clock_nanosleep, 4)        /* 4265 */
        MIPS_SYS(sys_tgkill, 3)
        MIPS_SYS(sys_utimes, 2)
        MIPS_SYS(sys_mbind, 4)
        MIPS_SYS(sys_ni_syscall, 0)     /* sys_get_mempolicy */
        MIPS_SYS(sys_ni_syscall, 0)     /* 4270 sys_set_mempolicy */
        MIPS_SYS(sys_mq_open, 4)
        MIPS_SYS(sys_mq_unlink, 1)
        MIPS_SYS(sys_mq_timedsend, 5)
        MIPS_SYS(sys_mq_timedreceive, 5)
        MIPS_SYS(sys_mq_notify, 2)      /* 4275 */
        MIPS_SYS(sys_mq_getsetattr, 3)
        MIPS_SYS(sys_ni_syscall, 0)     /* sys_vserver */
        MIPS_SYS(sys_waitid, 4)
        MIPS_SYS(sys_ni_syscall, 0)     /* available, was setaltroot */
        MIPS_SYS(sys_add_key, 5)
        MIPS_SYS(sys_request_key, 4)
        MIPS_SYS(sys_keyctl, 5)
        MIPS_SYS(sys_set_thread_area, 1)
        MIPS_SYS(sys_inotify_init, 0)
        MIPS_SYS(sys_inotify_add_watch, 3)      /* 4285 */
        MIPS_SYS(sys_inotify_rm_watch, 2)
        MIPS_SYS(sys_migrate_pages, 4)
        MIPS_SYS(sys_openat, 4)
        MIPS_SYS(sys_mkdirat, 3)
        MIPS_SYS(sys_mknodat, 4)        /* 4290 */
        MIPS_SYS(sys_fchownat, 5)
        MIPS_SYS(sys_futimesat, 3)
        MIPS_SYS(sys_fstatat64, 4)
        MIPS_SYS(sys_unlinkat, 3)
        MIPS_SYS(sys_renameat, 4)       /* 4295 */
        MIPS_SYS(sys_linkat, 5)
        MIPS_SYS(sys_symlinkat, 3)
        MIPS_SYS(sys_readlinkat, 4)
        MIPS_SYS(sys_fchmodat, 3)
        MIPS_SYS(sys_faccessat, 3)      /* 4300 */
        MIPS_SYS(sys_pselect6, 6)
        MIPS_SYS(sys_ppoll, 5)
        MIPS_SYS(sys_unshare, 1)
        MIPS_SYS(sys_splice, 6)
        MIPS_SYS(sys_sync_file_range, 7)        /* 4305 */
        MIPS_SYS(sys_tee, 4)
        MIPS_SYS(sys_vmsplice, 4)
        MIPS_SYS(sys_move_pages, 6)
        MIPS_SYS(sys_set_robust_list, 2)
        MIPS_SYS(sys_get_robust_list, 3)        /* 4310 */
        MIPS_SYS(sys_kexec_load, 4)
        MIPS_SYS(sys_getcpu, 3)
        MIPS_SYS(sys_epoll_pwait, 6)
        MIPS_SYS(sys_ioprio_set, 3)
        MIPS_SYS(sys_ioprio_get, 2)
        MIPS_SYS(sys_utimensat, 4)
        MIPS_SYS(sys_signalfd, 3)
        MIPS_SYS(sys_ni_syscall, 0)     /* was timerfd */
        MIPS_SYS(sys_eventfd, 1)
        MIPS_SYS(sys_fallocate, 6)      /* 4320 */
        MIPS_SYS(sys_timerfd_create, 2)
        MIPS_SYS(sys_timerfd_gettime, 2)
        MIPS_SYS(sys_timerfd_settime, 4)
        MIPS_SYS(sys_signalfd4, 4)
        MIPS_SYS(sys_eventfd2, 2)       /* 4325 */
        MIPS_SYS(sys_epoll_create1, 1)
        MIPS_SYS(sys_dup3, 3)
        MIPS_SYS(sys_pipe2, 2)
        MIPS_SYS(sys_inotify_init1, 1)
        MIPS_SYS(sys_preadv, 5)         /* 4330 */
        MIPS_SYS(sys_pwritev, 5)
        MIPS_SYS(sys_rt_tgsigqueueinfo, 4)
        MIPS_SYS(sys_perf_event_open, 5)
        MIPS_SYS(sys_accept4, 4)
        MIPS_SYS(sys_recvmmsg, 5)       /* 4335 */
        MIPS_SYS(sys_fanotify_init, 2)
        MIPS_SYS(sys_fanotify_mark, 6)
        MIPS_SYS(sys_prlimit64, 4)
        MIPS_SYS(sys_name_to_handle_at, 5)
        MIPS_SYS(sys_open_by_handle_at, 3)      /* 4340 */
        MIPS_SYS(sys_clock_adjtime, 2)
        MIPS_SYS(sys_syncfs, 1)
        MIPS_SYS(sys_sendmmsg, 4)
        MIPS_SYS(sys_setns, 2)
        MIPS_SYS(sys_process_vm_readv, 6)       /* 345 */
        MIPS_SYS(sys_process_vm_writev, 6)
        MIPS_SYS(sys_kcmp, 5)
        MIPS_SYS(sys_finit_module, 3)
        MIPS_SYS(sys_sched_setattr, 2)
        MIPS_SYS(sys_sched_getattr, 3)  /* 350 */
        MIPS_SYS(sys_renameat2, 5)
        MIPS_SYS(sys_seccomp, 3)
        MIPS_SYS(sys_getrandom, 3)
        MIPS_SYS(sys_memfd_create, 2)
        MIPS_SYS(sys_bpf, 3)            /* 355 */
        MIPS_SYS(sys_execveat, 5)
        MIPS_SYS(sys_userfaultfd, 1)
        MIPS_SYS(sys_membarrier, 2)
        MIPS_SYS(sys_mlock2, 3)
        MIPS_SYS(sys_copy_file_range, 6)        /* 360 */
        MIPS_SYS(sys_preadv2, 6)
        MIPS_SYS(sys_pwritev2, 6)
};
#  undef MIPS_SYS
# endif /* O32 */
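
/*
 * How this table is consumed (see the o32 path in cpu_loop() below): the
 * syscall number arrives in $v0 (gpr[2]) biased by 4000, so
 * mips_syscall_args[gpr[2] - 4000] gives the argument count; arguments
 * beyond the four register slots a0-a3 are fetched from the stack at
 * sp+16..sp+28.
 */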
static int do_store_exclusive(CPUMIPSState *env)
{
    target_ulong addr;
    target_ulong page_addr;
    target_ulong val;
    int flags;
    int segv = 0;
    int reg;
    int d;

    addr = env->lladdr;
    page_addr = addr & TARGET_PAGE_MASK;
    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {
        segv = 1;
    } else {
        reg = env->llreg & 0x1f;
        d = (env->llreg & 0x20) != 0;
        if (d) {
            segv = get_user_s64(val, addr);
        } else {
            segv = get_user_s32(val, addr);
        }
        if (!segv) {
            if (val != env->llval) {
                env->active_tc.gpr[reg] = 0;
            } else {
                if (d) {
                    segv = put_user_u64(env->llnewval, addr);
                } else {
                    segv = put_user_u32(env->llnewval, addr);
                }
                if (!segv) {
                    env->active_tc.gpr[reg] = 1;
                }
            }
        }
    }
    if (!segv) {
        env->active_tc.PC += 4;
    }
    return segv;
}
*env
, target_siginfo_t
*info
,
2226 info
->si_signo
= TARGET_SIGFPE
;
2228 info
->si_code
= (code
== BRK_OVERFLOW
) ? FPE_INTOVF
: FPE_INTDIV
;
2229 queue_signal(env
, info
->si_signo
, QEMU_SI_FAULT
, &*info
);
2233 info
->si_signo
= TARGET_SIGTRAP
;
2235 queue_signal(env
, info
->si_signo
, QEMU_SI_FAULT
, &*info
);
2243 void cpu_loop(CPUMIPSState
*env
)
2245 CPUState
*cs
= CPU(mips_env_get_cpu(env
));
2246 target_siginfo_t info
;
2249 # ifdef TARGET_ABI_MIPSO32
2250 unsigned int syscall_num
;
2255 trapnr
= cpu_exec(cs
);
2257 process_queued_cpu_work(cs
);
2261 env
->active_tc
.PC
+= 4;
2262 # ifdef TARGET_ABI_MIPSO32
2263 syscall_num
= env
->active_tc
.gpr
[2] - 4000;
2264 if (syscall_num
>= sizeof(mips_syscall_args
)) {
2265 ret
= -TARGET_ENOSYS
;
2269 abi_ulong arg5
= 0, arg6
= 0, arg7
= 0, arg8
= 0;
2271 nb_args
= mips_syscall_args
[syscall_num
];
2272 sp_reg
= env
->active_tc
.gpr
[29];
2274 /* these arguments are taken from the stack */
2276 if ((ret
= get_user_ual(arg8
, sp_reg
+ 28)) != 0) {
2280 if ((ret
= get_user_ual(arg7
, sp_reg
+ 24)) != 0) {
2284 if ((ret
= get_user_ual(arg6
, sp_reg
+ 20)) != 0) {
2288 if ((ret
= get_user_ual(arg5
, sp_reg
+ 16)) != 0) {
2294 ret
= do_syscall(env, env->active_tc.gpr[2],
             env->active_tc.gpr[4],
             env->active_tc.gpr[5],
             env->active_tc.gpr[6],
             env->active_tc.gpr[7],
             arg5, arg6, arg7, arg8);
            ret = do_syscall(env, env->active_tc.gpr[2],
                             env->active_tc.gpr[4], env->active_tc.gpr[5],
                             env->active_tc.gpr[6], env->active_tc.gpr[7],
                             env->active_tc.gpr[8], env->active_tc.gpr[9],
                             env->active_tc.gpr[10], env->active_tc.gpr[11]);
            if (ret == -TARGET_ERESTARTSYS) {
                env->active_tc.PC -= 4;
            if (ret == -TARGET_QEMU_ESIGRETURN) {
                /* Returning from a successful sigreturn syscall.
                   Avoid clobbering register state. */
            if ((abi_ulong)ret >= (abi_ulong)-1133) {
                env->active_tc.gpr[7] = 1; /* error flag */
                env->active_tc.gpr[7] = 0; /* error flag */
            env->active_tc.gpr[2] = ret;
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->CP0_BadVAddr;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = TARGET_SIGILL;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            if (do_store_exclusive(env)) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->active_tc.PC;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            /* The code below was inspired by the MIPS Linux kernel trap
             * handling code in arch/mips/kernel/traps.c.
             */
            abi_ulong trap_instr;
            if (env->hflags & MIPS_HFLAG_M16) {
                if (env->insn_flags & ASE_MICROMIPS) {
                    /* microMIPS mode */
                    ret = get_user_u16(trap_instr, env->active_tc.PC);
                    if ((trap_instr >> 10) == 0x11) {
                        /* 16-bit instruction */
                        code = trap_instr & 0xf;
                        /* 32-bit instruction */
                        ret = get_user_u16(instr_lo,
                                           env->active_tc.PC + 2);
                        trap_instr = (trap_instr << 16) | instr_lo;
                        code = ((trap_instr >> 6) & ((1 << 20) - 1));
                        /* Unfortunately, microMIPS also suffers from
                           the old assembler bug... */
                        if (code >= (1 << 10)) {
                ret = get_user_u16(trap_instr, env->active_tc.PC);
                code = (trap_instr >> 6) & 0x3f;
                ret = get_user_u32(trap_instr, env->active_tc.PC);
            /* As described in the original Linux kernel code, the
             * below checks on 'code' are to work around an old
             */
            code = ((trap_instr >> 6) & ((1 << 20) - 1));
            if (code >= (1 << 10)) {
            if (do_break(env, &info, code) != 0) {
            abi_ulong trap_instr;
            unsigned int code = 0;
            if (env->hflags & MIPS_HFLAG_M16) {
                /* microMIPS mode */
                ret = get_user_u16(instr[0], env->active_tc.PC) ||
                      get_user_u16(instr[1], env->active_tc.PC + 2);
                trap_instr = (instr[0] << 16) | instr[1];
                ret = get_user_u32(trap_instr, env->active_tc.PC);
            /* The immediate versions don't provide a code.  */
            if (!(trap_instr & 0xFC000000)) {
                if (env->hflags & MIPS_HFLAG_M16) {
                    /* microMIPS mode */
                    code = ((trap_instr >> 12) & ((1 << 4) - 1));
                    code = ((trap_instr >> 6) & ((1 << 10) - 1));
            if (do_break(env, &info, code) != 0) {
            cpu_exec_step_atomic(cs);
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
        process_pending_signals(env);
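/*
 * Background on the syscall return convention handled above (summarised
 * here, not taken verbatim from the kernel sources): on MIPS Linux, v0
 * (gpr[2]) carries the syscall result or a positive errno, and a3 (gpr[7])
 * acts as an error flag.  The (abi_ulong)-1133 bound presumably matches the
 * highest MIPS errno number, so only returns in [-1133, -1] are flagged as
 * errors.
 */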
void cpu_loop(CPUNios2State *env)
    CPUState *cs = ENV_GET_CPU(env);
    Nios2CPU *cpu = NIOS2_CPU(cs);
    target_siginfo_t info;
    int trapnr, gdbsig, ret;
        trapnr = cpu_exec(cs);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            if (env->regs[R_AT] == 0) {
                qemu_log_mask(CPU_LOG_INT, "\nSyscall\n");
                ret = do_syscall(env, env->regs[2],
                                 env->regs[4], env->regs[5], env->regs[6],
                                 env->regs[7], env->regs[8], env->regs[9],
                if (env->regs[2] == 0) {    /* FIXME: syscall 0 workaround */
                env->regs[2] = abs(ret);
                /* Return value is 0..4096 */
                env->regs[7] = (ret > 0xfffffffffffff000ULL);
                env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
                env->regs[CR_STATUS] &= ~0x3;
                env->regs[R_EA] = env->regs[R_PC] + 4;
                env->regs[R_PC] += 4;
                qemu_log_mask(CPU_LOG_INT, "\nTrap\n");
                env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
                env->regs[CR_STATUS] &= ~0x3;
                env->regs[R_EA] = env->regs[R_PC] + 4;
                env->regs[R_PC] = cpu->exception_addr;
                gdbsig = TARGET_SIGTRAP;
            switch (env->regs[R_PC]) {
            /*case 0x1000:*/  /* TODO:__kuser_helper_version */
            case 0x1004:      /* __kuser_cmpxchg */
                if (env->regs[4] & 0x3) {
                ret = get_user_u32(env->regs[2], env->regs[4]);
                env->regs[2] -= env->regs[5];
                if (env->regs[2] == 0) {
                    put_user_u32(env->regs[6], env->regs[4]);
                env->regs[R_PC] = env->regs[R_RA];
            /*case 0x1040:*/  /* TODO:__kuser_sigtramp */
            info.si_signo = TARGET_SIGSEGV;
            /* TODO: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->regs[R_PC];
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n",
            gdbsig = TARGET_SIGILL;
        gdb_handlesig(cs, gdbsig);
        if (gdbsig != TARGET_SIGTRAP) {
        process_pending_signals(env);
#endif /* TARGET_NIOS2 */
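/*
 * Sketch of how a guest would use the __kuser_cmpxchg helper emulated in the
 * TARGET_NIOS2 cpu_loop above (illustration only; the kuser ABI itself is
 * defined by the nios2 kernel, not here):
 *
 *     typedef int (*kuser_cmpxchg_fn)(int *ptr, int oldval, int newval);
 *     kuser_cmpxchg_fn kuser_cmpxchg = (kuser_cmpxchg_fn)0x1004;
 *     // returns 0 when *ptr equalled oldval and was replaced with newval
 *     while (kuser_cmpxchg(&lock, 0, 1) != 0) {
 *         // spin
 *     }
 *
 * This matches the register usage visible above: r4 = pointer, r5 = expected
 * value, r6 = new value, and r2 = (old - expected) on return.
 */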
#ifdef TARGET_OPENRISC
void cpu_loop(CPUOpenRISCState *env)
    CPUState *cs = CPU(openrisc_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_exec(cs);
        process_queued_cpu_work(cs);
            env->pc += 4;   /* 0xc00; */
            ret = do_syscall(env,
                             cpu_get_gpr(env, 11), /* return value       */
                             cpu_get_gpr(env, 3),  /* r3 - r7 are params */
                             cpu_get_gpr(env, 4),
                             cpu_get_gpr(env, 5),
                             cpu_get_gpr(env, 6),
                             cpu_get_gpr(env, 7),
                             cpu_get_gpr(env, 8), 0, 0);
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                cpu_set_gpr(env, 11, ret);
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = TARGET_SIGFPE;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case EXCP_INTERRUPT:
            /* We processed the pending cpu work above.  */
            trapnr = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = trapnr;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            cpu_exec_step_atomic(cs);
            g_assert_not_reached();
        process_pending_signals(env);
#endif /* TARGET_OPENRISC */
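/*
 * Note on the OpenRISC loop above: r11 is both the syscall number passed to
 * do_syscall() and the register that receives the return value, while
 * r3..r8 supply the arguments (the inline comment above only mentions
 * r3-r7).  This is a reading of the code above rather than an authoritative
 * statement of the or1k kernel ABI.
 */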
void cpu_loop(CPUSH4State *env)
    CPUState *cs = CPU(sh_env_get_cpu(env));
    target_siginfo_t info;
    bool arch_interrupt = true;
        trapnr = cpu_exec(cs);
        process_queued_cpu_work(cs);
            ret = do_syscall(env,
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->gregs[0] = ret;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            arch_interrupt = false;
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->tea;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            cpu_exec_step_atomic(cs);
            arch_interrupt = false;
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
        /* Most of the traps imply an exception or interrupt, which
           implies an REI instruction has been executed.  Which means
           that LDST (aka LOK_ADDR) should be cleared.  But there are
           a few exceptions for traps internal to QEMU.  */
        if (arch_interrupt) {
            env->lock_addr = -1;
void cpu_loop(CPUCRISState *env)
    CPUState *cs = CPU(cris_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_exec(cs);
        process_queued_cpu_work(cs);
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->pregs[PR_EDA];
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            ret = do_syscall(env,
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[10] = ret;
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            cpu_exec_step_atomic(cs);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
#ifdef TARGET_MICROBLAZE
void cpu_loop(CPUMBState *env)
    CPUState *cs = CPU(mb_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_exec(cs);
        process_queued_cpu_work(cs);
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            /* Return address is 4 bytes after the call.  */
            env->sregs[SR_PC] = env->regs[14];
            ret = do_syscall(env,
            if (ret == -TARGET_ERESTARTSYS) {
                /* Wind back to before the syscall. */
                env->sregs[SR_PC] -= 4;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
            /* All syscall exits result in guest r14 being equal to the
             * PC we return to, because the kernel syscall exit "rtbd" does
             * this. (This is true even for sigreturn(); note that r14 is
             * not a userspace-usable register, as the kernel may clobber it
             */
            env->regs[14] = env->sregs[SR_PC];
            env->regs[17] = env->sregs[SR_PC] + 4;
            if (env->iflags & D_FLAG) {
                env->sregs[SR_ESR] |= 1 << 12;
                env->sregs[SR_PC] -= 4;
                /* FIXME: if branch was immed, replay the imm as well.  */
            env->iflags &= ~(IMM_FLAG | D_FLAG);
            switch (env->sregs[SR_ESR] & 31) {
            case ESR_EC_DIVZERO:
                info.si_signo = TARGET_SIGFPE;
                info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                info.si_signo = TARGET_SIGFPE;
                if (env->sregs[SR_FSR] & FSR_IO) {
                    info.si_code = TARGET_FPE_FLTINV;
                if (env->sregs[SR_FSR] & FSR_DZ) {
                    info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                printf("Unhandled hw-exception: 0x%x\n",
                       env->sregs[SR_ESR] & ESR_EC_MASK);
                cpu_dump_state(cs, stderr, fprintf, 0);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            cpu_exec_step_atomic(cs);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
void cpu_loop(CPUM68KState *env)
    CPUState *cs = CPU(m68k_env_get_cpu(env));
    target_siginfo_t info;
    TaskState *ts = cs->opaque;
        trapnr = cpu_exec(cs);
        process_queued_cpu_work(cs);
            if (ts->sim_syscalls) {
                get_user_u16(nr, env->pc + 2);
                do_m68k_simcall(env, nr);
        case EXCP_HALT_INSN:
            /* Semihosting syscall.  */
            do_m68k_semihosting(env, env->dregs[0]);
        case EXCP_UNSUPPORTED:
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_INTOVF;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_INTDIV;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            ts->sim_syscalls = 0;
            ret = do_syscall(env,
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->dregs[0] = ret;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->mmu.ar;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            cpu_exec_step_atomic(cs);
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
        process_pending_signals(env);
#endif /* TARGET_M68K */
void cpu_loop(CPUAlphaState *env)
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    target_siginfo_t info;
    bool arch_interrupt = true;
        trapnr = cpu_exec(cs);
        process_queued_cpu_work(cs);
            fprintf(stderr, "Reset requested. Exit\n");
            fprintf(stderr, "Machine check exception. Exit\n");
        case EXCP_SMP_INTERRUPT:
        case EXCP_CLK_INTERRUPT:
        case EXCP_DEV_INTERRUPT:
            fprintf(stderr, "External interrupt. Exit\n");
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
                            ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_FLTINV;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            /* No-op.  Linux simply re-enables the FPU.  */
            switch (env->error_code) {
                info.si_signo = TARGET_SIGTRAP;
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                trapnr = env->ir[IR_V0];
                sysret = do_syscall(env, trapnr,
                                    env->ir[IR_A0], env->ir[IR_A1],
                                    env->ir[IR_A2], env->ir[IR_A3],
                                    env->ir[IR_A4], env->ir[IR_A5],
                if (sysret == -TARGET_ERESTARTSYS) {
                if (sysret == -TARGET_QEMU_ESIGRETURN) {
                /* Syscall writes 0 to V0 to bypass error check, similar
                   to how this is handled internal to Linux kernel.
                   (Ab)use trapnr temporarily as boolean indicating error.  */
                trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
                env->ir[IR_V0] = (trapnr ? -sysret : sysret);
                env->ir[IR_A3] = trapnr;
                /* ??? We can probably elide the code using page_unprotect
                   that is checking for self-modifying code.  Instead we
                   could simply call tb_flush here.  Until we work out the
                   changes required to turn off the extra write protection,
                   this can be a no-op.  */
                /* Handled in the translator for usermode.  */
                /* Handled in the translator for usermode.  */
                info.si_signo = TARGET_SIGFPE;
                switch (env->ir[IR_A0]) {
                case TARGET_GEN_INTOVF:
                    info.si_code = TARGET_FPE_INTOVF;
                case TARGET_GEN_INTDIV:
                    info.si_code = TARGET_FPE_INTDIV;
                case TARGET_GEN_FLTOVF:
                    info.si_code = TARGET_FPE_FLTOVF;
                case TARGET_GEN_FLTUND:
                    info.si_code = TARGET_FPE_FLTUND;
                case TARGET_GEN_FLTINV:
                    info.si_code = TARGET_FPE_FLTINV;
                case TARGET_GEN_FLTINE:
                    info.si_code = TARGET_FPE_FLTRES;
                case TARGET_GEN_ROPRAND:
                    info.si_signo = TARGET_SIGTRAP;
                    info._sifields._sigfault._addr = env->pc;
                    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
            if (info.si_signo) {
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                arch_interrupt = false;
        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            cpu_exec_step_atomic(cs);
            arch_interrupt = false;
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
        /* Most of the traps imply a transition through PALcode, which
           implies an REI instruction has been executed.  Which means
           that RX and LOCK_ADDR should be cleared.  But there are a
           few exceptions for traps internal to QEMU.  */
        if (arch_interrupt) {
            env->flags &= ~ENV_FLAG_RX_FLAG;
            env->lock_addr = -1;
#endif /* TARGET_ALPHA */
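/*
 * Reading of the Alpha syscall epilogue above (an interpretation of the
 * code, not additional ABI documentation): v0 (IR_V0) carries either the
 * result or, on error, the positive errno, and a3 (IR_A3) is set to 1 to
 * flag the error, mirroring what the kernel does.  A guest libc would
 * therefore do something along the lines of
 *
 *     if (a3 != 0) { errno = v0; v0 = -1; }
 */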
/* s390x masks the fault address it reports in si_addr for SIGSEGV and SIGBUS */
#define S390X_FAIL_ADDR_MASK -4096LL

void cpu_loop(CPUS390XState *env)
    CPUState *cs = CPU(s390_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_exec(cs);
        process_queued_cpu_work(cs);
        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            n = env->int_svc_code;
                /* syscalls > 255 */
            env->psw.addr += env->int_svc_ilen;
            ret = do_syscall(env, n, env->regs[2], env->regs[3],
                             env->regs[4], env->regs[5],
                             env->regs[6], env->regs[7], 0, 0);
            if (ret == -TARGET_ERESTARTSYS) {
                env->psw.addr -= env->int_svc_ilen;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            n = TARGET_TRAP_BRKPT;
            n = env->int_pgm_code;
            case PGM_PRIVILEGED:
                sig = TARGET_SIGILL;
                n = TARGET_ILL_ILLOPC;
            case PGM_PROTECTION:
            case PGM_ADDRESSING:
                sig = TARGET_SIGSEGV;
                /* XXX: check env->error_code */
                n = TARGET_SEGV_MAPERR;
                addr = env->__excp_addr & S390X_FAIL_ADDR_MASK;
            case PGM_SPECIFICATION:
            case PGM_SPECIAL_OP:
                sig = TARGET_SIGILL;
                n = TARGET_ILL_ILLOPN;
            case PGM_FIXPT_OVERFLOW:
                sig = TARGET_SIGFPE;
                n = TARGET_FPE_INTOVF;
            case PGM_FIXPT_DIVIDE:
                sig = TARGET_SIGFPE;
                n = TARGET_FPE_INTDIV;
                n = (env->fpc >> 8) & 0xff;
                    /* compare-and-trap */
                    /* An IEEE exception, simulated or otherwise.  */
                        n = TARGET_FPE_FLTINV;
                    } else if (n & 0x40) {
                        n = TARGET_FPE_FLTDIV;
                    } else if (n & 0x20) {
                        n = TARGET_FPE_FLTOVF;
                    } else if (n & 0x10) {
                        n = TARGET_FPE_FLTUND;
                    } else if (n & 0x08) {
                        n = TARGET_FPE_FLTRES;
                        /* ??? Quantum exception; BFP, DFP error.  */
                    sig = TARGET_SIGFPE;
                fprintf(stderr, "Unhandled program exception: %#x\n", n);
                cpu_dump_state(cs, stderr, fprintf, 0);
                addr = env->psw.addr;
            info.si_signo = sig;
            info._sifields._sigfault._addr = addr;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            cpu_exec_step_atomic(cs);
            fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
#endif /* TARGET_S390X */
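/*
 * Note on the floating-point program-exception path above: the value taken
 * from (env->fpc >> 8) & 0xff is presumably the data-exception code (DXC),
 * and the bit tests (0x80 invalid, 0x40 divide, 0x20 overflow, 0x10
 * underflow, 0x08 inexact) map the IEEE exception flags onto TARGET_FPE_*
 * codes.  This is an interpretation of the code above, not a quote from the
 * architecture manual.
 */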
#ifdef TARGET_TILEGX

static void gen_sigill_reg(CPUTLGState *env)
    target_siginfo_t info;
    info.si_signo = TARGET_SIGILL;
    info.si_code = TARGET_ILL_PRVREG;
    info._sifields._sigfault._addr = env->pc;
    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

static void do_signal(CPUTLGState *env, int signo, int sigcode)
    target_siginfo_t info;
    info.si_signo = signo;
    info._sifields._sigfault._addr = env->pc;
    if (signo == TARGET_SIGSEGV) {
        /* The passed in sigcode is a dummy; check for a page mapping
           and pass either MAPERR or ACCERR.  */
        target_ulong addr = env->excaddr;
        info._sifields._sigfault._addr = addr;
        if (page_check_range(addr, 1, PAGE_VALID) < 0) {
            sigcode = TARGET_SEGV_MAPERR;
            sigcode = TARGET_SEGV_ACCERR;
    info.si_code = sigcode;
    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);

static void gen_sigsegv_maperr(CPUTLGState *env, target_ulong addr)
    env->excaddr = addr;
    do_signal(env, TARGET_SIGSEGV, 0);

static void set_regval(CPUTLGState *env, uint8_t reg, uint64_t val)
    if (unlikely(reg >= TILEGX_R_COUNT)) {
        gen_sigill_reg(env);
        g_assert_not_reached();
    env->regs[reg] = val;

/*
 * Compare the 8-byte contents of the CmpValue SPR with the 8-byte value in
 * memory at the address held in the first source register. If the values are
 * not equal, then no memory operation is performed. If the values are equal,
 * the 8-byte quantity from the second source register is written into memory
 * at the address held in the first source register. In either case, the result
 * of the instruction is the value read from memory. The compare and write to
 * memory are atomic and thus can be used for synchronization purposes. This
 * instruction only operates for addresses aligned to a 8-byte boundary.
 * Unaligned memory access causes an Unaligned Data Reference interrupt.
 *
 * Functional Description (64-bit)
 *       uint64_t memVal = memoryReadDoubleWord (rf[SrcA]);
 *       rf[Dest] = memVal;
 *       if (memVal == SPR[CmpValueSPR])
 *           memoryWriteDoubleWord (rf[SrcA], rf[SrcB]);
 *
 * Functional Description (32-bit)
 *       uint64_t memVal = signExtend32 (memoryReadWord (rf[SrcA]));
 *       rf[Dest] = memVal;
 *       if (memVal == signExtend32 (SPR[CmpValueSPR]))
 *           memoryWriteWord (rf[SrcA], rf[SrcB]);
 *
 * This function also processes exch and exch4 which need not process SPR.
 */
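/*
 * Rough host-side analogy for the description above (illustration only, not
 * part of the emulation): with C11 atomics the cmpexch semantics would look
 * roughly like
 *
 *     uint64_t expected = spr_cmpexch;
 *     atomic_compare_exchange_strong(mem, &expected, src_b);
 *     dest = expected;   // old memory value, whether or not the store happened
 *
 * whereas do_exch() below emulates it with explicit guest-memory accesses and
 * raises SIGSEGV itself when those accesses fault.
 */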
static void do_exch(CPUTLGState *env, bool quad, bool cmp)
    target_long val, sprval;
    addr = env->atomic_srca;
    if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
        goto sigsegv_maperr;
        sprval = env->spregs[TILEGX_SPR_CMPEXCH];
        sprval = sextract64(env->spregs[TILEGX_SPR_CMPEXCH], 0, 32);
    if (!cmp || val == sprval) {
        target_long valb = env->atomic_srcb;
        if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
            goto sigsegv_maperr;
    set_regval(env, env->atomic_dstr, val);
    gen_sigsegv_maperr(env, addr);

static void do_fetch(CPUTLGState *env, int trapnr, bool quad)
    target_long val, valb;
    addr = env->atomic_srca;
    valb = env->atomic_srcb;
    if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
        goto sigsegv_maperr;
    case TILEGX_EXCP_OPCODE_FETCHADD:
    case TILEGX_EXCP_OPCODE_FETCHADD4:
    case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
    case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
        if ((int32_t)valb < 0) {
    case TILEGX_EXCP_OPCODE_FETCHAND:
    case TILEGX_EXCP_OPCODE_FETCHAND4:
    case TILEGX_EXCP_OPCODE_FETCHOR:
    case TILEGX_EXCP_OPCODE_FETCHOR4:
        g_assert_not_reached();
    if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
        goto sigsegv_maperr;
    set_regval(env, env->atomic_dstr, val);
    gen_sigsegv_maperr(env, addr);

void cpu_loop(CPUTLGState *env)
    CPUState *cs = CPU(tilegx_env_get_cpu(env));
        trapnr = cpu_exec(cs);
        process_queued_cpu_work(cs);
        case TILEGX_EXCP_SYSCALL:
            abi_ulong ret = do_syscall(env, env->regs[TILEGX_R_NR],
                                       env->regs[0], env->regs[1],
                                       env->regs[2], env->regs[3],
                                       env->regs[4], env->regs[5],
                                       env->regs[6], env->regs[7]);
            if (ret == -TARGET_ERESTARTSYS) {
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[TILEGX_R_RE] = ret;
                env->regs[TILEGX_R_ERR] = TILEGX_IS_ERRNO(ret) ? -ret : 0;
        case TILEGX_EXCP_OPCODE_EXCH:
            do_exch(env, true, false);
        case TILEGX_EXCP_OPCODE_EXCH4:
            do_exch(env, false, false);
        case TILEGX_EXCP_OPCODE_CMPEXCH:
            do_exch(env, true, true);
        case TILEGX_EXCP_OPCODE_CMPEXCH4:
            do_exch(env, false, true);
        case TILEGX_EXCP_OPCODE_FETCHADD:
        case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
        case TILEGX_EXCP_OPCODE_FETCHAND:
        case TILEGX_EXCP_OPCODE_FETCHOR:
            do_fetch(env, trapnr, true);
        case TILEGX_EXCP_OPCODE_FETCHADD4:
        case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
        case TILEGX_EXCP_OPCODE_FETCHAND4:
        case TILEGX_EXCP_OPCODE_FETCHOR4:
            do_fetch(env, trapnr, false);
        case TILEGX_EXCP_SIGNAL:
            do_signal(env, env->signo, env->sigcode);
        case TILEGX_EXCP_REG_IDN_ACCESS:
        case TILEGX_EXCP_REG_UDN_ACCESS:
            gen_sigill_reg(env);
            cpu_exec_step_atomic(cs);
            fprintf(stderr, "trapnr is %d[0x%x].\n", trapnr, trapnr);
            g_assert_not_reached();
        process_pending_signals(env);
static abi_ulong hppa_lws(CPUHPPAState *env)
    uint32_t which = env->gr[20];
    abi_ulong addr = env->gr[26];
    abi_ulong old = env->gr[25];
    abi_ulong new = env->gr[24];
    abi_ulong size, ret;
        return -TARGET_ENOSYS;
    case 0: /* elf32 atomic 32bit cmpxchg */
        if ((addr & 3) || !access_ok(VERIFY_WRITE, addr, 4)) {
            return -TARGET_EFAULT;
        ret = atomic_cmpxchg((uint32_t *)g2h(addr), old, new);
    case 2: /* elf32 atomic "new" cmpxchg */
            return -TARGET_ENOSYS;
        if (((addr | old | new) & ((1 << size) - 1))
            || !access_ok(VERIFY_WRITE, addr, 1 << size)
            || !access_ok(VERIFY_READ, old, 1 << size)
            || !access_ok(VERIFY_READ, new, 1 << size)) {
            return -TARGET_EFAULT;
        /* Note that below we use host-endian loads so that the cmpxchg
           can be host-endian as well.  */
            old = *(uint8_t *)g2h(old);
            new = *(uint8_t *)g2h(new);
            ret = atomic_cmpxchg((uint8_t *)g2h(addr), old, new);
            old = *(uint16_t *)g2h(old);
            new = *(uint16_t *)g2h(new);
            ret = atomic_cmpxchg((uint16_t *)g2h(addr), old, new);
            old = *(uint32_t *)g2h(old);
            new = *(uint32_t *)g2h(new);
            ret = atomic_cmpxchg((uint32_t *)g2h(addr), old, new);
            uint64_t o64, n64, r64;
            o64 = *(uint64_t *)g2h(old);
            n64 = *(uint64_t *)g2h(new);
#ifdef CONFIG_ATOMIC64
            r64 = atomic_cmpxchg__nocheck((uint64_t *)g2h(addr), o64, n64);
            r64 = *(uint64_t *)g2h(addr);
            *(uint64_t *)g2h(addr) = n64;
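/*
 * Calling convention implemented by hppa_lws(), as used from the cpu_loop
 * below: gr20 selects the light-weight syscall (0 = 32-bit cmpxchg, 2 =
 * sized "new" cmpxchg), gr26 is the memory address, gr25 the old value (for
 * variant 2, gr25/gr24 are instead guest pointers to the old/new values),
 * gr24 the new value, and the result is returned to the guest in gr21.
 * This summarises the code above and below; the expectation that userspace
 * (e.g. glibc on hppa) reaches this through the gateway page is an
 * assumption rather than something visible here.
 */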
void cpu_loop(CPUHPPAState *env)
    CPUState *cs = CPU(hppa_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_exec(cs);
        process_queued_cpu_work(cs);
            ret = do_syscall(env, env->gr[20],
                             env->gr[26], env->gr[25],
                             env->gr[24], env->gr[23],
                             env->gr[22], env->gr[21], 0, 0);
                /* We arrived here by faking the gateway page.  Return.  */
                env->iaoq_f = env->gr[31];
                env->iaoq_b = env->gr[31] + 4;
            case -TARGET_ERESTARTSYS:
            case -TARGET_QEMU_ESIGRETURN:
        case EXCP_SYSCALL_LWS:
            env->gr[21] = hppa_lws(env);
            /* We arrived here by faking the gateway page.  Return.  */
            env->iaoq_f = env->gr[31];
            env->iaoq_b = env->gr[31] + 4;
        case EXCP_ITLB_MISS:
        case EXCP_DTLB_MISS:
        case EXCP_NA_ITLB_MISS:
        case EXCP_NA_DTLB_MISS:
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_ACCERR;
            info._sifields._sigfault._addr = env->cr[CR_IOR];
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = TARGET_SIGBUS;
            info._sifields._sigfault._addr = env->cr[CR_IOR];
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->iaoq_f;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            info.si_signo = TARGET_SIGFPE;
            info._sifields._sigfault._addr = env->iaoq_f;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            trapnr = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = trapnr;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, trapnr, QEMU_SI_FAULT, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            g_assert_not_reached();
        process_pending_signals(env);
#endif /* TARGET_HPPA */
THREAD CPUState *thread_cpu;

bool qemu_cpu_is_self(CPUState *cpu)
    return thread_cpu == cpu;

void qemu_cpu_kick(CPUState *cpu)

void task_settid(TaskState *ts)
    if (ts->ts_tid == 0) {
        ts->ts_tid = (pid_t)syscall(SYS_gettid);

void stop_all_tasks(void)
    /*
     * We trust that when using NPTL, start_exclusive()
     * handles thread stopping correctly.
     */

/* Assumes contents are already zeroed.  */
void init_task_state(TaskState *ts)

CPUArchState *cpu_copy(CPUArchState *env)
    CPUState *cpu = ENV_GET_CPU(env);
    CPUState *new_cpu = cpu_init(cpu_model);
    CPUArchState *new_env = new_cpu->env_ptr;

    /* Reset non arch specific state */
    memcpy(new_env, env, sizeof(CPUArchState));

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_cpu->breakpoints);
    QTAILQ_INIT(&new_cpu->watchpoints);
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
static void handle_arg_help(const char *arg)
    usage(EXIT_SUCCESS);

static void handle_arg_log(const char *arg)
    mask = qemu_str_to_log_mask(arg);
        qemu_print_log_usage(stdout);
    qemu_log_needs_buffers();

static void handle_arg_dfilter(const char *arg)
    qemu_set_dfilter_ranges(arg, NULL);

static void handle_arg_log_filename(const char *arg)
    qemu_set_log_filename(arg, &error_fatal);

static void handle_arg_set_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_setenv(envlist, token) != 0) {
            usage(EXIT_FAILURE);

static void handle_arg_unset_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_unsetenv(envlist, token) != 0) {
            usage(EXIT_FAILURE);

static void handle_arg_argv0(const char *arg)
    argv0 = strdup(arg);

static void handle_arg_stack_size(const char *arg)
    guest_stack_size = strtoul(arg, &p, 0);
    if (guest_stack_size == 0) {
        usage(EXIT_FAILURE);
        guest_stack_size *= 1024 * 1024;
    } else if (*p == 'k' || *p == 'K') {
        guest_stack_size *= 1024;
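/*
 * Example uses of -s / QEMU_STACK_SIZE, based on the suffix handling visible
 * above (the elided branch presumably matches an 'M'/'m' suffix; the target
 * name is a placeholder):
 *
 *     qemu-<target> -s 16M ./prog     # 16 MiB guest stack
 *     qemu-<target> -s 512k ./prog    # 512 KiB guest stack
 */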
static void handle_arg_ld_prefix(const char *arg)
    interp_prefix = strdup(arg);

static void handle_arg_pagesize(const char *arg)
    qemu_host_page_size = atoi(arg);
    if (qemu_host_page_size == 0 ||
        (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
        fprintf(stderr, "page size must be a power of two\n");

static void handle_arg_randseed(const char *arg)
    unsigned long long seed;
    if (parse_uint_full(arg, &seed, 0) != 0 || seed > UINT_MAX) {
        fprintf(stderr, "Invalid seed number: %s\n", arg);

static void handle_arg_gdb(const char *arg)
    gdbstub_port = atoi(arg);

static void handle_arg_uname(const char *arg)
    qemu_uname_release = strdup(arg);

static void handle_arg_cpu(const char *arg)
    cpu_model = strdup(arg);
    if (cpu_model == NULL || is_help_option(cpu_model)) {
        /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
        cpu_list(stdout, &fprintf);

static void handle_arg_guest_base(const char *arg)
    guest_base = strtol(arg, NULL, 0);
    have_guest_base = 1;

static void handle_arg_reserved_va(const char *arg)
    reserved_va = strtoul(arg, &p, 0);
        unsigned long unshifted = reserved_va;
        reserved_va <<= shift;
        if (reserved_va >> shift != unshifted
            || (MAX_RESERVED_VA && reserved_va > MAX_RESERVED_VA)) {
            fprintf(stderr, "Reserved virtual address too big\n");
        fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
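/*
 * Example uses of -R / QEMU_RESERVED_VA, based on the parsing above (the
 * elided code between strtoul() and the overflow check presumably derives
 * 'shift' from a size suffix; target name is a placeholder):
 *
 *     qemu-<target> -R 0x10000000 ./prog          # reserve 256 MiB
 *     QEMU_RESERVED_VA=0x10000000 qemu-<target> ./prog
 *
 * Values larger than MAX_RESERVED_VA are rejected with the message above.
 */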
static void handle_arg_singlestep(const char *arg)

static void handle_arg_strace(const char *arg)

static void handle_arg_version(const char *arg)
    printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION
           "\n" QEMU_COPYRIGHT "\n");

static char *trace_file;
static void handle_arg_trace(const char *arg)
    trace_file = trace_opt_parse(arg);

struct qemu_argument {
    void (*handle_opt)(const char *arg);
    const char *example;

static const struct qemu_argument arg_table[] = {
    {"h",          "",                 false, handle_arg_help,
     "",           "print this help"},
    {"help",       "",                 false, handle_arg_help,
    {"g",          "QEMU_GDB",         true,  handle_arg_gdb,
     "port",       "wait gdb connection to 'port'"},
    {"L",          "QEMU_LD_PREFIX",   true,  handle_arg_ld_prefix,
     "path",       "set the elf interpreter prefix to 'path'"},
    {"s",          "QEMU_STACK_SIZE",  true,  handle_arg_stack_size,
     "size",       "set the stack size to 'size' bytes"},
    {"cpu",        "QEMU_CPU",         true,  handle_arg_cpu,
     "model",      "select CPU (-cpu help for list)"},
    {"E",          "QEMU_SET_ENV",     true,  handle_arg_set_env,
     "var=value",  "sets targets environment variable (see below)"},
    {"U",          "QEMU_UNSET_ENV",   true,  handle_arg_unset_env,
     "var",        "unsets targets environment variable (see below)"},
    {"0",          "QEMU_ARGV0",       true,  handle_arg_argv0,
     "argv0",      "forces target process argv[0] to be 'argv0'"},
    {"r",          "QEMU_UNAME",       true,  handle_arg_uname,
     "uname",      "set qemu uname release string to 'uname'"},
    {"B",          "QEMU_GUEST_BASE",  true,  handle_arg_guest_base,
     "address",    "set guest_base address to 'address'"},
    {"R",          "QEMU_RESERVED_VA", true,  handle_arg_reserved_va,
     "size",       "reserve 'size' bytes for guest virtual address space"},
    {"d",          "QEMU_LOG",         true,  handle_arg_log,
     "item[,...]", "enable logging of specified items "
     "(use '-d help' for a list of items)"},
    {"dfilter",    "QEMU_DFILTER",     true,  handle_arg_dfilter,
     "range[,...]","filter logging based on address range"},
    {"D",          "QEMU_LOG_FILENAME", true, handle_arg_log_filename,
     "logfile",    "write logs to 'logfile' (default stderr)"},
    {"p",          "QEMU_PAGESIZE",    true,  handle_arg_pagesize,
     "pagesize",   "set the host page size to 'pagesize'"},
    {"singlestep", "QEMU_SINGLESTEP",  false, handle_arg_singlestep,
     "",           "run in singlestep mode"},
    {"strace",     "QEMU_STRACE",      false, handle_arg_strace,
     "",           "log system calls"},
    {"seed",       "QEMU_RAND_SEED",   true,  handle_arg_randseed,
     "",           "Seed for pseudo-random number generator"},
    {"trace",      "QEMU_TRACE",       true,  handle_arg_trace,
     "",           "[[enable=]<pattern>][,events=<file>][,file=<file>]"},
    {"version",    "QEMU_VERSION",     false, handle_arg_version,
     "",           "display version information and exit"},
    {NULL, NULL, false, NULL, NULL, NULL}
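/*
 * Example command line combining several of the options declared above
 * (paths and the target name are placeholders):
 *
 *     qemu-<target> -L /usr/<target>-linux-gnu \
 *                   -E LD_LIBRARY_PATH=/opt/libs -U LD_PRELOAD \
 *                   -g 1234 -strace ./a.out arg1 arg2
 *
 * Each option can equivalently be supplied through the environment variable
 * listed next to it in arg_table (QEMU_LD_PREFIX, QEMU_SET_ENV, ...).
 */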
static void usage(int exitcode)
    const struct qemu_argument *arginfo;

    printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n"
           "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n"
           "Options and associated environment variables:\n"

    /* Calculate column widths. We must always have at least enough space
     * for the column header.
     */
    maxarglen = strlen("Argument");
    maxenvlen = strlen("Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        int arglen = strlen(arginfo->argv);
        if (arginfo->has_arg) {
            arglen += strlen(arginfo->example) + 1;
        if (strlen(arginfo->env) > maxenvlen) {
            maxenvlen = strlen(arginfo->env);
        if (arglen > maxarglen) {

    printf("%-*s %-*s Description\n", maxarglen + 1, "Argument",
           maxenvlen, "Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->has_arg) {
            printf("-%s %-*s %-*s %s\n", arginfo->argv,
                   (int)(maxarglen - strlen(arginfo->argv) - 1),
                   arginfo->example, maxenvlen, arginfo->env, arginfo->help);
            printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv,
                   maxenvlen, arginfo->env,

           "QEMU_LD_PREFIX  = %s\n"
           "QEMU_STACK_SIZE = %ld byte\n",
           "You can use -E and -U options or the QEMU_SET_ENV and\n"
           "QEMU_UNSET_ENV environment variables to set and unset\n"
           "environment variables for the target process.\n"
           "It is possible to provide several variables by separating them\n"
           "by commas in getsubopt(3) style. Additionally it is possible to\n"
           "provide the -E and -U options multiple times.\n"
           "The following lines are equivalent:\n"
           "    -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
           "    -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
           "    QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
           "Note that if you provide several changes to a single variable\n"
           "the last change will stay in effect.\n"
           QEMU_HELP_BOTTOM "\n");
static int parse_args(int argc, char **argv)
    const struct qemu_argument *arginfo;

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->env == NULL) {
        r = getenv(arginfo->env);
            arginfo->handle_opt(r);

        if (optind >= argc) {
        if (!strcmp(r, "-")) {
        /* Treat --foo the same as -foo.  */

        for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
            if (!strcmp(r, arginfo->argv)) {
                if (arginfo->has_arg) {
                    if (optind >= argc) {
                        (void) fprintf(stderr,
                            "qemu: missing argument for option '%s'\n", r);
                    arginfo->handle_opt(argv[optind]);
                    arginfo->handle_opt(NULL);

        /* no option matched the current argv */
        if (arginfo->handle_opt == NULL) {
            (void) fprintf(stderr, "qemu: unknown option '%s'\n", r);

    if (optind >= argc) {
        (void) fprintf(stderr, "qemu: no user program specified\n");

    filename = argv[optind];
    exec_path = argv[optind];
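/*
 * Note on precedence, as implied by the order of the two loops above: the
 * QEMU_* environment variables are fed to their handlers first, and the
 * command-line options are processed afterwards, so an explicit option
 * overrides the corresponding environment variable when both are given.
 */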
int main(int argc, char **argv, char **envp)
    struct target_pt_regs regs1, *regs = &regs1;
    struct image_info info1, *info = &info1;
    struct linux_binprm bprm;
    char **target_environ, **wrk;

    module_call_init(MODULE_INIT_TRACE);
    qemu_init_cpu_list();
    module_call_init(MODULE_INIT_QOM);

    envlist = envlist_create();

    /* add current environment into the list */
    for (wrk = environ; *wrk != NULL; wrk++) {
        (void) envlist_setenv(envlist, *wrk);

    /* Read the stack limit from the kernel.  If it's "unlimited",
       then we can do little else besides use the default.  */
    if (getrlimit(RLIMIT_STACK, &lim) == 0
        && lim.rlim_cur != RLIM_INFINITY
        && lim.rlim_cur == (target_long)lim.rlim_cur) {
        guest_stack_size = lim.rlim_cur;

    qemu_add_opts(&qemu_trace_opts);

    optind = parse_args(argc, argv);

    if (!trace_init_backends()) {
    trace_init_file(trace_file);

    memset(regs, 0, sizeof(struct target_pt_regs));

    /* Zero out image_info */
    memset(info, 0, sizeof(struct image_info));

    memset(&bprm, 0, sizeof(bprm));

    /* Scan interp_prefix dir for replacement files. */
    init_paths(interp_prefix);

    init_qemu_uname_release();

    if (cpu_model == NULL) {
#if defined(TARGET_I386)
#ifdef TARGET_X86_64
        cpu_model = "qemu64";
        cpu_model = "qemu32";
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_M68K)
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
        cpu_model = "TI UltraSparc II";
        cpu_model = "Fujitsu MB86904";
#elif defined(TARGET_MIPS)
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
#elif defined TARGET_OPENRISC
        cpu_model = "or1200";
#elif defined(TARGET_PPC)
# ifdef TARGET_PPC64
        cpu_model = "POWER8";
#elif defined TARGET_SH4
        cpu_model = "sh7785";
#elif defined TARGET_S390X

    /* NOTE: we need to init the CPU at this stage to get
       qemu_host_page_size */
    cpu = cpu_init(cpu_model);

    if (getenv("QEMU_STRACE")) {

    if (getenv("QEMU_RAND_SEED")) {
        handle_arg_randseed(getenv("QEMU_RAND_SEED"));

    target_environ = envlist_to_environ(envlist, NULL);
    envlist_free(envlist);

    /*
     * Now that page sizes are configured in cpu_init() we can do
     * proper page alignment for guest_base.
     */
    guest_base = HOST_PAGE_ALIGN(guest_base);

    if (reserved_va || have_guest_base) {
        guest_base = init_guest_space(guest_base, reserved_va, 0,
        if (guest_base == (unsigned long)-1) {
            fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
                    "space for use as guest address space (check your virtual "
                    "memory ulimit setting or reserve less using -R option)\n",
            mmap_next_start = reserved_va;

    /*
     * Read in mmap_min_addr kernel parameter.  This value is used
     * When loading the ELF image to determine whether guest_base
     * is needed.  It is also used in mmap_find_vma.
     */
    if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
        if (fscanf(fp, "%lu", &tmp) == 1) {
            mmap_min_addr = tmp;
            qemu_log_mask(CPU_LOG_PAGE, "host mmap_min_addr=0x%lx\n", mmap_min_addr);

    /*
     * Prepare copy of argv vector for target.
     */
    target_argc = argc - optind;
    target_argv = calloc(target_argc + 1, sizeof(char *));
    if (target_argv == NULL) {
        (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");

    /*
     * If argv0 is specified (using '-0' switch) we replace
     * argv[0] pointer with the given one.
     */
    if (argv0 != NULL) {
        target_argv[i++] = strdup(argv0);
    for (; i < target_argc; i++) {
        target_argv[i] = strdup(argv[optind + i]);
    target_argv[target_argc] = NULL;

    ts = g_new0(TaskState, 1);
    init_task_state(ts);
    /* build Task State */

    execfd = qemu_getauxval(AT_EXECFD);
        execfd = open(filename, O_RDONLY);
            printf("Error while loading %s: %s\n", filename, strerror(errno));
            _exit(EXIT_FAILURE);

    ret = loader_exec(execfd, filename, target_argv, target_environ, regs,
        printf("Error while loading %s: %s\n", filename, strerror(-ret));
        _exit(EXIT_FAILURE);

    for (wrk = target_environ; *wrk; wrk++) {
    g_free(target_environ);

    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        qemu_log("guest_base  0x%lx\n", guest_base);
        qemu_log("start_brk   0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
        qemu_log("end_code    0x" TARGET_ABI_FMT_lx "\n", info->end_code);
        qemu_log("start_code  0x" TARGET_ABI_FMT_lx "\n", info->start_code);
        qemu_log("start_data  0x" TARGET_ABI_FMT_lx "\n", info->start_data);
        qemu_log("end_data    0x" TARGET_ABI_FMT_lx "\n", info->end_data);
        qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n", info->start_stack);
        qemu_log("brk         0x" TARGET_ABI_FMT_lx "\n", info->brk);
        qemu_log("entry       0x" TARGET_ABI_FMT_lx "\n", info->entry);
        qemu_log("argv_start  0x" TARGET_ABI_FMT_lx "\n", info->arg_start);
        qemu_log("env_start   0x" TARGET_ABI_FMT_lx "\n",
                 info->arg_end + (abi_ulong)sizeof(abi_ulong));
        qemu_log("auxv_start  0x" TARGET_ABI_FMT_lx "\n", info->saved_auxv);

    target_set_brk(info->brk);

    /* Now that we've loaded the binary, GUEST_BASE is fixed.  Delay
       generating the prologue until now so that the prologue can take
       the real value of GUEST_BASE into account.  */
    tcg_prologue_init(tcg_ctx);

#if defined(TARGET_I386)
    env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
    env->hflags |= HF_PE_MASK | HF_CPL_MASK;
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        env->cr[4] |= CR4_OSFXSR_MASK;
        env->hflags |= HF_OSFXSR_MASK;
#ifndef TARGET_ABI32
    /* enable 64 bit mode if possible */
    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
    env->cr[4] |= CR4_PAE_MASK;
    env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
    env->hflags |= HF_LMA_MASK;

    /* flags setup : we activate the IRQs by default as in user mode */
    env->eflags |= IF_MASK;

    /* linux register setup */
#ifndef TARGET_ABI32
    env->regs[R_EAX] = regs->rax;
    env->regs[R_EBX] = regs->rbx;
    env->regs[R_ECX] = regs->rcx;
    env->regs[R_EDX] = regs->rdx;
    env->regs[R_ESI] = regs->rsi;
    env->regs[R_EDI] = regs->rdi;
    env->regs[R_EBP] = regs->rbp;
    env->regs[R_ESP] = regs->rsp;
    env->eip = regs->rip;
    env->regs[R_EAX] = regs->eax;
    env->regs[R_EBX] = regs->ebx;
    env->regs[R_ECX] = regs->ecx;
    env->regs[R_EDX] = regs->edx;
    env->regs[R_ESI] = regs->esi;
    env->regs[R_EDI] = regs->edi;
    env->regs[R_EBP] = regs->ebp;
    env->regs[R_ESP] = regs->esp;
    env->eip = regs->eip;

    /* linux interrupt setup */
#ifndef TARGET_ABI32
    env->idt.limit = 511;
    env->idt.limit = 255;
    env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
                                PROT_READ|PROT_WRITE,
                                MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    idt_table = g2h(env->idt.base);

    /* linux segment setup */
        uint64_t *gdt_table;
        env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
        gdt_table = g2h(env->gdt.base);
        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
        /* 64 bit code segment */
        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
        write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
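        /*
         * For reference (standard x86 descriptor encoding, not specific to
         * this file): descriptor type 0xa with the S bit set is a
         * readable/executable code segment and type 0x2 a read/write data
         * segment, DESC_G/DESC_B give a flat 4 GiB 32-bit segment, and
         * DPL 3 makes both usable from unprivileged guest code.
         */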
        cpu_x86_load_seg(env, R_CS, __USER_CS);
        cpu_x86_load_seg(env, R_SS, __USER_DS);
        cpu_x86_load_seg(env, R_DS, __USER_DS);
        cpu_x86_load_seg(env, R_ES, __USER_DS);
        cpu_x86_load_seg(env, R_FS, __USER_DS);
        cpu_x86_load_seg(env, R_GS, __USER_DS);
        /* This hack makes Wine work... */
        env->segs[R_FS].selector = 0;
        cpu_x86_load_seg(env, R_DS, 0);
        cpu_x86_load_seg(env, R_ES, 0);
        cpu_x86_load_seg(env, R_FS, 0);
        cpu_x86_load_seg(env, R_GS, 0);
#elif defined(TARGET_AARCH64)
    if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
                "The selected ARM CPU does not support 64 bit mode\n");
    for (i = 0; i < 31; i++) {
        env->xregs[i] = regs->regs[i];
    env->xregs[31] = regs->sp;
#ifdef TARGET_WORDS_BIGENDIAN
    env->cp15.sctlr_el[1] |= SCTLR_E0E;
    for (i = 1; i < 4; ++i) {
        env->cp15.sctlr_el[i] |= SCTLR_EE;
#elif defined(TARGET_ARM)
    cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->uregs[i];
#ifdef TARGET_WORDS_BIGENDIAN
    if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
        && (info->elf_flags & EF_ARM_BE8)) {
        env->uncached_cpsr |= CPSR_E;
        env->cp15.sctlr_el[1] |= SCTLR_E0E;
        env->cp15.sctlr_el[1] |= SCTLR_B;
#elif defined(TARGET_UNICORE32)
    cpu_asr_write(env, regs->uregs[32], 0xffffffff);
    for (i = 0; i < 32; i++) {
        env->regs[i] = regs->uregs[i];
#elif defined(TARGET_SPARC)
    env->npc = regs->npc;
    for (i = 0; i < 8; i++)
        env->gregs[i] = regs->u_regs[i];
    for (i = 0; i < 8; i++)
        env->regwptr[i] = regs->u_regs[i + 8];
#elif defined(TARGET_PPC)
#if defined(TARGET_PPC64)
    int flag = (env->insns_flags2 & PPC2_BOOKE206) ? MSR_CM : MSR_SF;
#if defined(TARGET_ABI32)
    env->msr &= ~((target_ulong)1 << flag);
    env->msr |= (target_ulong)1 << flag;
    env->nip = regs->nip;
    for (i = 0; i < 32; i++) {
        env->gpr[i] = regs->gpr[i];
#elif defined(TARGET_M68K)
    env->dregs[0] = regs->d0;
    env->dregs[1] = regs->d1;
    env->dregs[2] = regs->d2;
    env->dregs[3] = regs->d3;
    env->dregs[4] = regs->d4;
    env->dregs[5] = regs->d5;
    env->dregs[6] = regs->d6;
    env->dregs[7] = regs->d7;
    env->aregs[0] = regs->a0;
    env->aregs[1] = regs->a1;
    env->aregs[2] = regs->a2;
    env->aregs[3] = regs->a3;
    env->aregs[4] = regs->a4;
    env->aregs[5] = regs->a5;
    env->aregs[6] = regs->a6;
    env->aregs[7] = regs->usp;
    ts->sim_syscalls = 1;
#elif defined(TARGET_MICROBLAZE)
    env->regs[0] = regs->r0;
    env->regs[1] = regs->r1;
    env->regs[2] = regs->r2;
    env->regs[3] = regs->r3;
    env->regs[4] = regs->r4;
    env->regs[5] = regs->r5;
    env->regs[6] = regs->r6;
    env->regs[7] = regs->r7;
    env->regs[8] = regs->r8;
    env->regs[9] = regs->r9;
    env->regs[10] = regs->r10;
    env->regs[11] = regs->r11;
    env->regs[12] = regs->r12;
    env->regs[13] = regs->r13;
    env->regs[14] = regs->r14;
    env->regs[15] = regs->r15;
    env->regs[16] = regs->r16;
    env->regs[17] = regs->r17;
    env->regs[18] = regs->r18;
    env->regs[19] = regs->r19;
    env->regs[20] = regs->r20;
    env->regs[21] = regs->r21;
    env->regs[22] = regs->r22;
    env->regs[23] = regs->r23;
    env->regs[24] = regs->r24;
    env->regs[25] = regs->r25;
    env->regs[26] = regs->r26;
    env->regs[27] = regs->r27;
    env->regs[28] = regs->r28;
    env->regs[29] = regs->r29;
    env->regs[30] = regs->r30;
    env->regs[31] = regs->r31;
    env->sregs[SR_PC] = regs->pc;
#elif defined(TARGET_MIPS)
    for (i = 0; i < 32; i++) {
        env->active_tc.gpr[i] = regs->regs[i];
    env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
    if (regs->cp0_epc & 1) {
        env->hflags |= MIPS_HFLAG_M16;
    if (((info->elf_flags & EF_MIPS_NAN2008) != 0) !=
        ((env->active_fpu.fcr31 & (1 << FCR31_NAN2008)) != 0)) {
        if ((env->active_fpu.fcr31_rw_bitmask &
             (1 << FCR31_NAN2008)) == 0) {
            fprintf(stderr, "ELF binary's NaN mode not supported by CPU\n");
        if ((info->elf_flags & EF_MIPS_NAN2008) != 0) {
            env->active_fpu.fcr31 |= (1 << FCR31_NAN2008);
            env->active_fpu.fcr31 &= ~(1 << FCR31_NAN2008);
        restore_snan_bit_mode(env);
4806 env
->regs
[1] = regs
->r1
;
4807 env
->regs
[2] = regs
->r2
;
4808 env
->regs
[3] = regs
->r3
;
4809 env
->regs
[4] = regs
->r4
;
4810 env
->regs
[5] = regs
->r5
;
4811 env
->regs
[6] = regs
->r6
;
4812 env
->regs
[7] = regs
->r7
;
4813 env
->regs
[8] = regs
->r8
;
4814 env
->regs
[9] = regs
->r9
;
4815 env
->regs
[10] = regs
->r10
;
4816 env
->regs
[11] = regs
->r11
;
4817 env
->regs
[12] = regs
->r12
;
4818 env
->regs
[13] = regs
->r13
;
4819 env
->regs
[14] = regs
->r14
;
4820 env
->regs
[15] = regs
->r15
;
4821 /* TODO: unsigned long orig_r2; */
4822 env
->regs
[R_RA
] = regs
->ra
;
4823 env
->regs
[R_FP
] = regs
->fp
;
4824 env
->regs
[R_SP
] = regs
->sp
;
4825 env
->regs
[R_GP
] = regs
->gp
;
4826 env
->regs
[CR_ESTATUS
] = regs
->estatus
;
4827 env
->regs
[R_EA
] = regs
->ea
;
4828 /* TODO: unsigned long orig_r7; */
4830 /* Emulate eret when starting thread. */
4831 env
->regs
[R_PC
] = regs
->ea
;
4833 #elif defined(TARGET_OPENRISC)
4837 for (i
= 0; i
< 32; i
++) {
4838 cpu_set_gpr(env
, i
, regs
->gpr
[i
]);
4841 cpu_set_sr(env
, regs
->sr
);
4843 #elif defined(TARGET_SH4)
4847 for(i
= 0; i
< 16; i
++) {
4848 env
->gregs
[i
] = regs
->regs
[i
];
4852 #elif defined(TARGET_ALPHA)
4856 for(i
= 0; i
< 28; i
++) {
4857 env
->ir
[i
] = ((abi_ulong
*)regs
)[i
];
4859 env
->ir
[IR_SP
] = regs
->usp
;
4862 #elif defined(TARGET_CRIS)
4864 env
->regs
[0] = regs
->r0
;
4865 env
->regs
[1] = regs
->r1
;
4866 env
->regs
[2] = regs
->r2
;
4867 env
->regs
[3] = regs
->r3
;
4868 env
->regs
[4] = regs
->r4
;
4869 env
->regs
[5] = regs
->r5
;
4870 env
->regs
[6] = regs
->r6
;
4871 env
->regs
[7] = regs
->r7
;
4872 env
->regs
[8] = regs
->r8
;
4873 env
->regs
[9] = regs
->r9
;
4874 env
->regs
[10] = regs
->r10
;
4875 env
->regs
[11] = regs
->r11
;
4876 env
->regs
[12] = regs
->r12
;
4877 env
->regs
[13] = regs
->r13
;
4878 env
->regs
[14] = info
->start_stack
;
4879 env
->regs
[15] = regs
->acr
;
4880 env
->pc
= regs
->erp
;
4882 #elif defined(TARGET_S390X)
4885 for (i
= 0; i
< 16; i
++) {
4886 env
->regs
[i
] = regs
->gprs
[i
];
4888 env
->psw
.mask
= regs
->psw
.mask
;
4889 env
->psw
.addr
= regs
->psw
.addr
;
4891 #elif defined(TARGET_TILEGX)
4894 for (i
= 0; i
< TILEGX_R_COUNT
; i
++) {
4895 env
->regs
[i
] = regs
->regs
[i
];
4897 for (i
= 0; i
< TILEGX_SPR_COUNT
; i
++) {
4902 #elif defined(TARGET_HPPA)
4905 for (i
= 1; i
< 32; i
++) {
4906 env
->gr
[i
] = regs
->gr
[i
];
4908 env
->iaoq_f
= regs
->iaoq
[0];
4909 env
->iaoq_b
= regs
->iaoq
[1];
4912 #error unsupported target CPU
4915 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4916 ts
->stack_base
= info
->start_stack
;
4917 ts
->heap_base
= info
->brk
;
4918 /* This will be filled in on the first SYS_HEAPINFO call. */
4923 if (gdbserver_start(gdbstub_port
) < 0) {
4924 fprintf(stderr
, "qemu: could not open gdbserver on port %d\n",
4928 gdb_handlesig(cpu
, 0);