4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include <sys/syscall.h>
22 #include <sys/resource.h>
25 #include "qemu/path.h"
26 #include "qemu/cutils.h"
27 #include "qemu/help_option.h"
30 #include "qemu/timer.h"
31 #include "qemu/envlist.h"
38 static const char *filename
;
39 static const char *argv0
;
40 static int gdbstub_port
;
41 static envlist_t
*envlist
;
42 static const char *cpu_model
;
43 unsigned long mmap_min_addr
;
44 unsigned long guest_base
;
47 #define EXCP_DUMP(env, fmt, ...) \
49 CPUState *cs = ENV_GET_CPU(env); \
50 fprintf(stderr, fmt , ## __VA_ARGS__); \
51 cpu_dump_state(cs, stderr, fprintf, 0); \
52 if (qemu_log_separate()) { \
53 qemu_log(fmt, ## __VA_ARGS__); \
54 log_cpu_state(cs, 0); \
58 #if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
60 * When running 32-on-64 we should make sure we can fit all of the possible
61 * guest address space into a contiguous chunk of virtual host memory.
63 * This way we will never overlap with our own libraries or binaries or stack
64 * or anything else that QEMU maps.
67 /* MIPS only supports 31 bits of virtual address space for user space */
68 unsigned long reserved_va
= 0x77000000;
70 unsigned long reserved_va
= 0xf7000000;
73 unsigned long reserved_va
;
76 static void usage(int exitcode
);
78 static const char *interp_prefix
= CONFIG_QEMU_INTERP_PREFIX
;
79 const char *qemu_uname_release
;
81 /* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
82 we allocate a bigger stack. Need a better solution, for example
83 by remapping the process stack directly at the right place */
84 unsigned long guest_stack_size
= 8 * 1024 * 1024UL;
86 void gemu_log(const char *fmt
, ...)
91 vfprintf(stderr
, fmt
, ap
);
95 #if defined(TARGET_I386)
96 int cpu_get_pic_interrupt(CPUX86State
*env
)
102 /***********************************************************/
103 /* Helper routines for implementing atomic operations. */
105 /* To implement exclusive operations we force all cpus to syncronise.
106 We don't require a full sync, only that no cpus are executing guest code.
107 The alternative is to map target atomic ops onto host equivalents,
108 which requires quite a lot of per host/target work. */
109 static pthread_mutex_t cpu_list_mutex
= PTHREAD_MUTEX_INITIALIZER
;
110 static pthread_mutex_t exclusive_lock
= PTHREAD_MUTEX_INITIALIZER
;
111 static pthread_cond_t exclusive_cond
= PTHREAD_COND_INITIALIZER
;
112 static pthread_cond_t exclusive_resume
= PTHREAD_COND_INITIALIZER
;
113 static int pending_cpus
;
115 /* Make sure everything is in a consistent state for calling fork(). */
116 void fork_start(void)
118 qemu_mutex_lock(&tcg_ctx
.tb_ctx
.tb_lock
);
119 pthread_mutex_lock(&exclusive_lock
);
123 void fork_end(int child
)
125 mmap_fork_end(child
);
127 CPUState
*cpu
, *next_cpu
;
128 /* Child processes created by fork() only have a single thread.
129 Discard information about the parent threads. */
130 CPU_FOREACH_SAFE(cpu
, next_cpu
) {
131 if (cpu
!= thread_cpu
) {
132 QTAILQ_REMOVE(&cpus
, thread_cpu
, node
);
136 pthread_mutex_init(&exclusive_lock
, NULL
);
137 pthread_mutex_init(&cpu_list_mutex
, NULL
);
138 pthread_cond_init(&exclusive_cond
, NULL
);
139 pthread_cond_init(&exclusive_resume
, NULL
);
140 qemu_mutex_init(&tcg_ctx
.tb_ctx
.tb_lock
);
141 gdbserver_fork(thread_cpu
);
143 pthread_mutex_unlock(&exclusive_lock
);
144 qemu_mutex_unlock(&tcg_ctx
.tb_ctx
.tb_lock
);
148 /* Wait for pending exclusive operations to complete. The exclusive lock
150 static inline void exclusive_idle(void)
152 while (pending_cpus
) {
153 pthread_cond_wait(&exclusive_resume
, &exclusive_lock
);
157 /* Start an exclusive operation.
158 Must only be called from outside cpu_arm_exec. */
159 static inline void start_exclusive(void)
163 pthread_mutex_lock(&exclusive_lock
);
167 /* Make all other cpus stop executing. */
168 CPU_FOREACH(other_cpu
) {
169 if (other_cpu
->running
) {
174 if (pending_cpus
> 1) {
175 pthread_cond_wait(&exclusive_cond
, &exclusive_lock
);
179 /* Finish an exclusive operation. */
180 static inline void __attribute__((unused
)) end_exclusive(void)
183 pthread_cond_broadcast(&exclusive_resume
);
184 pthread_mutex_unlock(&exclusive_lock
);
187 /* Wait for exclusive ops to finish, and begin cpu execution. */
188 static inline void cpu_exec_start(CPUState
*cpu
)
190 pthread_mutex_lock(&exclusive_lock
);
193 pthread_mutex_unlock(&exclusive_lock
);
196 /* Mark cpu as not executing, and release pending exclusive ops. */
197 static inline void cpu_exec_end(CPUState
*cpu
)
199 pthread_mutex_lock(&exclusive_lock
);
200 cpu
->running
= false;
201 if (pending_cpus
> 1) {
203 if (pending_cpus
== 1) {
204 pthread_cond_signal(&exclusive_cond
);
208 pthread_mutex_unlock(&exclusive_lock
);
211 void cpu_list_lock(void)
213 pthread_mutex_lock(&cpu_list_mutex
);
216 void cpu_list_unlock(void)
218 pthread_mutex_unlock(&cpu_list_mutex
);
223 /***********************************************************/
224 /* CPUX86 core interface */
226 uint64_t cpu_get_tsc(CPUX86State
*env
)
228 return cpu_get_host_ticks();
231 static void write_dt(void *ptr
, unsigned long addr
, unsigned long limit
,
236 e1
= (addr
<< 16) | (limit
& 0xffff);
237 e2
= ((addr
>> 16) & 0xff) | (addr
& 0xff000000) | (limit
& 0x000f0000);
244 static uint64_t *idt_table
;
246 static void set_gate64(void *ptr
, unsigned int type
, unsigned int dpl
,
247 uint64_t addr
, unsigned int sel
)
250 e1
= (addr
& 0xffff) | (sel
<< 16);
251 e2
= (addr
& 0xffff0000) | 0x8000 | (dpl
<< 13) | (type
<< 8);
255 p
[2] = tswap32(addr
>> 32);
258 /* only dpl matters as we do only user space emulation */
259 static void set_idt(int n
, unsigned int dpl
)
261 set_gate64(idt_table
+ n
* 2, 0, dpl
, 0, 0);
264 static void set_gate(void *ptr
, unsigned int type
, unsigned int dpl
,
265 uint32_t addr
, unsigned int sel
)
268 e1
= (addr
& 0xffff) | (sel
<< 16);
269 e2
= (addr
& 0xffff0000) | 0x8000 | (dpl
<< 13) | (type
<< 8);
275 /* only dpl matters as we do only user space emulation */
276 static void set_idt(int n
, unsigned int dpl
)
278 set_gate(idt_table
+ n
, 0, dpl
, 0, 0);
282 void cpu_loop(CPUX86State
*env
)
284 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
287 target_siginfo_t info
;
291 trapnr
= cpu_x86_exec(cs
);
295 /* linux syscall from int $0x80 */
296 env
->regs
[R_EAX
] = do_syscall(env
,
308 /* linux syscall from syscall instruction */
309 env
->regs
[R_EAX
] = do_syscall(env
,
322 info
.si_signo
= TARGET_SIGBUS
;
324 info
.si_code
= TARGET_SI_KERNEL
;
325 info
._sifields
._sigfault
._addr
= 0;
326 queue_signal(env
, info
.si_signo
, &info
);
329 /* XXX: potential problem if ABI32 */
330 #ifndef TARGET_X86_64
331 if (env
->eflags
& VM_MASK
) {
332 handle_vm86_fault(env
);
336 info
.si_signo
= TARGET_SIGSEGV
;
338 info
.si_code
= TARGET_SI_KERNEL
;
339 info
._sifields
._sigfault
._addr
= 0;
340 queue_signal(env
, info
.si_signo
, &info
);
344 info
.si_signo
= TARGET_SIGSEGV
;
346 if (!(env
->error_code
& 1))
347 info
.si_code
= TARGET_SEGV_MAPERR
;
349 info
.si_code
= TARGET_SEGV_ACCERR
;
350 info
._sifields
._sigfault
._addr
= env
->cr
[2];
351 queue_signal(env
, info
.si_signo
, &info
);
354 #ifndef TARGET_X86_64
355 if (env
->eflags
& VM_MASK
) {
356 handle_vm86_trap(env
, trapnr
);
360 /* division by zero */
361 info
.si_signo
= TARGET_SIGFPE
;
363 info
.si_code
= TARGET_FPE_INTDIV
;
364 info
._sifields
._sigfault
._addr
= env
->eip
;
365 queue_signal(env
, info
.si_signo
, &info
);
370 #ifndef TARGET_X86_64
371 if (env
->eflags
& VM_MASK
) {
372 handle_vm86_trap(env
, trapnr
);
376 info
.si_signo
= TARGET_SIGTRAP
;
378 if (trapnr
== EXCP01_DB
) {
379 info
.si_code
= TARGET_TRAP_BRKPT
;
380 info
._sifields
._sigfault
._addr
= env
->eip
;
382 info
.si_code
= TARGET_SI_KERNEL
;
383 info
._sifields
._sigfault
._addr
= 0;
385 queue_signal(env
, info
.si_signo
, &info
);
390 #ifndef TARGET_X86_64
391 if (env
->eflags
& VM_MASK
) {
392 handle_vm86_trap(env
, trapnr
);
396 info
.si_signo
= TARGET_SIGSEGV
;
398 info
.si_code
= TARGET_SI_KERNEL
;
399 info
._sifields
._sigfault
._addr
= 0;
400 queue_signal(env
, info
.si_signo
, &info
);
404 info
.si_signo
= TARGET_SIGILL
;
406 info
.si_code
= TARGET_ILL_ILLOPN
;
407 info
._sifields
._sigfault
._addr
= env
->eip
;
408 queue_signal(env
, info
.si_signo
, &info
);
411 /* just indicate that signals should be handled asap */
417 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
422 info
.si_code
= TARGET_TRAP_BRKPT
;
423 queue_signal(env
, info
.si_signo
, &info
);
428 pc
= env
->segs
[R_CS
].base
+ env
->eip
;
429 EXCP_DUMP(env
, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
433 process_pending_signals(env
);
440 #define get_user_code_u32(x, gaddr, env) \
441 ({ abi_long __r = get_user_u32((x), (gaddr)); \
442 if (!__r && bswap_code(arm_sctlr_b(env))) { \
448 #define get_user_code_u16(x, gaddr, env) \
449 ({ abi_long __r = get_user_u16((x), (gaddr)); \
450 if (!__r && bswap_code(arm_sctlr_b(env))) { \
456 #define get_user_data_u32(x, gaddr, env) \
457 ({ abi_long __r = get_user_u32((x), (gaddr)); \
458 if (!__r && arm_cpu_bswap_data(env)) { \
464 #define get_user_data_u16(x, gaddr, env) \
465 ({ abi_long __r = get_user_u16((x), (gaddr)); \
466 if (!__r && arm_cpu_bswap_data(env)) { \
472 #define put_user_data_u32(x, gaddr, env) \
473 ({ typeof(x) __x = (x); \
474 if (arm_cpu_bswap_data(env)) { \
475 __x = bswap32(__x); \
477 put_user_u32(__x, (gaddr)); \
480 #define put_user_data_u16(x, gaddr, env) \
481 ({ typeof(x) __x = (x); \
482 if (arm_cpu_bswap_data(env)) { \
483 __x = bswap16(__x); \
485 put_user_u16(__x, (gaddr)); \
489 /* Commpage handling -- there is no commpage for AArch64 */
492 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
494 * r0 = pointer to oldval
495 * r1 = pointer to newval
496 * r2 = pointer to target value
499 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
500 * C set if *ptr was changed, clear if no exchange happened
502 * Note segv's in kernel helpers are a bit tricky, we can set the
503 * data address sensibly but the PC address is just the entry point.
505 static void arm_kernel_cmpxchg64_helper(CPUARMState
*env
)
507 uint64_t oldval
, newval
, val
;
509 target_siginfo_t info
;
511 /* Based on the 32 bit code in do_kernel_trap */
513 /* XXX: This only works between threads, not between processes.
514 It's probably possible to implement this with native host
515 operations. However things like ldrex/strex are much harder so
516 there's not much point trying. */
518 cpsr
= cpsr_read(env
);
521 if (get_user_u64(oldval
, env
->regs
[0])) {
522 env
->exception
.vaddress
= env
->regs
[0];
526 if (get_user_u64(newval
, env
->regs
[1])) {
527 env
->exception
.vaddress
= env
->regs
[1];
531 if (get_user_u64(val
, addr
)) {
532 env
->exception
.vaddress
= addr
;
539 if (put_user_u64(val
, addr
)) {
540 env
->exception
.vaddress
= addr
;
550 cpsr_write(env
, cpsr
, CPSR_C
, CPSRWriteByInstr
);
556 /* We get the PC of the entry address - which is as good as anything,
557 on a real kernel what you get depends on which mode it uses. */
558 info
.si_signo
= TARGET_SIGSEGV
;
560 /* XXX: check env->error_code */
561 info
.si_code
= TARGET_SEGV_MAPERR
;
562 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
563 queue_signal(env
, info
.si_signo
, &info
);
566 /* Handle a jump to the kernel code page. */
568 do_kernel_trap(CPUARMState
*env
)
574 switch (env
->regs
[15]) {
575 case 0xffff0fa0: /* __kernel_memory_barrier */
576 /* ??? No-op. Will need to do better for SMP. */
578 case 0xffff0fc0: /* __kernel_cmpxchg */
579 /* XXX: This only works between threads, not between processes.
580 It's probably possible to implement this with native host
581 operations. However things like ldrex/strex are much harder so
582 there's not much point trying. */
584 cpsr
= cpsr_read(env
);
586 /* FIXME: This should SEGV if the access fails. */
587 if (get_user_u32(val
, addr
))
589 if (val
== env
->regs
[0]) {
591 /* FIXME: Check for segfaults. */
592 put_user_u32(val
, addr
);
599 cpsr_write(env
, cpsr
, CPSR_C
, CPSRWriteByInstr
);
602 case 0xffff0fe0: /* __kernel_get_tls */
603 env
->regs
[0] = cpu_get_tls(env
);
605 case 0xffff0f60: /* __kernel_cmpxchg64 */
606 arm_kernel_cmpxchg64_helper(env
);
612 /* Jump back to the caller. */
613 addr
= env
->regs
[14];
618 env
->regs
[15] = addr
;
623 /* Store exclusive handling for AArch32 */
624 static int do_strex(CPUARMState
*env
)
632 if (env
->exclusive_addr
!= env
->exclusive_test
) {
635 /* We know we're always AArch32 so the address is in uint32_t range
636 * unless it was the -1 exclusive-monitor-lost value (which won't
637 * match exclusive_test above).
639 assert(extract64(env
->exclusive_addr
, 32, 32) == 0);
640 addr
= env
->exclusive_addr
;
641 size
= env
->exclusive_info
& 0xf;
644 segv
= get_user_u8(val
, addr
);
647 segv
= get_user_data_u16(val
, addr
, env
);
651 segv
= get_user_data_u32(val
, addr
, env
);
657 env
->exception
.vaddress
= addr
;
662 segv
= get_user_data_u32(valhi
, addr
+ 4, env
);
664 env
->exception
.vaddress
= addr
+ 4;
667 if (arm_cpu_bswap_data(env
)) {
668 val
= deposit64((uint64_t)valhi
, 32, 32, val
);
670 val
= deposit64(val
, 32, 32, valhi
);
673 if (val
!= env
->exclusive_val
) {
677 val
= env
->regs
[(env
->exclusive_info
>> 8) & 0xf];
680 segv
= put_user_u8(val
, addr
);
683 segv
= put_user_data_u16(val
, addr
, env
);
687 segv
= put_user_data_u32(val
, addr
, env
);
691 env
->exception
.vaddress
= addr
;
695 val
= env
->regs
[(env
->exclusive_info
>> 12) & 0xf];
696 segv
= put_user_data_u32(val
, addr
+ 4, env
);
698 env
->exception
.vaddress
= addr
+ 4;
705 env
->regs
[(env
->exclusive_info
>> 4) & 0xf] = rc
;
711 void cpu_loop(CPUARMState
*env
)
713 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
715 unsigned int n
, insn
;
716 target_siginfo_t info
;
721 trapnr
= cpu_arm_exec(cs
);
726 TaskState
*ts
= cs
->opaque
;
730 /* we handle the FPU emulation here, as Linux */
731 /* we get the opcode */
732 /* FIXME - what to do if get_user() fails? */
733 get_user_code_u32(opcode
, env
->regs
[15], env
);
735 rc
= EmulateAll(opcode
, &ts
->fpa
, env
);
736 if (rc
== 0) { /* illegal instruction */
737 info
.si_signo
= TARGET_SIGILL
;
739 info
.si_code
= TARGET_ILL_ILLOPN
;
740 info
._sifields
._sigfault
._addr
= env
->regs
[15];
741 queue_signal(env
, info
.si_signo
, &info
);
742 } else if (rc
< 0) { /* FP exception */
745 /* translate softfloat flags to FPSR flags */
746 if (-rc
& float_flag_invalid
)
748 if (-rc
& float_flag_divbyzero
)
750 if (-rc
& float_flag_overflow
)
752 if (-rc
& float_flag_underflow
)
754 if (-rc
& float_flag_inexact
)
757 FPSR fpsr
= ts
->fpa
.fpsr
;
758 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
760 if (fpsr
& (arm_fpe
<< 16)) { /* exception enabled? */
761 info
.si_signo
= TARGET_SIGFPE
;
764 /* ordered by priority, least first */
765 if (arm_fpe
& BIT_IXC
) info
.si_code
= TARGET_FPE_FLTRES
;
766 if (arm_fpe
& BIT_UFC
) info
.si_code
= TARGET_FPE_FLTUND
;
767 if (arm_fpe
& BIT_OFC
) info
.si_code
= TARGET_FPE_FLTOVF
;
768 if (arm_fpe
& BIT_DZC
) info
.si_code
= TARGET_FPE_FLTDIV
;
769 if (arm_fpe
& BIT_IOC
) info
.si_code
= TARGET_FPE_FLTINV
;
771 info
._sifields
._sigfault
._addr
= env
->regs
[15];
772 queue_signal(env
, info
.si_signo
, &info
);
777 /* accumulate unenabled exceptions */
778 if ((!(fpsr
& BIT_IXE
)) && (arm_fpe
& BIT_IXC
))
780 if ((!(fpsr
& BIT_UFE
)) && (arm_fpe
& BIT_UFC
))
782 if ((!(fpsr
& BIT_OFE
)) && (arm_fpe
& BIT_OFC
))
784 if ((!(fpsr
& BIT_DZE
)) && (arm_fpe
& BIT_DZC
))
786 if ((!(fpsr
& BIT_IOE
)) && (arm_fpe
& BIT_IOC
))
789 } else { /* everything OK */
800 if (trapnr
== EXCP_BKPT
) {
802 /* FIXME - what to do if get_user() fails? */
803 get_user_code_u16(insn
, env
->regs
[15], env
);
807 /* FIXME - what to do if get_user() fails? */
808 get_user_code_u32(insn
, env
->regs
[15], env
);
809 n
= (insn
& 0xf) | ((insn
>> 4) & 0xff0);
814 /* FIXME - what to do if get_user() fails? */
815 get_user_code_u16(insn
, env
->regs
[15] - 2, env
);
818 /* FIXME - what to do if get_user() fails? */
819 get_user_code_u32(insn
, env
->regs
[15] - 4, env
);
824 if (n
== ARM_NR_cacheflush
) {
826 } else if (n
== ARM_NR_semihosting
827 || n
== ARM_NR_thumb_semihosting
) {
828 env
->regs
[0] = do_arm_semihosting (env
);
829 } else if (n
== 0 || n
>= ARM_SYSCALL_BASE
|| env
->thumb
) {
831 if (env
->thumb
|| n
== 0) {
834 n
-= ARM_SYSCALL_BASE
;
837 if ( n
> ARM_NR_BASE
) {
839 case ARM_NR_cacheflush
:
843 cpu_set_tls(env
, env
->regs
[0]);
846 case ARM_NR_breakpoint
:
847 env
->regs
[15] -= env
->thumb
? 2 : 4;
850 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
852 env
->regs
[0] = -TARGET_ENOSYS
;
856 env
->regs
[0] = do_syscall(env
,
872 /* just indicate that signals should be handled asap */
875 if (!do_strex(env
)) {
878 /* fall through for segv */
879 case EXCP_PREFETCH_ABORT
:
880 case EXCP_DATA_ABORT
:
881 addr
= env
->exception
.vaddress
;
883 info
.si_signo
= TARGET_SIGSEGV
;
885 /* XXX: check env->error_code */
886 info
.si_code
= TARGET_SEGV_MAPERR
;
887 info
._sifields
._sigfault
._addr
= addr
;
888 queue_signal(env
, info
.si_signo
, &info
);
896 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
901 info
.si_code
= TARGET_TRAP_BRKPT
;
902 queue_signal(env
, info
.si_signo
, &info
);
906 case EXCP_KERNEL_TRAP
:
907 if (do_kernel_trap(env
))
912 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
915 process_pending_signals(env
);
922 * Handle AArch64 store-release exclusive
924 * rs = gets the status result of store exclusive
925 * rt = is the register that is stored
926 * rt2 = is the second register store (in STP)
929 static int do_strex_a64(CPUARMState
*env
)
940 /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
941 size
= extract32(env
->exclusive_info
, 0, 2);
942 is_pair
= extract32(env
->exclusive_info
, 2, 1);
943 rs
= extract32(env
->exclusive_info
, 4, 5);
944 rt
= extract32(env
->exclusive_info
, 9, 5);
945 rt2
= extract32(env
->exclusive_info
, 14, 5);
947 addr
= env
->exclusive_addr
;
949 if (addr
!= env
->exclusive_test
) {
955 segv
= get_user_u8(val
, addr
);
958 segv
= get_user_u16(val
, addr
);
961 segv
= get_user_u32(val
, addr
);
964 segv
= get_user_u64(val
, addr
);
970 env
->exception
.vaddress
= addr
;
973 if (val
!= env
->exclusive_val
) {
978 segv
= get_user_u32(val
, addr
+ 4);
980 segv
= get_user_u64(val
, addr
+ 8);
983 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
986 if (val
!= env
->exclusive_high
) {
990 /* handle the zero register */
991 val
= rt
== 31 ? 0 : env
->xregs
[rt
];
994 segv
= put_user_u8(val
, addr
);
997 segv
= put_user_u16(val
, addr
);
1000 segv
= put_user_u32(val
, addr
);
1003 segv
= put_user_u64(val
, addr
);
1010 /* handle the zero register */
1011 val
= rt2
== 31 ? 0 : env
->xregs
[rt2
];
1013 segv
= put_user_u32(val
, addr
+ 4);
1015 segv
= put_user_u64(val
, addr
+ 8);
1018 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
1025 /* rs == 31 encodes a write to the ZR, thus throwing away
1026 * the status return. This is rather silly but valid.
1029 env
->xregs
[rs
] = rc
;
1032 /* instruction faulted, PC does not advance */
1033 /* either way a strex releases any exclusive lock we have */
1034 env
->exclusive_addr
= -1;
1039 /* AArch64 main loop */
1040 void cpu_loop(CPUARMState
*env
)
1042 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
1044 target_siginfo_t info
;
1048 trapnr
= cpu_arm_exec(cs
);
1053 env
->xregs
[0] = do_syscall(env
,
1063 case EXCP_INTERRUPT
:
1064 /* just indicate that signals should be handled asap */
1067 info
.si_signo
= TARGET_SIGILL
;
1069 info
.si_code
= TARGET_ILL_ILLOPN
;
1070 info
._sifields
._sigfault
._addr
= env
->pc
;
1071 queue_signal(env
, info
.si_signo
, &info
);
1074 if (!do_strex_a64(env
)) {
1077 /* fall through for segv */
1078 case EXCP_PREFETCH_ABORT
:
1079 case EXCP_DATA_ABORT
:
1080 info
.si_signo
= TARGET_SIGSEGV
;
1082 /* XXX: check env->error_code */
1083 info
.si_code
= TARGET_SEGV_MAPERR
;
1084 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
1085 queue_signal(env
, info
.si_signo
, &info
);
1089 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1091 info
.si_signo
= sig
;
1093 info
.si_code
= TARGET_TRAP_BRKPT
;
1094 queue_signal(env
, info
.si_signo
, &info
);
1098 env
->xregs
[0] = do_arm_semihosting(env
);
1101 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1104 process_pending_signals(env
);
1105 /* Exception return on AArch64 always clears the exclusive monitor,
1106 * so any return to running guest code implies this.
1107 * A strex (successful or otherwise) also clears the monitor, so
1108 * we don't need to specialcase EXCP_STREX.
1110 env
->exclusive_addr
= -1;
1113 #endif /* ndef TARGET_ABI32 */
1117 #ifdef TARGET_UNICORE32
1119 void cpu_loop(CPUUniCore32State
*env
)
1121 CPUState
*cs
= CPU(uc32_env_get_cpu(env
));
1123 unsigned int n
, insn
;
1124 target_siginfo_t info
;
1128 trapnr
= uc32_cpu_exec(cs
);
1131 case UC32_EXCP_PRIV
:
1134 get_user_u32(insn
, env
->regs
[31] - 4);
1135 n
= insn
& 0xffffff;
1137 if (n
>= UC32_SYSCALL_BASE
) {
1139 n
-= UC32_SYSCALL_BASE
;
1140 if (n
== UC32_SYSCALL_NR_set_tls
) {
1141 cpu_set_tls(env
, env
->regs
[0]);
1144 env
->regs
[0] = do_syscall(env
,
1159 case UC32_EXCP_DTRAP
:
1160 case UC32_EXCP_ITRAP
:
1161 info
.si_signo
= TARGET_SIGSEGV
;
1163 /* XXX: check env->error_code */
1164 info
.si_code
= TARGET_SEGV_MAPERR
;
1165 info
._sifields
._sigfault
._addr
= env
->cp0
.c4_faultaddr
;
1166 queue_signal(env
, info
.si_signo
, &info
);
1168 case EXCP_INTERRUPT
:
1169 /* just indicate that signals should be handled asap */
1175 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1177 info
.si_signo
= sig
;
1179 info
.si_code
= TARGET_TRAP_BRKPT
;
1180 queue_signal(env
, info
.si_signo
, &info
);
1187 process_pending_signals(env
);
1191 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1197 #define SPARC64_STACK_BIAS 2047
1201 /* WARNING: dealing with register windows _is_ complicated. More info
1202 can be found at http://www.sics.se/~psm/sparcstack.html */
1203 static inline int get_reg_index(CPUSPARCState
*env
, int cwp
, int index
)
1205 index
= (index
+ cwp
* 16) % (16 * env
->nwindows
);
1206 /* wrap handling : if cwp is on the last window, then we use the
1207 registers 'after' the end */
1208 if (index
< 8 && env
->cwp
== env
->nwindows
- 1)
1209 index
+= 16 * env
->nwindows
;
1213 /* save the register window 'cwp1' */
1214 static inline void save_window_offset(CPUSPARCState
*env
, int cwp1
)
1219 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1220 #ifdef TARGET_SPARC64
1222 sp_ptr
+= SPARC64_STACK_BIAS
;
1224 #if defined(DEBUG_WIN)
1225 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" save_cwp=%d\n",
1228 for(i
= 0; i
< 16; i
++) {
1229 /* FIXME - what to do if put_user() fails? */
1230 put_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1231 sp_ptr
+= sizeof(abi_ulong
);
1235 static void save_window(CPUSPARCState
*env
)
1237 #ifndef TARGET_SPARC64
1238 unsigned int new_wim
;
1239 new_wim
= ((env
->wim
>> 1) | (env
->wim
<< (env
->nwindows
- 1))) &
1240 ((1LL << env
->nwindows
) - 1);
1241 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1244 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1250 static void restore_window(CPUSPARCState
*env
)
1252 #ifndef TARGET_SPARC64
1253 unsigned int new_wim
;
1255 unsigned int i
, cwp1
;
1258 #ifndef TARGET_SPARC64
1259 new_wim
= ((env
->wim
<< 1) | (env
->wim
>> (env
->nwindows
- 1))) &
1260 ((1LL << env
->nwindows
) - 1);
1263 /* restore the invalid window */
1264 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1265 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1266 #ifdef TARGET_SPARC64
1268 sp_ptr
+= SPARC64_STACK_BIAS
;
1270 #if defined(DEBUG_WIN)
1271 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" load_cwp=%d\n",
1274 for(i
= 0; i
< 16; i
++) {
1275 /* FIXME - what to do if get_user() fails? */
1276 get_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1277 sp_ptr
+= sizeof(abi_ulong
);
1279 #ifdef TARGET_SPARC64
1281 if (env
->cleanwin
< env
->nwindows
- 1)
1289 static void flush_windows(CPUSPARCState
*env
)
1295 /* if restore would invoke restore_window(), then we can stop */
1296 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ offset
);
1297 #ifndef TARGET_SPARC64
1298 if (env
->wim
& (1 << cwp1
))
1301 if (env
->canrestore
== 0)
1306 save_window_offset(env
, cwp1
);
1309 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1310 #ifndef TARGET_SPARC64
1311 /* set wim so that restore will reload the registers */
1312 env
->wim
= 1 << cwp1
;
1314 #if defined(DEBUG_WIN)
1315 printf("flush_windows: nb=%d\n", offset
- 1);
1319 void cpu_loop (CPUSPARCState
*env
)
1321 CPUState
*cs
= CPU(sparc_env_get_cpu(env
));
1324 target_siginfo_t info
;
1328 trapnr
= cpu_sparc_exec(cs
);
1331 /* Compute PSR before exposing state. */
1332 if (env
->cc_op
!= CC_OP_FLAGS
) {
1337 #ifndef TARGET_SPARC64
1344 ret
= do_syscall (env
, env
->gregs
[1],
1345 env
->regwptr
[0], env
->regwptr
[1],
1346 env
->regwptr
[2], env
->regwptr
[3],
1347 env
->regwptr
[4], env
->regwptr
[5],
1349 if ((abi_ulong
)ret
>= (abi_ulong
)(-515)) {
1350 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1351 env
->xcc
|= PSR_CARRY
;
1353 env
->psr
|= PSR_CARRY
;
1357 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1358 env
->xcc
&= ~PSR_CARRY
;
1360 env
->psr
&= ~PSR_CARRY
;
1363 env
->regwptr
[0] = ret
;
1364 /* next instruction */
1366 env
->npc
= env
->npc
+ 4;
1368 case 0x83: /* flush windows */
1373 /* next instruction */
1375 env
->npc
= env
->npc
+ 4;
1377 #ifndef TARGET_SPARC64
1378 case TT_WIN_OVF
: /* window overflow */
1381 case TT_WIN_UNF
: /* window underflow */
1382 restore_window(env
);
1387 info
.si_signo
= TARGET_SIGSEGV
;
1389 /* XXX: check env->error_code */
1390 info
.si_code
= TARGET_SEGV_MAPERR
;
1391 info
._sifields
._sigfault
._addr
= env
->mmuregs
[4];
1392 queue_signal(env
, info
.si_signo
, &info
);
1396 case TT_SPILL
: /* window overflow */
1399 case TT_FILL
: /* window underflow */
1400 restore_window(env
);
1405 info
.si_signo
= TARGET_SIGSEGV
;
1407 /* XXX: check env->error_code */
1408 info
.si_code
= TARGET_SEGV_MAPERR
;
1409 if (trapnr
== TT_DFAULT
)
1410 info
._sifields
._sigfault
._addr
= env
->dmmuregs
[4];
1412 info
._sifields
._sigfault
._addr
= cpu_tsptr(env
)->tpc
;
1413 queue_signal(env
, info
.si_signo
, &info
);
1416 #ifndef TARGET_ABI32
1419 sparc64_get_context(env
);
1423 sparc64_set_context(env
);
1427 case EXCP_INTERRUPT
:
1428 /* just indicate that signals should be handled asap */
1432 info
.si_signo
= TARGET_SIGILL
;
1434 info
.si_code
= TARGET_ILL_ILLOPC
;
1435 info
._sifields
._sigfault
._addr
= env
->pc
;
1436 queue_signal(env
, info
.si_signo
, &info
);
1443 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1446 info
.si_signo
= sig
;
1448 info
.si_code
= TARGET_TRAP_BRKPT
;
1449 queue_signal(env
, info
.si_signo
, &info
);
1454 printf ("Unhandled trap: 0x%x\n", trapnr
);
1455 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1458 process_pending_signals (env
);
1465 static inline uint64_t cpu_ppc_get_tb(CPUPPCState
*env
)
1467 return cpu_get_host_ticks();
1470 uint64_t cpu_ppc_load_tbl(CPUPPCState
*env
)
1472 return cpu_ppc_get_tb(env
);
1475 uint32_t cpu_ppc_load_tbu(CPUPPCState
*env
)
1477 return cpu_ppc_get_tb(env
) >> 32;
1480 uint64_t cpu_ppc_load_atbl(CPUPPCState
*env
)
1482 return cpu_ppc_get_tb(env
);
1485 uint32_t cpu_ppc_load_atbu(CPUPPCState
*env
)
1487 return cpu_ppc_get_tb(env
) >> 32;
1490 uint32_t cpu_ppc601_load_rtcu(CPUPPCState
*env
)
1491 __attribute__ (( alias ("cpu_ppc_load_tbu") ));
1493 uint32_t cpu_ppc601_load_rtcl(CPUPPCState
*env
)
1495 return cpu_ppc_load_tbl(env
) & 0x3FFFFF80;
1498 /* XXX: to be fixed */
1499 int ppc_dcr_read (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t *valp
)
1504 int ppc_dcr_write (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t val
)
1509 static int do_store_exclusive(CPUPPCState
*env
)
1512 target_ulong page_addr
;
1513 target_ulong val
, val2
__attribute__((unused
)) = 0;
1517 addr
= env
->reserve_ea
;
1518 page_addr
= addr
& TARGET_PAGE_MASK
;
1521 flags
= page_get_flags(page_addr
);
1522 if ((flags
& PAGE_READ
) == 0) {
1525 int reg
= env
->reserve_info
& 0x1f;
1526 int size
= env
->reserve_info
>> 5;
1529 if (addr
== env
->reserve_addr
) {
1531 case 1: segv
= get_user_u8(val
, addr
); break;
1532 case 2: segv
= get_user_u16(val
, addr
); break;
1533 case 4: segv
= get_user_u32(val
, addr
); break;
1534 #if defined(TARGET_PPC64)
1535 case 8: segv
= get_user_u64(val
, addr
); break;
1537 segv
= get_user_u64(val
, addr
);
1539 segv
= get_user_u64(val2
, addr
+ 8);
1546 if (!segv
&& val
== env
->reserve_val
) {
1547 val
= env
->gpr
[reg
];
1549 case 1: segv
= put_user_u8(val
, addr
); break;
1550 case 2: segv
= put_user_u16(val
, addr
); break;
1551 case 4: segv
= put_user_u32(val
, addr
); break;
1552 #if defined(TARGET_PPC64)
1553 case 8: segv
= put_user_u64(val
, addr
); break;
1555 if (val2
== env
->reserve_val2
) {
1558 val
= env
->gpr
[reg
+1];
1560 val2
= env
->gpr
[reg
+1];
1562 segv
= put_user_u64(val
, addr
);
1564 segv
= put_user_u64(val2
, addr
+ 8);
1577 env
->crf
[0] = (stored
<< 1) | xer_so
;
1578 env
->reserve_addr
= (target_ulong
)-1;
1588 void cpu_loop(CPUPPCState
*env
)
1590 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
1591 target_siginfo_t info
;
1597 trapnr
= cpu_ppc_exec(cs
);
1600 case POWERPC_EXCP_NONE
:
1603 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1604 cpu_abort(cs
, "Critical interrupt while in user mode. "
1607 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1608 cpu_abort(cs
, "Machine check exception while in user mode. "
1611 case POWERPC_EXCP_DSI
: /* Data storage exception */
1612 EXCP_DUMP(env
, "Invalid data memory access: 0x" TARGET_FMT_lx
"\n",
1614 /* XXX: check this. Seems bugged */
1615 switch (env
->error_code
& 0xFF000000) {
1617 info
.si_signo
= TARGET_SIGSEGV
;
1619 info
.si_code
= TARGET_SEGV_MAPERR
;
1622 info
.si_signo
= TARGET_SIGILL
;
1624 info
.si_code
= TARGET_ILL_ILLADR
;
1627 info
.si_signo
= TARGET_SIGSEGV
;
1629 info
.si_code
= TARGET_SEGV_ACCERR
;
1632 /* Let's send a regular segfault... */
1633 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1635 info
.si_signo
= TARGET_SIGSEGV
;
1637 info
.si_code
= TARGET_SEGV_MAPERR
;
1640 info
._sifields
._sigfault
._addr
= env
->nip
;
1641 queue_signal(env
, info
.si_signo
, &info
);
1643 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1644 EXCP_DUMP(env
, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1645 "\n", env
->spr
[SPR_SRR0
]);
1646 /* XXX: check this */
1647 switch (env
->error_code
& 0xFF000000) {
1649 info
.si_signo
= TARGET_SIGSEGV
;
1651 info
.si_code
= TARGET_SEGV_MAPERR
;
1655 info
.si_signo
= TARGET_SIGSEGV
;
1657 info
.si_code
= TARGET_SEGV_ACCERR
;
1660 /* Let's send a regular segfault... */
1661 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1663 info
.si_signo
= TARGET_SIGSEGV
;
1665 info
.si_code
= TARGET_SEGV_MAPERR
;
1668 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1669 queue_signal(env
, info
.si_signo
, &info
);
1671 case POWERPC_EXCP_EXTERNAL
: /* External input */
1672 cpu_abort(cs
, "External interrupt while in user mode. "
1675 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1676 EXCP_DUMP(env
, "Unaligned memory access\n");
1677 /* XXX: check this */
1678 info
.si_signo
= TARGET_SIGBUS
;
1680 info
.si_code
= TARGET_BUS_ADRALN
;
1681 info
._sifields
._sigfault
._addr
= env
->nip
;
1682 queue_signal(env
, info
.si_signo
, &info
);
1684 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1685 /* XXX: check this */
1686 switch (env
->error_code
& ~0xF) {
1687 case POWERPC_EXCP_FP
:
1688 EXCP_DUMP(env
, "Floating point program exception\n");
1689 info
.si_signo
= TARGET_SIGFPE
;
1691 switch (env
->error_code
& 0xF) {
1692 case POWERPC_EXCP_FP_OX
:
1693 info
.si_code
= TARGET_FPE_FLTOVF
;
1695 case POWERPC_EXCP_FP_UX
:
1696 info
.si_code
= TARGET_FPE_FLTUND
;
1698 case POWERPC_EXCP_FP_ZX
:
1699 case POWERPC_EXCP_FP_VXZDZ
:
1700 info
.si_code
= TARGET_FPE_FLTDIV
;
1702 case POWERPC_EXCP_FP_XX
:
1703 info
.si_code
= TARGET_FPE_FLTRES
;
1705 case POWERPC_EXCP_FP_VXSOFT
:
1706 info
.si_code
= TARGET_FPE_FLTINV
;
1708 case POWERPC_EXCP_FP_VXSNAN
:
1709 case POWERPC_EXCP_FP_VXISI
:
1710 case POWERPC_EXCP_FP_VXIDI
:
1711 case POWERPC_EXCP_FP_VXIMZ
:
1712 case POWERPC_EXCP_FP_VXVC
:
1713 case POWERPC_EXCP_FP_VXSQRT
:
1714 case POWERPC_EXCP_FP_VXCVI
:
1715 info
.si_code
= TARGET_FPE_FLTSUB
;
1718 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1723 case POWERPC_EXCP_INVAL
:
1724 EXCP_DUMP(env
, "Invalid instruction\n");
1725 info
.si_signo
= TARGET_SIGILL
;
1727 switch (env
->error_code
& 0xF) {
1728 case POWERPC_EXCP_INVAL_INVAL
:
1729 info
.si_code
= TARGET_ILL_ILLOPC
;
1731 case POWERPC_EXCP_INVAL_LSWX
:
1732 info
.si_code
= TARGET_ILL_ILLOPN
;
1734 case POWERPC_EXCP_INVAL_SPR
:
1735 info
.si_code
= TARGET_ILL_PRVREG
;
1737 case POWERPC_EXCP_INVAL_FP
:
1738 info
.si_code
= TARGET_ILL_COPROC
;
1741 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1742 env
->error_code
& 0xF);
1743 info
.si_code
= TARGET_ILL_ILLADR
;
1747 case POWERPC_EXCP_PRIV
:
1748 EXCP_DUMP(env
, "Privilege violation\n");
1749 info
.si_signo
= TARGET_SIGILL
;
1751 switch (env
->error_code
& 0xF) {
1752 case POWERPC_EXCP_PRIV_OPC
:
1753 info
.si_code
= TARGET_ILL_PRVOPC
;
1755 case POWERPC_EXCP_PRIV_REG
:
1756 info
.si_code
= TARGET_ILL_PRVREG
;
1759 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1760 env
->error_code
& 0xF);
1761 info
.si_code
= TARGET_ILL_PRVOPC
;
1765 case POWERPC_EXCP_TRAP
:
1766 cpu_abort(cs
, "Tried to call a TRAP\n");
1769 /* Should not happen ! */
1770 cpu_abort(cs
, "Unknown program exception (%02x)\n",
1774 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1775 queue_signal(env
, info
.si_signo
, &info
);
1777 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1778 EXCP_DUMP(env
, "No floating point allowed\n");
1779 info
.si_signo
= TARGET_SIGILL
;
1781 info
.si_code
= TARGET_ILL_COPROC
;
1782 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1783 queue_signal(env
, info
.si_signo
, &info
);
1785 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1786 cpu_abort(cs
, "Syscall exception while in user mode. "
1789 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1790 EXCP_DUMP(env
, "No APU instruction allowed\n");
1791 info
.si_signo
= TARGET_SIGILL
;
1793 info
.si_code
= TARGET_ILL_COPROC
;
1794 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1795 queue_signal(env
, info
.si_signo
, &info
);
1797 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1798 cpu_abort(cs
, "Decrementer interrupt while in user mode. "
1801 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1802 cpu_abort(cs
, "Fix interval timer interrupt while in user mode. "
1805 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1806 cpu_abort(cs
, "Watchdog timer interrupt while in user mode. "
1809 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1810 cpu_abort(cs
, "Data TLB exception while in user mode. "
1813 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1814 cpu_abort(cs
, "Instruction TLB exception while in user mode. "
1817 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1818 EXCP_DUMP(env
, "No SPE/floating-point instruction allowed\n");
1819 info
.si_signo
= TARGET_SIGILL
;
1821 info
.si_code
= TARGET_ILL_COPROC
;
1822 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1823 queue_signal(env
, info
.si_signo
, &info
);
1825 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1826 cpu_abort(cs
, "Embedded floating-point data IRQ not handled\n");
1828 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1829 cpu_abort(cs
, "Embedded floating-point round IRQ not handled\n");
1831 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1832 cpu_abort(cs
, "Performance monitor exception not handled\n");
1834 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1835 cpu_abort(cs
, "Doorbell interrupt while in user mode. "
1838 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1839 cpu_abort(cs
, "Doorbell critical interrupt while in user mode. "
1842 case POWERPC_EXCP_RESET
: /* System reset exception */
1843 cpu_abort(cs
, "Reset interrupt while in user mode. "
1846 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1847 cpu_abort(cs
, "Data segment exception while in user mode. "
1850 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1851 cpu_abort(cs
, "Instruction segment exception "
1852 "while in user mode. Aborting\n");
1854 /* PowerPC 64 with hypervisor mode support */
1855 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1856 cpu_abort(cs
, "Hypervisor decrementer interrupt "
1857 "while in user mode. Aborting\n");
1859 case POWERPC_EXCP_TRACE
: /* Trace exception */
1861 * we use this exception to emulate step-by-step execution mode.
1864 /* PowerPC 64 with hypervisor mode support */
1865 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1866 cpu_abort(cs
, "Hypervisor data storage exception "
1867 "while in user mode. Aborting\n");
1869 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1870 cpu_abort(cs
, "Hypervisor instruction storage exception "
1871 "while in user mode. Aborting\n");
1873 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1874 cpu_abort(cs
, "Hypervisor data segment exception "
1875 "while in user mode. Aborting\n");
1877 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1878 cpu_abort(cs
, "Hypervisor instruction segment exception "
1879 "while in user mode. Aborting\n");
1881 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1882 EXCP_DUMP(env
, "No Altivec instructions allowed\n");
1883 info
.si_signo
= TARGET_SIGILL
;
1885 info
.si_code
= TARGET_ILL_COPROC
;
1886 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1887 queue_signal(env
, info
.si_signo
, &info
);
1889 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1890 cpu_abort(cs
, "Programmable interval timer interrupt "
1891 "while in user mode. Aborting\n");
1893 case POWERPC_EXCP_IO
: /* IO error exception */
1894 cpu_abort(cs
, "IO error exception while in user mode. "
1897 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1898 cpu_abort(cs
, "Run mode exception while in user mode. "
1901 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1902 cpu_abort(cs
, "Emulation trap exception not handled\n");
1904 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1905 cpu_abort(cs
, "Instruction fetch TLB exception "
1906 "while in user-mode. Aborting");
1908 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1909 cpu_abort(cs
, "Data load TLB exception while in user-mode. "
1912 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
1913 cpu_abort(cs
, "Data store TLB exception while in user-mode. "
1916 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
1917 cpu_abort(cs
, "Floating-point assist exception not handled\n");
1919 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1920 cpu_abort(cs
, "Instruction address breakpoint exception "
1923 case POWERPC_EXCP_SMI
: /* System management interrupt */
1924 cpu_abort(cs
, "System management interrupt while in user mode. "
1927 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1928 cpu_abort(cs
, "Thermal interrupt interrupt while in user mode. "
1931 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
1932 cpu_abort(cs
, "Performance monitor exception not handled\n");
1934 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1935 cpu_abort(cs
, "Vector assist exception not handled\n");
1937 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
1938 cpu_abort(cs
, "Soft patch exception not handled\n");
1940 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1941 cpu_abort(cs
, "Maintenance exception while in user mode. "
1944 case POWERPC_EXCP_STOP
: /* stop translation */
1945 /* We did invalidate the instruction cache. Go on */
1947 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
1948 /* We just stopped because of a branch. Go on */
1950 case POWERPC_EXCP_SYSCALL_USER
:
1951 /* system call in user-mode emulation */
1953 * PPC ABI uses overflow flag in cr0 to signal an error
1956 env
->crf
[0] &= ~0x1;
1957 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
1958 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
1960 if (ret
== (target_ulong
)(-TARGET_QEMU_ESIGRETURN
)) {
1961 /* Returning from a successful sigreturn syscall.
1962 Avoid corrupting register state. */
1965 if (ret
> (target_ulong
)(-515)) {
1971 case POWERPC_EXCP_STCX
:
1972 if (do_store_exclusive(env
)) {
1973 info
.si_signo
= TARGET_SIGSEGV
;
1975 info
.si_code
= TARGET_SEGV_MAPERR
;
1976 info
._sifields
._sigfault
._addr
= env
->nip
;
1977 queue_signal(env
, info
.si_signo
, &info
);
1984 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1986 info
.si_signo
= sig
;
1988 info
.si_code
= TARGET_TRAP_BRKPT
;
1989 queue_signal(env
, info
.si_signo
, &info
);
1993 case EXCP_INTERRUPT
:
1994 /* just indicate that signals should be handled asap */
1997 cpu_abort(cs
, "Unknown exception 0x%d. Aborting\n", trapnr
);
2000 process_pending_signals(env
);
2007 # ifdef TARGET_ABI_MIPSO32
2008 # define MIPS_SYS(name, args) args,
2009 static const uint8_t mips_syscall_args
[] = {
2010 MIPS_SYS(sys_syscall
, 8) /* 4000 */
2011 MIPS_SYS(sys_exit
, 1)
2012 MIPS_SYS(sys_fork
, 0)
2013 MIPS_SYS(sys_read
, 3)
2014 MIPS_SYS(sys_write
, 3)
2015 MIPS_SYS(sys_open
, 3) /* 4005 */
2016 MIPS_SYS(sys_close
, 1)
2017 MIPS_SYS(sys_waitpid
, 3)
2018 MIPS_SYS(sys_creat
, 2)
2019 MIPS_SYS(sys_link
, 2)
2020 MIPS_SYS(sys_unlink
, 1) /* 4010 */
2021 MIPS_SYS(sys_execve
, 0)
2022 MIPS_SYS(sys_chdir
, 1)
2023 MIPS_SYS(sys_time
, 1)
2024 MIPS_SYS(sys_mknod
, 3)
2025 MIPS_SYS(sys_chmod
, 2) /* 4015 */
2026 MIPS_SYS(sys_lchown
, 3)
2027 MIPS_SYS(sys_ni_syscall
, 0)
2028 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_stat */
2029 MIPS_SYS(sys_lseek
, 3)
2030 MIPS_SYS(sys_getpid
, 0) /* 4020 */
2031 MIPS_SYS(sys_mount
, 5)
2032 MIPS_SYS(sys_umount
, 1)
2033 MIPS_SYS(sys_setuid
, 1)
2034 MIPS_SYS(sys_getuid
, 0)
2035 MIPS_SYS(sys_stime
, 1) /* 4025 */
2036 MIPS_SYS(sys_ptrace
, 4)
2037 MIPS_SYS(sys_alarm
, 1)
2038 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_fstat */
2039 MIPS_SYS(sys_pause
, 0)
2040 MIPS_SYS(sys_utime
, 2) /* 4030 */
2041 MIPS_SYS(sys_ni_syscall
, 0)
2042 MIPS_SYS(sys_ni_syscall
, 0)
2043 MIPS_SYS(sys_access
, 2)
2044 MIPS_SYS(sys_nice
, 1)
2045 MIPS_SYS(sys_ni_syscall
, 0) /* 4035 */
2046 MIPS_SYS(sys_sync
, 0)
2047 MIPS_SYS(sys_kill
, 2)
2048 MIPS_SYS(sys_rename
, 2)
2049 MIPS_SYS(sys_mkdir
, 2)
2050 MIPS_SYS(sys_rmdir
, 1) /* 4040 */
2051 MIPS_SYS(sys_dup
, 1)
2052 MIPS_SYS(sys_pipe
, 0)
2053 MIPS_SYS(sys_times
, 1)
2054 MIPS_SYS(sys_ni_syscall
, 0)
2055 MIPS_SYS(sys_brk
, 1) /* 4045 */
2056 MIPS_SYS(sys_setgid
, 1)
2057 MIPS_SYS(sys_getgid
, 0)
2058 MIPS_SYS(sys_ni_syscall
, 0) /* was signal(2) */
2059 MIPS_SYS(sys_geteuid
, 0)
2060 MIPS_SYS(sys_getegid
, 0) /* 4050 */
2061 MIPS_SYS(sys_acct
, 0)
2062 MIPS_SYS(sys_umount2
, 2)
2063 MIPS_SYS(sys_ni_syscall
, 0)
2064 MIPS_SYS(sys_ioctl
, 3)
2065 MIPS_SYS(sys_fcntl
, 3) /* 4055 */
2066 MIPS_SYS(sys_ni_syscall
, 2)
2067 MIPS_SYS(sys_setpgid
, 2)
2068 MIPS_SYS(sys_ni_syscall
, 0)
2069 MIPS_SYS(sys_olduname
, 1)
2070 MIPS_SYS(sys_umask
, 1) /* 4060 */
2071 MIPS_SYS(sys_chroot
, 1)
2072 MIPS_SYS(sys_ustat
, 2)
2073 MIPS_SYS(sys_dup2
, 2)
2074 MIPS_SYS(sys_getppid
, 0)
2075 MIPS_SYS(sys_getpgrp
, 0) /* 4065 */
2076 MIPS_SYS(sys_setsid
, 0)
2077 MIPS_SYS(sys_sigaction
, 3)
2078 MIPS_SYS(sys_sgetmask
, 0)
2079 MIPS_SYS(sys_ssetmask
, 1)
2080 MIPS_SYS(sys_setreuid
, 2) /* 4070 */
2081 MIPS_SYS(sys_setregid
, 2)
2082 MIPS_SYS(sys_sigsuspend
, 0)
2083 MIPS_SYS(sys_sigpending
, 1)
2084 MIPS_SYS(sys_sethostname
, 2)
2085 MIPS_SYS(sys_setrlimit
, 2) /* 4075 */
2086 MIPS_SYS(sys_getrlimit
, 2)
2087 MIPS_SYS(sys_getrusage
, 2)
2088 MIPS_SYS(sys_gettimeofday
, 2)
2089 MIPS_SYS(sys_settimeofday
, 2)
2090 MIPS_SYS(sys_getgroups
, 2) /* 4080 */
2091 MIPS_SYS(sys_setgroups
, 2)
2092 MIPS_SYS(sys_ni_syscall
, 0) /* old_select */
2093 MIPS_SYS(sys_symlink
, 2)
2094 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_lstat */
2095 MIPS_SYS(sys_readlink
, 3) /* 4085 */
2096 MIPS_SYS(sys_uselib
, 1)
2097 MIPS_SYS(sys_swapon
, 2)
2098 MIPS_SYS(sys_reboot
, 3)
2099 MIPS_SYS(old_readdir
, 3)
2100 MIPS_SYS(old_mmap
, 6) /* 4090 */
2101 MIPS_SYS(sys_munmap
, 2)
2102 MIPS_SYS(sys_truncate
, 2)
2103 MIPS_SYS(sys_ftruncate
, 2)
2104 MIPS_SYS(sys_fchmod
, 2)
2105 MIPS_SYS(sys_fchown
, 3) /* 4095 */
2106 MIPS_SYS(sys_getpriority
, 2)
2107 MIPS_SYS(sys_setpriority
, 3)
2108 MIPS_SYS(sys_ni_syscall
, 0)
2109 MIPS_SYS(sys_statfs
, 2)
2110 MIPS_SYS(sys_fstatfs
, 2) /* 4100 */
2111 MIPS_SYS(sys_ni_syscall
, 0) /* was ioperm(2) */
2112 MIPS_SYS(sys_socketcall
, 2)
2113 MIPS_SYS(sys_syslog
, 3)
2114 MIPS_SYS(sys_setitimer
, 3)
2115 MIPS_SYS(sys_getitimer
, 2) /* 4105 */
2116 MIPS_SYS(sys_newstat
, 2)
2117 MIPS_SYS(sys_newlstat
, 2)
2118 MIPS_SYS(sys_newfstat
, 2)
2119 MIPS_SYS(sys_uname
, 1)
2120 MIPS_SYS(sys_ni_syscall
, 0) /* 4110 was iopl(2) */
2121 MIPS_SYS(sys_vhangup
, 0)
2122 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_idle() */
2123 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_vm86 */
2124 MIPS_SYS(sys_wait4
, 4)
2125 MIPS_SYS(sys_swapoff
, 1) /* 4115 */
2126 MIPS_SYS(sys_sysinfo
, 1)
2127 MIPS_SYS(sys_ipc
, 6)
2128 MIPS_SYS(sys_fsync
, 1)
2129 MIPS_SYS(sys_sigreturn
, 0)
2130 MIPS_SYS(sys_clone
, 6) /* 4120 */
2131 MIPS_SYS(sys_setdomainname
, 2)
2132 MIPS_SYS(sys_newuname
, 1)
2133 MIPS_SYS(sys_ni_syscall
, 0) /* sys_modify_ldt */
2134 MIPS_SYS(sys_adjtimex
, 1)
2135 MIPS_SYS(sys_mprotect
, 3) /* 4125 */
2136 MIPS_SYS(sys_sigprocmask
, 3)
2137 MIPS_SYS(sys_ni_syscall
, 0) /* was create_module */
2138 MIPS_SYS(sys_init_module
, 5)
2139 MIPS_SYS(sys_delete_module
, 1)
2140 MIPS_SYS(sys_ni_syscall
, 0) /* 4130 was get_kernel_syms */
2141 MIPS_SYS(sys_quotactl
, 0)
2142 MIPS_SYS(sys_getpgid
, 1)
2143 MIPS_SYS(sys_fchdir
, 1)
2144 MIPS_SYS(sys_bdflush
, 2)
2145 MIPS_SYS(sys_sysfs
, 3) /* 4135 */
2146 MIPS_SYS(sys_personality
, 1)
2147 MIPS_SYS(sys_ni_syscall
, 0) /* for afs_syscall */
2148 MIPS_SYS(sys_setfsuid
, 1)
2149 MIPS_SYS(sys_setfsgid
, 1)
2150 MIPS_SYS(sys_llseek
, 5) /* 4140 */
2151 MIPS_SYS(sys_getdents
, 3)
2152 MIPS_SYS(sys_select
, 5)
2153 MIPS_SYS(sys_flock
, 2)
2154 MIPS_SYS(sys_msync
, 3)
2155 MIPS_SYS(sys_readv
, 3) /* 4145 */
2156 MIPS_SYS(sys_writev
, 3)
2157 MIPS_SYS(sys_cacheflush
, 3)
2158 MIPS_SYS(sys_cachectl
, 3)
2159 MIPS_SYS(sys_sysmips
, 4)
2160 MIPS_SYS(sys_ni_syscall
, 0) /* 4150 */
2161 MIPS_SYS(sys_getsid
, 1)
2162 MIPS_SYS(sys_fdatasync
, 0)
2163 MIPS_SYS(sys_sysctl
, 1)
2164 MIPS_SYS(sys_mlock
, 2)
2165 MIPS_SYS(sys_munlock
, 2) /* 4155 */
2166 MIPS_SYS(sys_mlockall
, 1)
2167 MIPS_SYS(sys_munlockall
, 0)
2168 MIPS_SYS(sys_sched_setparam
, 2)
2169 MIPS_SYS(sys_sched_getparam
, 2)
2170 MIPS_SYS(sys_sched_setscheduler
, 3) /* 4160 */
2171 MIPS_SYS(sys_sched_getscheduler
, 1)
2172 MIPS_SYS(sys_sched_yield
, 0)
2173 MIPS_SYS(sys_sched_get_priority_max
, 1)
2174 MIPS_SYS(sys_sched_get_priority_min
, 1)
2175 MIPS_SYS(sys_sched_rr_get_interval
, 2) /* 4165 */
2176 MIPS_SYS(sys_nanosleep
, 2)
2177 MIPS_SYS(sys_mremap
, 5)
2178 MIPS_SYS(sys_accept
, 3)
2179 MIPS_SYS(sys_bind
, 3)
2180 MIPS_SYS(sys_connect
, 3) /* 4170 */
2181 MIPS_SYS(sys_getpeername
, 3)
2182 MIPS_SYS(sys_getsockname
, 3)
2183 MIPS_SYS(sys_getsockopt
, 5)
2184 MIPS_SYS(sys_listen
, 2)
2185 MIPS_SYS(sys_recv
, 4) /* 4175 */
2186 MIPS_SYS(sys_recvfrom
, 6)
2187 MIPS_SYS(sys_recvmsg
, 3)
2188 MIPS_SYS(sys_send
, 4)
2189 MIPS_SYS(sys_sendmsg
, 3)
2190 MIPS_SYS(sys_sendto
, 6) /* 4180 */
2191 MIPS_SYS(sys_setsockopt
, 5)
2192 MIPS_SYS(sys_shutdown
, 2)
2193 MIPS_SYS(sys_socket
, 3)
2194 MIPS_SYS(sys_socketpair
, 4)
2195 MIPS_SYS(sys_setresuid
, 3) /* 4185 */
2196 MIPS_SYS(sys_getresuid
, 3)
2197 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_query_module */
2198 MIPS_SYS(sys_poll
, 3)
2199 MIPS_SYS(sys_nfsservctl
, 3)
2200 MIPS_SYS(sys_setresgid
, 3) /* 4190 */
2201 MIPS_SYS(sys_getresgid
, 3)
2202 MIPS_SYS(sys_prctl
, 5)
2203 MIPS_SYS(sys_rt_sigreturn
, 0)
2204 MIPS_SYS(sys_rt_sigaction
, 4)
2205 MIPS_SYS(sys_rt_sigprocmask
, 4) /* 4195 */
2206 MIPS_SYS(sys_rt_sigpending
, 2)
2207 MIPS_SYS(sys_rt_sigtimedwait
, 4)
2208 MIPS_SYS(sys_rt_sigqueueinfo
, 3)
2209 MIPS_SYS(sys_rt_sigsuspend
, 0)
2210 MIPS_SYS(sys_pread64
, 6) /* 4200 */
2211 MIPS_SYS(sys_pwrite64
, 6)
2212 MIPS_SYS(sys_chown
, 3)
2213 MIPS_SYS(sys_getcwd
, 2)
2214 MIPS_SYS(sys_capget
, 2)
2215 MIPS_SYS(sys_capset
, 2) /* 4205 */
2216 MIPS_SYS(sys_sigaltstack
, 2)
2217 MIPS_SYS(sys_sendfile
, 4)
2218 MIPS_SYS(sys_ni_syscall
, 0)
2219 MIPS_SYS(sys_ni_syscall
, 0)
2220 MIPS_SYS(sys_mmap2
, 6) /* 4210 */
2221 MIPS_SYS(sys_truncate64
, 4)
2222 MIPS_SYS(sys_ftruncate64
, 4)
2223 MIPS_SYS(sys_stat64
, 2)
2224 MIPS_SYS(sys_lstat64
, 2)
2225 MIPS_SYS(sys_fstat64
, 2) /* 4215 */
2226 MIPS_SYS(sys_pivot_root
, 2)
2227 MIPS_SYS(sys_mincore
, 3)
2228 MIPS_SYS(sys_madvise
, 3)
2229 MIPS_SYS(sys_getdents64
, 3)
2230 MIPS_SYS(sys_fcntl64
, 3) /* 4220 */
2231 MIPS_SYS(sys_ni_syscall
, 0)
2232 MIPS_SYS(sys_gettid
, 0)
2233 MIPS_SYS(sys_readahead
, 5)
2234 MIPS_SYS(sys_setxattr
, 5)
2235 MIPS_SYS(sys_lsetxattr
, 5) /* 4225 */
2236 MIPS_SYS(sys_fsetxattr
, 5)
2237 MIPS_SYS(sys_getxattr
, 4)
2238 MIPS_SYS(sys_lgetxattr
, 4)
2239 MIPS_SYS(sys_fgetxattr
, 4)
2240 MIPS_SYS(sys_listxattr
, 3) /* 4230 */
2241 MIPS_SYS(sys_llistxattr
, 3)
2242 MIPS_SYS(sys_flistxattr
, 3)
2243 MIPS_SYS(sys_removexattr
, 2)
2244 MIPS_SYS(sys_lremovexattr
, 2)
2245 MIPS_SYS(sys_fremovexattr
, 2) /* 4235 */
2246 MIPS_SYS(sys_tkill
, 2)
2247 MIPS_SYS(sys_sendfile64
, 5)
2248 MIPS_SYS(sys_futex
, 6)
2249 MIPS_SYS(sys_sched_setaffinity
, 3)
2250 MIPS_SYS(sys_sched_getaffinity
, 3) /* 4240 */
2251 MIPS_SYS(sys_io_setup
, 2)
2252 MIPS_SYS(sys_io_destroy
, 1)
2253 MIPS_SYS(sys_io_getevents
, 5)
2254 MIPS_SYS(sys_io_submit
, 3)
2255 MIPS_SYS(sys_io_cancel
, 3) /* 4245 */
2256 MIPS_SYS(sys_exit_group
, 1)
2257 MIPS_SYS(sys_lookup_dcookie
, 3)
2258 MIPS_SYS(sys_epoll_create
, 1)
2259 MIPS_SYS(sys_epoll_ctl
, 4)
2260 MIPS_SYS(sys_epoll_wait
, 3) /* 4250 */
2261 MIPS_SYS(sys_remap_file_pages
, 5)
2262 MIPS_SYS(sys_set_tid_address
, 1)
2263 MIPS_SYS(sys_restart_syscall
, 0)
2264 MIPS_SYS(sys_fadvise64_64
, 7)
2265 MIPS_SYS(sys_statfs64
, 3) /* 4255 */
2266 MIPS_SYS(sys_fstatfs64
, 2)
2267 MIPS_SYS(sys_timer_create
, 3)
2268 MIPS_SYS(sys_timer_settime
, 4)
2269 MIPS_SYS(sys_timer_gettime
, 2)
2270 MIPS_SYS(sys_timer_getoverrun
, 1) /* 4260 */
2271 MIPS_SYS(sys_timer_delete
, 1)
2272 MIPS_SYS(sys_clock_settime
, 2)
2273 MIPS_SYS(sys_clock_gettime
, 2)
2274 MIPS_SYS(sys_clock_getres
, 2)
2275 MIPS_SYS(sys_clock_nanosleep
, 4) /* 4265 */
2276 MIPS_SYS(sys_tgkill
, 3)
2277 MIPS_SYS(sys_utimes
, 2)
2278 MIPS_SYS(sys_mbind
, 4)
2279 MIPS_SYS(sys_ni_syscall
, 0) /* sys_get_mempolicy */
2280 MIPS_SYS(sys_ni_syscall
, 0) /* 4270 sys_set_mempolicy */
2281 MIPS_SYS(sys_mq_open
, 4)
2282 MIPS_SYS(sys_mq_unlink
, 1)
2283 MIPS_SYS(sys_mq_timedsend
, 5)
2284 MIPS_SYS(sys_mq_timedreceive
, 5)
2285 MIPS_SYS(sys_mq_notify
, 2) /* 4275 */
2286 MIPS_SYS(sys_mq_getsetattr
, 3)
2287 MIPS_SYS(sys_ni_syscall
, 0) /* sys_vserver */
2288 MIPS_SYS(sys_waitid
, 4)
2289 MIPS_SYS(sys_ni_syscall
, 0) /* available, was setaltroot */
2290 MIPS_SYS(sys_add_key
, 5)
2291 MIPS_SYS(sys_request_key
, 4)
2292 MIPS_SYS(sys_keyctl
, 5)
2293 MIPS_SYS(sys_set_thread_area
, 1)
2294 MIPS_SYS(sys_inotify_init
, 0)
2295 MIPS_SYS(sys_inotify_add_watch
, 3) /* 4285 */
2296 MIPS_SYS(sys_inotify_rm_watch
, 2)
2297 MIPS_SYS(sys_migrate_pages
, 4)
2298 MIPS_SYS(sys_openat
, 4)
2299 MIPS_SYS(sys_mkdirat
, 3)
2300 MIPS_SYS(sys_mknodat
, 4) /* 4290 */
2301 MIPS_SYS(sys_fchownat
, 5)
2302 MIPS_SYS(sys_futimesat
, 3)
2303 MIPS_SYS(sys_fstatat64
, 4)
2304 MIPS_SYS(sys_unlinkat
, 3)
2305 MIPS_SYS(sys_renameat
, 4) /* 4295 */
2306 MIPS_SYS(sys_linkat
, 5)
2307 MIPS_SYS(sys_symlinkat
, 3)
2308 MIPS_SYS(sys_readlinkat
, 4)
2309 MIPS_SYS(sys_fchmodat
, 3)
2310 MIPS_SYS(sys_faccessat
, 3) /* 4300 */
2311 MIPS_SYS(sys_pselect6
, 6)
2312 MIPS_SYS(sys_ppoll
, 5)
2313 MIPS_SYS(sys_unshare
, 1)
2314 MIPS_SYS(sys_splice
, 6)
2315 MIPS_SYS(sys_sync_file_range
, 7) /* 4305 */
2316 MIPS_SYS(sys_tee
, 4)
2317 MIPS_SYS(sys_vmsplice
, 4)
2318 MIPS_SYS(sys_move_pages
, 6)
2319 MIPS_SYS(sys_set_robust_list
, 2)
2320 MIPS_SYS(sys_get_robust_list
, 3) /* 4310 */
2321 MIPS_SYS(sys_kexec_load
, 4)
2322 MIPS_SYS(sys_getcpu
, 3)
2323 MIPS_SYS(sys_epoll_pwait
, 6)
2324 MIPS_SYS(sys_ioprio_set
, 3)
2325 MIPS_SYS(sys_ioprio_get
, 2)
2326 MIPS_SYS(sys_utimensat
, 4)
2327 MIPS_SYS(sys_signalfd
, 3)
2328 MIPS_SYS(sys_ni_syscall
, 0) /* was timerfd */
2329 MIPS_SYS(sys_eventfd
, 1)
2330 MIPS_SYS(sys_fallocate
, 6) /* 4320 */
2331 MIPS_SYS(sys_timerfd_create
, 2)
2332 MIPS_SYS(sys_timerfd_gettime
, 2)
2333 MIPS_SYS(sys_timerfd_settime
, 4)
2334 MIPS_SYS(sys_signalfd4
, 4)
2335 MIPS_SYS(sys_eventfd2
, 2) /* 4325 */
2336 MIPS_SYS(sys_epoll_create1
, 1)
2337 MIPS_SYS(sys_dup3
, 3)
2338 MIPS_SYS(sys_pipe2
, 2)
2339 MIPS_SYS(sys_inotify_init1
, 1)
2340 MIPS_SYS(sys_preadv
, 6) /* 4330 */
2341 MIPS_SYS(sys_pwritev
, 6)
2342 MIPS_SYS(sys_rt_tgsigqueueinfo
, 4)
2343 MIPS_SYS(sys_perf_event_open
, 5)
2344 MIPS_SYS(sys_accept4
, 4)
2345 MIPS_SYS(sys_recvmmsg
, 5) /* 4335 */
2346 MIPS_SYS(sys_fanotify_init
, 2)
2347 MIPS_SYS(sys_fanotify_mark
, 6)
2348 MIPS_SYS(sys_prlimit64
, 4)
2349 MIPS_SYS(sys_name_to_handle_at
, 5)
2350 MIPS_SYS(sys_open_by_handle_at
, 3) /* 4340 */
2351 MIPS_SYS(sys_clock_adjtime
, 2)
2352 MIPS_SYS(sys_syncfs
, 1)
2357 static int do_store_exclusive(CPUMIPSState
*env
)
2360 target_ulong page_addr
;
2368 page_addr
= addr
& TARGET_PAGE_MASK
;
2371 flags
= page_get_flags(page_addr
);
2372 if ((flags
& PAGE_READ
) == 0) {
2375 reg
= env
->llreg
& 0x1f;
2376 d
= (env
->llreg
& 0x20) != 0;
2378 segv
= get_user_s64(val
, addr
);
2380 segv
= get_user_s32(val
, addr
);
2383 if (val
!= env
->llval
) {
2384 env
->active_tc
.gpr
[reg
] = 0;
2387 segv
= put_user_u64(env
->llnewval
, addr
);
2389 segv
= put_user_u32(env
->llnewval
, addr
);
2392 env
->active_tc
.gpr
[reg
] = 1;
2399 env
->active_tc
.PC
+= 4;
2412 static int do_break(CPUMIPSState
*env
, target_siginfo_t
*info
,
2420 info
->si_signo
= TARGET_SIGFPE
;
2422 info
->si_code
= (code
== BRK_OVERFLOW
) ? FPE_INTOVF
: FPE_INTDIV
;
2423 queue_signal(env
, info
->si_signo
, &*info
);
2427 info
->si_signo
= TARGET_SIGTRAP
;
2429 queue_signal(env
, info
->si_signo
, &*info
);
2437 void cpu_loop(CPUMIPSState *env)
2439 CPUState *cs = CPU(mips_env_get_cpu(env));
2440 target_siginfo_t info;
2443 # ifdef TARGET_ABI_MIPSO32
2444 unsigned int syscall_num;
2449 trapnr = cpu_mips_exec(cs);
2453 env->active_tc.PC += 4;
2454 # ifdef TARGET_ABI_MIPSO32
2455 syscall_num = env->active_tc.gpr[2] - 4000;
2456 if (syscall_num >= sizeof(mips_syscall_args)) {
2457 ret = -TARGET_ENOSYS;
2461 abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;
2463 nb_args = mips_syscall_args[syscall_num];
2464 sp_reg = env->active_tc.gpr[29];
2466 /* these arguments are taken from the stack */
2468 if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) {
2472 if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) {
2476 if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) {
2480 if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) {
2486 ret = do_syscall(env, env->active_tc.gpr[2],
2487                  env->active_tc.gpr[4],
2488                  env->active_tc.gpr[5],
2489                  env->active_tc.gpr[6],
2490                  env->active_tc.gpr[7],
2491                  arg5, arg6, arg7, arg8);
2495 ret = do_syscall(env, env->active_tc.gpr[2],
2496                  env->active_tc.gpr[4], env->active_tc.gpr[5],
2497                  env->active_tc.gpr[6], env->active_tc.gpr[7],
2498                  env->active_tc.gpr[8], env->active_tc.gpr[9],
2499                  env->active_tc.gpr[10], env->active_tc.gpr[11]);
2501 if (ret == -TARGET_QEMU_ESIGRETURN) {
2502 /* Returning from a successful sigreturn syscall.
2503    Avoid clobbering register state. */
2506 if ((abi_ulong)ret >= (abi_ulong)-1133) {
2507 env->active_tc.gpr[7] = 1; /* error flag */
2510 env->active_tc.gpr[7] = 0; /* error flag */
2512 env->active_tc.gpr[2] = ret;
2518 info.si_signo = TARGET_SIGSEGV;
2520 /* XXX: check env->error_code */
2521 info.si_code = TARGET_SEGV_MAPERR;
2522 info._sifields._sigfault._addr = env->CP0_BadVAddr;
2523 queue_signal(env, info.si_signo, &info);
2527 info.si_signo = TARGET_SIGILL;
2530 queue_signal(env, info.si_signo, &info);
2532 case EXCP_INTERRUPT:
2533 /* just indicate that signals should be handled asap */
2539 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2542 info.si_signo = sig;
2544 info.si_code = TARGET_TRAP_BRKPT;
2545 queue_signal(env, info.si_signo, &info);
2550 if (do_store_exclusive(env)) {
2551 info.si_signo = TARGET_SIGSEGV;
2553 info.si_code = TARGET_SEGV_MAPERR;
2554 info._sifields._sigfault._addr = env->active_tc.PC;
2555 queue_signal(env, info.si_signo, &info);
2559 info.si_signo = TARGET_SIGILL;
2561 info.si_code = TARGET_ILL_ILLOPC;
2562 queue_signal(env, info.si_signo, &info);
2564 /* The code below was inspired by the MIPS Linux kernel trap
2565  * handling code in arch/mips/kernel/traps.c.
2569 abi_ulong trap_instr;
2572 if (env->hflags & MIPS_HFLAG_M16) {
2573 if (env->insn_flags & ASE_MICROMIPS) {
2574 /* microMIPS mode */
2575 ret = get_user_u16(trap_instr, env->active_tc.PC);
2580 if ((trap_instr >> 10) == 0x11) {
2581 /* 16-bit instruction */
2582 code = trap_instr & 0xf;
2584 /* 32-bit instruction */
2587 ret = get_user_u16(instr_lo,
2588                    env->active_tc.PC + 2);
2592 trap_instr = (trap_instr << 16) | instr_lo;
2593 code = ((trap_instr >> 6) & ((1 << 20) - 1));
2594 /* Unfortunately, microMIPS also suffers from
2595    the old assembler bug... */
2596 if (code >= (1 << 10)) {
2602 ret = get_user_u16(trap_instr, env->active_tc.PC);
2606 code = (trap_instr >> 6) & 0x3f;
2609 ret = get_user_u32(trap_instr, env->active_tc.PC);
2614 /* As described in the original Linux kernel code, the
2615  * below checks on 'code' are to work around an old
2618 code = ((trap_instr >> 6) & ((1 << 20) - 1));
2619 if (code >= (1 << 10)) {
2624 if (do_break(env, &info, code) != 0) {
2631 abi_ulong trap_instr;
2632 unsigned int code = 0;
2634 if (env->hflags & MIPS_HFLAG_M16) {
2635 /* microMIPS mode */
2638 ret = get_user_u16(instr[0], env->active_tc.PC) ||
2639       get_user_u16(instr[1], env->active_tc.PC + 2);
2641 trap_instr = (instr[0] << 16) | instr[1];
2643 ret = get_user_u32(trap_instr, env->active_tc.PC);
2650 /* The immediate versions don't provide a code. */
2651 if (!(trap_instr & 0xFC000000)) {
2652 if (env->hflags & MIPS_HFLAG_M16) {
2653 /* microMIPS mode */
2654 code = ((trap_instr >> 12) & ((1 << 4) - 1));
2656 code = ((trap_instr >> 6) & ((1 << 10) - 1));
2660 if (do_break(env, &info, code) != 0) {
2667 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
2670 process_pending_signals(env);
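/* The OpenRISC loop below mostly maps each exception class to a log
 * message and a signal number (gdbsig), which is forwarded via
 * gdb_handlesig(); syscalls go through do_syscall() with the result
 * placed in gpr[11]. */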
2675 #ifdef TARGET_OPENRISC
2677 void cpu_loop(CPUOpenRISCState *env)
2679 CPUState *cs = CPU(openrisc_env_get_cpu(env));
2684 trapnr = cpu_openrisc_exec(cs);
2690 qemu_log_mask(CPU_LOG_INT, "\nReset request, exit, pc is %#x\n", env->pc);
2694 qemu_log_mask(CPU_LOG_INT, "\nBus error, exit, pc is %#x\n", env->pc);
2695 gdbsig = TARGET_SIGBUS;
2699 cpu_dump_state(cs, stderr, fprintf, 0);
2700 gdbsig = TARGET_SIGSEGV;
2703 qemu_log_mask(CPU_LOG_INT, "\nTick time interrupt pc is %#x\n", env->pc);
2706 qemu_log_mask(CPU_LOG_INT, "\nAlignment pc is %#x\n", env->pc);
2707 gdbsig = TARGET_SIGBUS;
2710 qemu_log_mask(CPU_LOG_INT, "\nIllegal instructionpc is %#x\n", env->pc);
2711 gdbsig = TARGET_SIGILL;
2714 qemu_log_mask(CPU_LOG_INT, "\nExternal interruptpc is %#x\n", env->pc);
2718 qemu_log_mask(CPU_LOG_INT, "\nTLB miss\n");
2721 qemu_log_mask(CPU_LOG_INT, "\nRange\n");
2722 gdbsig = TARGET_SIGSEGV;
2725 env->pc += 4; /* 0xc00; */
2726 env->gpr[11] = do_syscall(env,
2727                           env->gpr[11], /* return value */
2728                           env->gpr[3],  /* r3 - r7 are params */
2736 qemu_log_mask(CPU_LOG_INT, "\nFloating point error\n");
2739 qemu_log_mask(CPU_LOG_INT, "\nTrap\n");
2740 gdbsig = TARGET_SIGTRAP;
2743 qemu_log_mask(CPU_LOG_INT, "\nNR\n");
2746 EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n",
2748 gdbsig = TARGET_SIGILL;
2752 gdb_handlesig(cs, gdbsig);
2753 if (gdbsig != TARGET_SIGTRAP) {
2758 process_pending_signals(env);
2762 #endif /* TARGET_OPENRISC */
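/* The SH4 and CRIS loops follow the same pattern: trap into do_syscall()
 * and place the return value in the ABI return register (gregs[0] for SH4,
 * regs[10] for CRIS), turn faults into a queued SIGSEGV carrying the
 * faulting address (env->tea resp. pregs[PR_EDA]), and hand breakpoint
 * traps to gdb. */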
2765 void cpu_loop(CPUSH4State *env)
2767 CPUState *cs = CPU(sh_env_get_cpu(env));
2769 target_siginfo_t info;
2773 trapnr = cpu_sh4_exec(cs);
2779 ret = do_syscall(env,
2788 env->gregs[0] = ret;
2790 case EXCP_INTERRUPT:
2791 /* just indicate that signals should be handled asap */
2797 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2800 info.si_signo = sig;
2802 info.si_code = TARGET_TRAP_BRKPT;
2803 queue_signal(env, info.si_signo, &info);
2809 info.si_signo = TARGET_SIGSEGV;
2811 info.si_code = TARGET_SEGV_MAPERR;
2812 info._sifields._sigfault._addr = env->tea;
2813 queue_signal(env, info.si_signo, &info);
2817 printf ("Unhandled trap: 0x%x\n", trapnr);
2818 cpu_dump_state(cs, stderr, fprintf, 0);
2821 process_pending_signals (env);
2827 void cpu_loop(CPUCRISState *env)
2829 CPUState *cs = CPU(cris_env_get_cpu(env));
2831 target_siginfo_t info;
2835 trapnr = cpu_cris_exec(cs);
2840 info.si_signo = TARGET_SIGSEGV;
2842 /* XXX: check env->error_code */
2843 info.si_code = TARGET_SEGV_MAPERR;
2844 info._sifields._sigfault._addr = env->pregs[PR_EDA];
2845 queue_signal(env, info.si_signo, &info);
2848 case EXCP_INTERRUPT:
2849 /* just indicate that signals should be handled asap */
2852 ret = do_syscall(env,
2861 env->regs[10] = ret;
2867 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2870 info.si_signo = sig;
2872 info.si_code = TARGET_TRAP_BRKPT;
2873 queue_signal(env, info.si_signo, &info);
2878 printf ("Unhandled trap: 0x%x\n", trapnr);
2879 cpu_dump_state(cs, stderr, fprintf, 0);
2882 process_pending_signals (env);
2887 #ifdef TARGET_MICROBLAZE
2888 void cpu_loop(CPUMBState
*env
)
2890 CPUState
*cs
= CPU(mb_env_get_cpu(env
));
2892 target_siginfo_t info
;
2896 trapnr
= cpu_mb_exec(cs
);
2901 info
.si_signo
= TARGET_SIGSEGV
;
2903 /* XXX: check env->error_code */
2904 info
.si_code
= TARGET_SEGV_MAPERR
;
2905 info
._sifields
._sigfault
._addr
= 0;
2906 queue_signal(env
, info
.si_signo
, &info
);
2909 case EXCP_INTERRUPT
:
2910 /* just indicate that signals should be handled asap */
2913 /* Return address is 4 bytes after the call. */
2915 env
->sregs
[SR_PC
] = env
->regs
[14];
2916 ret
= do_syscall(env
,
2928 env
->regs
[17] = env
->sregs
[SR_PC
] + 4;
2929 if (env
->iflags
& D_FLAG
) {
2930 env
->sregs
[SR_ESR
] |= 1 << 12;
2931 env
->sregs
[SR_PC
] -= 4;
2932 /* FIXME: if branch was immed, replay the imm as well. */
2935 env
->iflags
&= ~(IMM_FLAG
| D_FLAG
);
2937 switch (env
->sregs
[SR_ESR
] & 31) {
2938 case ESR_EC_DIVZERO
:
2939 info
.si_signo
= TARGET_SIGFPE
;
2941 info
.si_code
= TARGET_FPE_FLTDIV
;
2942 info
._sifields
._sigfault
._addr
= 0;
2943 queue_signal(env
, info
.si_signo
, &info
);
2946 info
.si_signo
= TARGET_SIGFPE
;
2948 if (env
->sregs
[SR_FSR
] & FSR_IO
) {
2949 info
.si_code
= TARGET_FPE_FLTINV
;
2951 if (env
->sregs
[SR_FSR
] & FSR_DZ
) {
2952 info
.si_code
= TARGET_FPE_FLTDIV
;
2954 info
._sifields
._sigfault
._addr
= 0;
2955 queue_signal(env
, info
.si_signo
, &info
);
2958 printf ("Unhandled hw-exception: 0x%x\n",
2959 env
->sregs
[SR_ESR
] & ESR_EC_MASK
);
2960 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2969 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2972 info
.si_signo
= sig
;
2974 info
.si_code
= TARGET_TRAP_BRKPT
;
2975 queue_signal(env
, info
.si_signo
, &info
);
2980 printf ("Unhandled trap: 0x%x\n", trapnr
);
2981 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2984 process_pending_signals (env
);
2991 void cpu_loop(CPUM68KState
*env
)
2993 CPUState
*cs
= CPU(m68k_env_get_cpu(env
));
2996 target_siginfo_t info
;
2997 TaskState
*ts
= cs
->opaque
;
3001 trapnr
= cpu_m68k_exec(cs
);
3006 if (ts
->sim_syscalls
) {
3008 get_user_u16(nr
, env
->pc
+ 2);
3010 do_m68k_simcall(env
, nr
);
3016 case EXCP_HALT_INSN
:
3017 /* Semihosing syscall. */
3019 do_m68k_semihosting(env
, env
->dregs
[0]);
3023 case EXCP_UNSUPPORTED
:
3025 info
.si_signo
= TARGET_SIGILL
;
3027 info
.si_code
= TARGET_ILL_ILLOPN
;
3028 info
._sifields
._sigfault
._addr
= env
->pc
;
3029 queue_signal(env
, info
.si_signo
, &info
);
3033 ts
->sim_syscalls
= 0;
3036 env
->dregs
[0] = do_syscall(env
,
3047 case EXCP_INTERRUPT
:
3048 /* just indicate that signals should be handled asap */
3052 info
.si_signo
= TARGET_SIGSEGV
;
3054 /* XXX: check env->error_code */
3055 info
.si_code
= TARGET_SEGV_MAPERR
;
3056 info
._sifields
._sigfault
._addr
= env
->mmu
.ar
;
3057 queue_signal(env
, info
.si_signo
, &info
);
3064 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3067 info
.si_signo
= sig
;
3069 info
.si_code
= TARGET_TRAP_BRKPT
;
3070 queue_signal(env
, info
.si_signo
, &info
);
3075 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
3078 process_pending_signals(env
);
3081 #endif /* TARGET_M68K */
3084 static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)
3086 target_ulong addr, val, tmp;
3087 target_siginfo_t info;
3090 addr = env->lock_addr;
3091 tmp = env->lock_st_addr;
3092 env->lock_addr = -1;
3093 env->lock_st_addr = 0;
3099 if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
3103 if (val == env->lock_value) {
3105 if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {
3122 info.si_signo = TARGET_SIGSEGV;
3124 info.si_code = TARGET_SEGV_MAPERR;
3125 info._sifields._sigfault._addr = addr;
3126 queue_signal(env, TARGET_SIGSEGV, &info);
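/* do_store_exclusive() above implements Alpha's load-locked/store-
 * conditional emulation: the lock_addr/lock_value pair recorded by the
 * load-locked instruction is compared against current memory and the
 * store only happens when the value is unchanged; a memory access failure
 * raises SIGSEGV at the lock address.  The cpu_loop that follows calls it
 * for EXCP_STL_C/EXCP_STQ_C. */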
3129 void cpu_loop(CPUAlphaState
*env
)
3131 CPUState
*cs
= CPU(alpha_env_get_cpu(env
));
3133 target_siginfo_t info
;
3138 trapnr
= cpu_alpha_exec(cs
);
3141 /* All of the traps imply a transition through PALcode, which
3142 implies an REI instruction has been executed. Which means
3143 that the intr_flag should be cleared. */
3148 fprintf(stderr
, "Reset requested. Exit\n");
3152 fprintf(stderr
, "Machine check exception. Exit\n");
3155 case EXCP_SMP_INTERRUPT
:
3156 case EXCP_CLK_INTERRUPT
:
3157 case EXCP_DEV_INTERRUPT
:
3158 fprintf(stderr
, "External interrupt. Exit\n");
3162 env
->lock_addr
= -1;
3163 info
.si_signo
= TARGET_SIGSEGV
;
3165 info
.si_code
= (page_get_flags(env
->trap_arg0
) & PAGE_VALID
3166 ? TARGET_SEGV_ACCERR
: TARGET_SEGV_MAPERR
);
3167 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
3168 queue_signal(env
, info
.si_signo
, &info
);
3171 env
->lock_addr
= -1;
3172 info
.si_signo
= TARGET_SIGBUS
;
3174 info
.si_code
= TARGET_BUS_ADRALN
;
3175 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
3176 queue_signal(env
, info
.si_signo
, &info
);
3180 env
->lock_addr
= -1;
3181 info
.si_signo
= TARGET_SIGILL
;
3183 info
.si_code
= TARGET_ILL_ILLOPC
;
3184 info
._sifields
._sigfault
._addr
= env
->pc
;
3185 queue_signal(env
, info
.si_signo
, &info
);
3188 env
->lock_addr
= -1;
3189 info
.si_signo
= TARGET_SIGFPE
;
3191 info
.si_code
= TARGET_FPE_FLTINV
;
3192 info
._sifields
._sigfault
._addr
= env
->pc
;
3193 queue_signal(env
, info
.si_signo
, &info
);
3196 /* No-op. Linux simply re-enables the FPU. */
3199 env
->lock_addr
= -1;
3200 switch (env
->error_code
) {
3203 info
.si_signo
= TARGET_SIGTRAP
;
3205 info
.si_code
= TARGET_TRAP_BRKPT
;
3206 info
._sifields
._sigfault
._addr
= env
->pc
;
3207 queue_signal(env
, info
.si_signo
, &info
);
3211 info
.si_signo
= TARGET_SIGTRAP
;
3214 info
._sifields
._sigfault
._addr
= env
->pc
;
3215 queue_signal(env
, info
.si_signo
, &info
);
3219 trapnr
= env
->ir
[IR_V0
];
3220 sysret
= do_syscall(env
, trapnr
,
3221 env
->ir
[IR_A0
], env
->ir
[IR_A1
],
3222 env
->ir
[IR_A2
], env
->ir
[IR_A3
],
3223 env
->ir
[IR_A4
], env
->ir
[IR_A5
],
3225 if (trapnr
== TARGET_NR_sigreturn
3226 || trapnr
== TARGET_NR_rt_sigreturn
) {
3229 /* Syscall writes 0 to V0 to bypass error check, similar
3230 to how this is handled internal to Linux kernel.
3231 (Ab)use trapnr temporarily as boolean indicating error. */
3232 trapnr
= (env
->ir
[IR_V0
] != 0 && sysret
< 0);
3233 env
->ir
[IR_V0
] = (trapnr
? -sysret
: sysret
);
3234 env
->ir
[IR_A3
] = trapnr
;
3238 /* ??? We can probably elide the code using page_unprotect
3239 that is checking for self-modifying code. Instead we
3240 could simply call tb_flush here. Until we work out the
3241 changes required to turn off the extra write protection,
3242 this can be a no-op. */
3246 /* Handled in the translator for usermode. */
3250 /* Handled in the translator for usermode. */
3254 info
.si_signo
= TARGET_SIGFPE
;
3255 switch (env
->ir
[IR_A0
]) {
3256 case TARGET_GEN_INTOVF
:
3257 info
.si_code
= TARGET_FPE_INTOVF
;
3259 case TARGET_GEN_INTDIV
:
3260 info
.si_code
= TARGET_FPE_INTDIV
;
3262 case TARGET_GEN_FLTOVF
:
3263 info
.si_code
= TARGET_FPE_FLTOVF
;
3265 case TARGET_GEN_FLTUND
:
3266 info
.si_code
= TARGET_FPE_FLTUND
;
3268 case TARGET_GEN_FLTINV
:
3269 info
.si_code
= TARGET_FPE_FLTINV
;
3271 case TARGET_GEN_FLTINE
:
3272 info
.si_code
= TARGET_FPE_FLTRES
;
3274 case TARGET_GEN_ROPRAND
:
3278 info
.si_signo
= TARGET_SIGTRAP
;
3283 info
._sifields
._sigfault
._addr
= env
->pc
;
3284 queue_signal(env
, info
.si_signo
, &info
);
3291 info
.si_signo
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3292 if (info
.si_signo
) {
3293 env
->lock_addr
= -1;
3295 info
.si_code
= TARGET_TRAP_BRKPT
;
3296 queue_signal(env
, info
.si_signo
, &info
);
3301 do_store_exclusive(env
, env
->error_code
, trapnr
- EXCP_STL_C
);
3303 case EXCP_INTERRUPT
:
3304 /* Just indicate that signals should be handled asap. */
3307 printf ("Unhandled trap: 0x%x\n", trapnr
);
3308 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3311 process_pending_signals (env
);
3314 #endif /* TARGET_ALPHA */
3317 void cpu_loop(CPUS390XState
*env
)
3319 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
3321 target_siginfo_t info
;
3326 trapnr
= cpu_s390x_exec(cs
);
3329 case EXCP_INTERRUPT
:
3330 /* Just indicate that signals should be handled asap. */
3334 n
= env
->int_svc_code
;
3336 /* syscalls > 255 */
3339 env
->psw
.addr
+= env
->int_svc_ilen
;
3340 env
->regs
[2] = do_syscall(env
, n
, env
->regs
[2], env
->regs
[3],
3341 env
->regs
[4], env
->regs
[5],
3342 env
->regs
[6], env
->regs
[7], 0, 0);
3346 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3348 n
= TARGET_TRAP_BRKPT
;
3353 n
= env
->int_pgm_code
;
3356 case PGM_PRIVILEGED
:
3357 sig
= TARGET_SIGILL
;
3358 n
= TARGET_ILL_ILLOPC
;
3360 case PGM_PROTECTION
:
3361 case PGM_ADDRESSING
:
3362 sig
= TARGET_SIGSEGV
;
3363 /* XXX: check env->error_code */
3364 n
= TARGET_SEGV_MAPERR
;
3365 addr
= env
->__excp_addr
;
3368 case PGM_SPECIFICATION
:
3369 case PGM_SPECIAL_OP
:
3372 sig
= TARGET_SIGILL
;
3373 n
= TARGET_ILL_ILLOPN
;
3376 case PGM_FIXPT_OVERFLOW
:
3377 sig
= TARGET_SIGFPE
;
3378 n
= TARGET_FPE_INTOVF
;
3380 case PGM_FIXPT_DIVIDE
:
3381 sig
= TARGET_SIGFPE
;
3382 n
= TARGET_FPE_INTDIV
;
3386 n
= (env
->fpc
>> 8) & 0xff;
3388 /* compare-and-trap */
3391 /* An IEEE exception, simulated or otherwise. */
3393 n
= TARGET_FPE_FLTINV
;
3394 } else if (n
& 0x40) {
3395 n
= TARGET_FPE_FLTDIV
;
3396 } else if (n
& 0x20) {
3397 n
= TARGET_FPE_FLTOVF
;
3398 } else if (n
& 0x10) {
3399 n
= TARGET_FPE_FLTUND
;
3400 } else if (n
& 0x08) {
3401 n
= TARGET_FPE_FLTRES
;
3403 /* ??? Quantum exception; BFP, DFP error. */
3406 sig
= TARGET_SIGFPE
;
3411 fprintf(stderr
, "Unhandled program exception: %#x\n", n
);
3412 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3418 addr
= env
->psw
.addr
;
3420 info
.si_signo
= sig
;
3423 info
._sifields
._sigfault
._addr
= addr
;
3424 queue_signal(env
, info
.si_signo
, &info
);
3428 fprintf(stderr
, "Unhandled trap: 0x%x\n", trapnr
);
3429 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3432 process_pending_signals (env
);
3436 #endif /* TARGET_S390X */
3438 #ifdef TARGET_TILEGX
3440 static void gen_sigill_reg(CPUTLGState *env)
3442 target_siginfo_t info;
3444 info.si_signo = TARGET_SIGILL;
3446 info.si_code = TARGET_ILL_PRVREG;
3447 info._sifields._sigfault._addr = env->pc;
3448 queue_signal(env, info.si_signo, &info);
3451 static void do_signal(CPUTLGState *env, int signo, int sigcode)
3453 target_siginfo_t info;
3455 info.si_signo = signo;
3457 info._sifields._sigfault._addr = env->pc;
3459 if (signo == TARGET_SIGSEGV) {
3460 /* The passed in sigcode is a dummy; check for a page mapping
3461    and pass either MAPERR or ACCERR. */
3462 target_ulong addr = env->excaddr;
3463 info._sifields._sigfault._addr = addr;
3464 if (page_check_range(addr, 1, PAGE_VALID) < 0) {
3465 sigcode = TARGET_SEGV_MAPERR;
3467 sigcode = TARGET_SEGV_ACCERR;
3470 info.si_code = sigcode;
3472 queue_signal(env, info.si_signo, &info);
3475 static void gen_sigsegv_maperr(CPUTLGState *env, target_ulong addr)
3477 env->excaddr = addr;
3478 do_signal(env, TARGET_SIGSEGV, 0);
3481 static void set_regval(CPUTLGState *env, uint8_t reg, uint64_t val)
3483 if (unlikely(reg >= TILEGX_R_COUNT)) {
3494 gen_sigill_reg(env);
3497 g_assert_not_reached();
3500 env->regs[reg] = val;
3504 * Compare the 8-byte contents of the CmpValue SPR with the 8-byte value in
3505 * memory at the address held in the first source register. If the values are
3506 * not equal, then no memory operation is performed. If the values are equal,
3507 * the 8-byte quantity from the second source register is written into memory
3508 * at the address held in the first source register. In either case, the result
3509 * of the instruction is the value read from memory. The compare and write to
3510 * memory are atomic and thus can be used for synchronization purposes. This
3511 * instruction only operates for addresses aligned to a 8-byte boundary.
3512 * Unaligned memory access causes an Unaligned Data Reference interrupt.
3514 * Functional Description (64-bit)
3515 * uint64_t memVal = memoryReadDoubleWord (rf[SrcA]);
3516 * rf[Dest] = memVal;
3517 * if (memVal == SPR[CmpValueSPR])
3518 * memoryWriteDoubleWord (rf[SrcA], rf[SrcB]);
3520 * Functional Description (32-bit)
3521 * uint64_t memVal = signExtend32 (memoryReadWord (rf[SrcA]));
3522 * rf[Dest] = memVal;
3523 * if (memVal == signExtend32 (SPR[CmpValueSPR]))
3524 * memoryWriteWord (rf[SrcA], rf[SrcB]);
3527 * This function also processes exch and exch4 which need not process SPR.
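/* do_exch() below covers exch/exch4 as well as cmpexch/cmpexch4: it loads
 * the old value at atomic_srca, optionally compares it with the CMPEXCH
 * SPR (sign-extended for the 4-byte forms), conditionally stores
 * atomic_srcb, and always writes the old value to the destination
 * register; a bad address is reported as SIGSEGV/MAPERR. */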
3529 static void do_exch(CPUTLGState *env, bool quad, bool cmp)
3532 target_long val, sprval;
3536 addr = env->atomic_srca;
3537 if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
3538 goto sigsegv_maperr;
3543 sprval = env->spregs[TILEGX_SPR_CMPEXCH];
3545 sprval = sextract64(env->spregs[TILEGX_SPR_CMPEXCH], 0, 32);
3549 if (!cmp || val == sprval) {
3550 target_long valb = env->atomic_srcb;
3551 if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
3552 goto sigsegv_maperr;
3556 set_regval(env, env->atomic_dstr, val);
3562 gen_sigsegv_maperr(env, addr);
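/* do_fetch() handles the fetch-and-op family (fetchadd, fetchaddgez,
 * fetchand, fetchor and their 4-byte variants) in the same read-modify-
 * write style, again returning the old memory value through set_regval()
 * and raising SIGSEGV/MAPERR on a bad address. */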
3565 static void do_fetch(CPUTLGState
*env
, int trapnr
, bool quad
)
3569 target_long val
, valb
;
3573 addr
= env
->atomic_srca
;
3574 valb
= env
->atomic_srcb
;
3575 if (quad
? get_user_s64(val
, addr
) : get_user_s32(val
, addr
)) {
3576 goto sigsegv_maperr
;
3580 case TILEGX_EXCP_OPCODE_FETCHADD
:
3581 case TILEGX_EXCP_OPCODE_FETCHADD4
:
3584 case TILEGX_EXCP_OPCODE_FETCHADDGEZ
:
3590 case TILEGX_EXCP_OPCODE_FETCHADDGEZ4
:
3592 if ((int32_t)valb
< 0) {
3596 case TILEGX_EXCP_OPCODE_FETCHAND
:
3597 case TILEGX_EXCP_OPCODE_FETCHAND4
:
3600 case TILEGX_EXCP_OPCODE_FETCHOR
:
3601 case TILEGX_EXCP_OPCODE_FETCHOR4
:
3605 g_assert_not_reached();
3609 if (quad
? put_user_u64(valb
, addr
) : put_user_u32(valb
, addr
)) {
3610 goto sigsegv_maperr
;
3614 set_regval(env
, env
->atomic_dstr
, val
);
3620 gen_sigsegv_maperr(env
, addr
);
3623 void cpu_loop(CPUTLGState
*env
)
3625 CPUState
*cs
= CPU(tilegx_env_get_cpu(env
));
3630 trapnr
= cpu_tilegx_exec(cs
);
3633 case TILEGX_EXCP_SYSCALL
:
3634 env
->regs
[TILEGX_R_RE
] = do_syscall(env
, env
->regs
[TILEGX_R_NR
],
3635 env
->regs
[0], env
->regs
[1],
3636 env
->regs
[2], env
->regs
[3],
3637 env
->regs
[4], env
->regs
[5],
3638 env
->regs
[6], env
->regs
[7]);
3639 env
->regs
[TILEGX_R_ERR
] = TILEGX_IS_ERRNO(env
->regs
[TILEGX_R_RE
])
3640 ? - env
->regs
[TILEGX_R_RE
]
3643 case TILEGX_EXCP_OPCODE_EXCH
:
3644 do_exch(env
, true, false);
3646 case TILEGX_EXCP_OPCODE_EXCH4
:
3647 do_exch(env
, false, false);
3649 case TILEGX_EXCP_OPCODE_CMPEXCH
:
3650 do_exch(env
, true, true);
3652 case TILEGX_EXCP_OPCODE_CMPEXCH4
:
3653 do_exch(env
, false, true);
3655 case TILEGX_EXCP_OPCODE_FETCHADD
:
3656 case TILEGX_EXCP_OPCODE_FETCHADDGEZ
:
3657 case TILEGX_EXCP_OPCODE_FETCHAND
:
3658 case TILEGX_EXCP_OPCODE_FETCHOR
:
3659 do_fetch(env
, trapnr
, true);
3661 case TILEGX_EXCP_OPCODE_FETCHADD4
:
3662 case TILEGX_EXCP_OPCODE_FETCHADDGEZ4
:
3663 case TILEGX_EXCP_OPCODE_FETCHAND4
:
3664 case TILEGX_EXCP_OPCODE_FETCHOR4
:
3665 do_fetch(env
, trapnr
, false);
3667 case TILEGX_EXCP_SIGNAL
:
3668 do_signal(env
, env
->signo
, env
->sigcode
);
3670 case TILEGX_EXCP_REG_IDN_ACCESS
:
3671 case TILEGX_EXCP_REG_UDN_ACCESS
:
3672 gen_sigill_reg(env
);
3675 fprintf(stderr
, "trapnr is %d[0x%x].\n", trapnr
, trapnr
);
3676 g_assert_not_reached();
3678 process_pending_signals(env
);
3684 THREAD CPUState *thread_cpu;
3686 void task_settid(TaskState *ts)
3688 if (ts->ts_tid == 0) {
3689 ts->ts_tid = (pid_t)syscall(SYS_gettid);
3693 void stop_all_tasks(void)
3696 * We trust that when using NPTL, start_exclusive()
3697 * handles thread stopping correctly.
3702 /* Assumes contents are already zeroed. */
3703 void init_task_state(TaskState *ts)
3708 ts->first_free = ts->sigqueue_table;
3709 for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
3710 ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1];
3712 ts->sigqueue_table[i].next = NULL;
3715 CPUArchState *cpu_copy(CPUArchState *env)
3717 CPUState *cpu = ENV_GET_CPU(env);
3718 CPUState *new_cpu = cpu_init(cpu_model);
3719 CPUArchState *new_env = new_cpu->env_ptr;
3723 /* Reset non arch specific state */
3726 memcpy(new_env, env, sizeof(CPUArchState));
3728 /* Clone all break/watchpoints.
3729    Note: Once we support ptrace with hw-debug register access, make sure
3730    BP_CPU break/watchpoints are handled correctly on clone. */
3731 QTAILQ_INIT(&new_cpu->breakpoints);
3732 QTAILQ_INIT(&new_cpu->watchpoints);
3733 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
3734 cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
3736 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
3737 cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
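/* Command line handling: each handle_arg_* function below implements one
 * option from arg_table and typically just records its value in a global
 * (interp_prefix, guest_stack_size, gdbstub_port, ...) that main() and the
 * loader consult later. */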
3743 static void handle_arg_help(const char *arg)
3745 usage(EXIT_SUCCESS);
3748 static void handle_arg_log(const char *arg)
3752 mask = qemu_str_to_log_mask(arg);
3754 qemu_print_log_usage(stdout);
3760 static void handle_arg_log_filename(const char *arg)
3762 qemu_set_log_filename(arg);
3765 static void handle_arg_set_env(const char *arg)
3767 char *r, *p, *token;
3768 r = p = strdup(arg);
3769 while ((token = strsep(&p, ",")) != NULL) {
3770 if (envlist_setenv(envlist, token) != 0) {
3771 usage(EXIT_FAILURE);
3777 static void handle_arg_unset_env(const char *arg)
3779 char *r, *p, *token;
3780 r = p = strdup(arg);
3781 while ((token = strsep(&p, ",")) != NULL) {
3782 if (envlist_unsetenv(envlist, token) != 0) {
3783 usage(EXIT_FAILURE);
3789 static void handle_arg_argv0(const char *arg)
3791 argv0 = strdup(arg);
3794 static void handle_arg_stack_size(const char *arg)
3797 guest_stack_size = strtoul(arg, &p, 0);
3798 if (guest_stack_size == 0) {
3799 usage(EXIT_FAILURE);
3803 guest_stack_size *= 1024 * 1024;
3804 } else if (*p == 'k' || *p == 'K') {
3805 guest_stack_size *= 1024;
3809 static void handle_arg_ld_prefix(const char *arg)
3811 interp_prefix = strdup(arg);
3814 static void handle_arg_pagesize(const char *arg)
3816 qemu_host_page_size = atoi(arg);
3817 if (qemu_host_page_size == 0 ||
3818     (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
3819 fprintf(stderr, "page size must be a power of two\n");
3824 static void handle_arg_randseed(const char *arg)
3826 unsigned long long seed;
3828 if (parse_uint_full(arg, &seed, 0) != 0 || seed > UINT_MAX) {
3829 fprintf(stderr, "Invalid seed number: %s\n", arg);
3835 static void handle_arg_gdb(const char *arg)
3837 gdbstub_port = atoi(arg);
3840 static void handle_arg_uname(const char *arg)
3842 qemu_uname_release = strdup(arg);
3845 static void handle_arg_cpu(const char *arg)
3847 cpu_model = strdup(arg);
3848 if (cpu_model == NULL || is_help_option(cpu_model)) {
3849 /* XXX: implement xxx_cpu_list for targets that still miss it */
3850 #if defined(cpu_list)
3851 cpu_list(stdout, &fprintf);
3857 static void handle_arg_guest_base(const char *arg)
3859 guest_base = strtol(arg, NULL, 0);
3860 have_guest_base = 1;
3863 static void handle_arg_reserved_va(const char *arg)
3867 reserved_va = strtoul(arg, &p, 0);
3881 unsigned long unshifted = reserved_va;
3883 reserved_va <<= shift;
3884 if (((reserved_va >> shift) != unshifted)
3885 #if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
3886     || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
3889 fprintf(stderr, "Reserved virtual address too big\n");
3894 fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
3899 static void handle_arg_singlestep(const char *arg)
3904 static void handle_arg_strace(const char *arg)
3909 static void handle_arg_version(const char *arg)
3911 printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION
3912        ", Copyright (c) 2003-2008 Fabrice Bellard\n");
3916 struct qemu_argument
{
3920 void (*handle_opt
)(const char *arg
);
3921 const char *example
;
3925 static const struct qemu_argument arg_table
[] = {
3926 {"h", "", false, handle_arg_help
,
3927 "", "print this help"},
3928 {"help", "", false, handle_arg_help
,
3930 {"g", "QEMU_GDB", true, handle_arg_gdb
,
3931 "port", "wait gdb connection to 'port'"},
3932 {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix
,
3933 "path", "set the elf interpreter prefix to 'path'"},
3934 {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size
,
3935 "size", "set the stack size to 'size' bytes"},
3936 {"cpu", "QEMU_CPU", true, handle_arg_cpu
,
3937 "model", "select CPU (-cpu help for list)"},
3938 {"E", "QEMU_SET_ENV", true, handle_arg_set_env
,
3939 "var=value", "sets targets environment variable (see below)"},
3940 {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env
,
3941 "var", "unsets targets environment variable (see below)"},
3942 {"0", "QEMU_ARGV0", true, handle_arg_argv0
,
3943 "argv0", "forces target process argv[0] to be 'argv0'"},
3944 {"r", "QEMU_UNAME", true, handle_arg_uname
,
3945 "uname", "set qemu uname release string to 'uname'"},
3946 {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base
,
3947 "address", "set guest_base address to 'address'"},
3948 {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va
,
3949 "size", "reserve 'size' bytes for guest virtual address space"},
3950 {"d", "QEMU_LOG", true, handle_arg_log
,
3951 "item[,...]", "enable logging of specified items "
3952 "(use '-d help' for a list of items)"},
3953 {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename
,
3954 "logfile", "write logs to 'logfile' (default stderr)"},
3955 {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize
,
3956 "pagesize", "set the host page size to 'pagesize'"},
3957 {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep
,
3958 "", "run in singlestep mode"},
3959 {"strace", "QEMU_STRACE", false, handle_arg_strace
,
3960 "", "log system calls"},
3961 {"seed", "QEMU_RAND_SEED", true, handle_arg_randseed
,
3962 "", "Seed for pseudo-random number generator"},
3963 {"version", "QEMU_VERSION", false, handle_arg_version
,
3964 "", "display version information and exit"},
3965 {NULL
, NULL
, false, NULL
, NULL
, NULL
}
3968 static void usage(int exitcode
)
3970 const struct qemu_argument
*arginfo
;
3974 printf("usage: qemu-" TARGET_NAME
" [options] program [arguments...]\n"
3975 "Linux CPU emulator (compiled for " TARGET_NAME
" emulation)\n"
3977 "Options and associated environment variables:\n"
3980 /* Calculate column widths. We must always have at least enough space
3981 * for the column header.
3983 maxarglen
= strlen("Argument");
3984 maxenvlen
= strlen("Env-variable");
3986 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
3987 int arglen
= strlen(arginfo
->argv
);
3988 if (arginfo
->has_arg
) {
3989 arglen
+= strlen(arginfo
->example
) + 1;
3991 if (strlen(arginfo
->env
) > maxenvlen
) {
3992 maxenvlen
= strlen(arginfo
->env
);
3994 if (arglen
> maxarglen
) {
3999 printf("%-*s %-*s Description\n", maxarglen
+1, "Argument",
4000 maxenvlen
, "Env-variable");
4002 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
4003 if (arginfo
->has_arg
) {
4004 printf("-%s %-*s %-*s %s\n", arginfo
->argv
,
4005 (int)(maxarglen
- strlen(arginfo
->argv
) - 1),
4006 arginfo
->example
, maxenvlen
, arginfo
->env
, arginfo
->help
);
4008 printf("-%-*s %-*s %s\n", maxarglen
, arginfo
->argv
,
4009 maxenvlen
, arginfo
->env
,
4016 "QEMU_LD_PREFIX = %s\n"
4017 "QEMU_STACK_SIZE = %ld byte\n",
4022 "You can use -E and -U options or the QEMU_SET_ENV and\n"
4023 "QEMU_UNSET_ENV environment variables to set and unset\n"
4024 "environment variables for the target process.\n"
4025 "It is possible to provide several variables by separating them\n"
4026 "by commas in getsubopt(3) style. Additionally it is possible to\n"
4027 "provide the -E and -U options multiple times.\n"
4028 "The following lines are equivalent:\n"
4029 " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
4030 " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
4031 " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
4032 "Note that if you provide several changes to a single variable\n"
4033 "the last change will stay in effect.\n");
4038 static int parse_args(int argc, char **argv)
4042 const struct qemu_argument *arginfo;
4044 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
4045 if (arginfo->env == NULL) {
4049 r = getenv(arginfo->env);
4051 arginfo->handle_opt(r);
4057 if (optind >= argc) {
4066 if (!strcmp(r, "-")) {
4069 /* Treat --foo the same as -foo. */
4074 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
4075 if (!strcmp(r, arginfo->argv)) {
4076 if (arginfo->has_arg) {
4077 if (optind >= argc) {
4078 (void) fprintf(stderr,
4079     "qemu: missing argument for option '%s'\n", r);
4082 arginfo->handle_opt(argv[optind]);
4085 arginfo->handle_opt(NULL);
4091 /* no option matched the current argv */
4092 if (arginfo->handle_opt == NULL) {
4093 (void) fprintf(stderr, "qemu: unknown option '%s'\n", r);
4098 if (optind >= argc) {
4099 (void) fprintf(stderr, "qemu: no user program specified\n");
4103 filename = argv[optind];
4104 exec_path = argv[optind];
4109 int main(int argc
, char **argv
, char **envp
)
4111 struct target_pt_regs regs1
, *regs
= ®s1
;
4112 struct image_info info1
, *info
= &info1
;
4113 struct linux_binprm bprm
;
4118 char **target_environ
, **wrk
;
4125 module_call_init(MODULE_INIT_QOM
);
4127 if ((envlist
= envlist_create()) == NULL
) {
4128 (void) fprintf(stderr
, "Unable to allocate envlist\n");
4132 /* add current environment into the list */
4133 for (wrk
= environ
; *wrk
!= NULL
; wrk
++) {
4134 (void) envlist_setenv(envlist
, *wrk
);
4137 /* Read the stack limit from the kernel. If it's "unlimited",
4138 then we can do little else besides use the default. */
4141 if (getrlimit(RLIMIT_STACK
, &lim
) == 0
4142 && lim
.rlim_cur
!= RLIM_INFINITY
4143 && lim
.rlim_cur
== (target_long
)lim
.rlim_cur
) {
4144 guest_stack_size
= lim
.rlim_cur
;
4149 #if defined(cpudef_setup)
4150 cpudef_setup(); /* parse cpu definitions in target config file (TBD) */
4155 optind
= parse_args(argc
, argv
);
4158 memset(regs
, 0, sizeof(struct target_pt_regs
));
4160 /* Zero out image_info */
4161 memset(info
, 0, sizeof(struct image_info
));
4163 memset(&bprm
, 0, sizeof (bprm
));
4165 /* Scan interp_prefix dir for replacement files. */
4166 init_paths(interp_prefix
);
4168 init_qemu_uname_release();
4170 if (cpu_model
== NULL
) {
4171 #if defined(TARGET_I386)
4172 #ifdef TARGET_X86_64
4173 cpu_model
= "qemu64";
4175 cpu_model
= "qemu32";
4177 #elif defined(TARGET_ARM)
4179 #elif defined(TARGET_UNICORE32)
4181 #elif defined(TARGET_M68K)
4183 #elif defined(TARGET_SPARC)
4184 #ifdef TARGET_SPARC64
4185 cpu_model
= "TI UltraSparc II";
4187 cpu_model
= "Fujitsu MB86904";
4189 #elif defined(TARGET_MIPS)
4190 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
4195 #elif defined TARGET_OPENRISC
4196 cpu_model
= "or1200";
4197 #elif defined(TARGET_PPC)
4198 # ifdef TARGET_PPC64
4199 cpu_model
= "POWER8";
4203 #elif defined TARGET_SH4
4204 cpu_model
= TYPE_SH7785_CPU
;
4210 /* NOTE: we need to init the CPU at this stage to get
4211 qemu_host_page_size */
4212 cpu
= cpu_init(cpu_model
);
4214 fprintf(stderr
, "Unable to find CPU definition\n");
4222 if (getenv("QEMU_STRACE")) {
4226 if (getenv("QEMU_RAND_SEED")) {
4227 handle_arg_randseed(getenv("QEMU_RAND_SEED"));
4230 target_environ
= envlist_to_environ(envlist
, NULL
);
4231 envlist_free(envlist
);
4234 * Now that page sizes are configured in cpu_init() we can do
4235 * proper page alignment for guest_base.
4237 guest_base
= HOST_PAGE_ALIGN(guest_base
);
4239 if (reserved_va
|| have_guest_base
) {
4240 guest_base
= init_guest_space(guest_base
, reserved_va
, 0,
4242 if (guest_base
== (unsigned long)-1) {
4243 fprintf(stderr
, "Unable to reserve 0x%lx bytes of virtual address "
4244 "space for use as guest address space (check your virtual "
4245 "memory ulimit setting or reserve less using -R option)\n",
4251 mmap_next_start
= reserved_va
;
4256 * Read in mmap_min_addr kernel parameter. This value is used
4257 * When loading the ELF image to determine whether guest_base
4258 * is needed. It is also used in mmap_find_vma.
4263 if ((fp
= fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL
) {
4265 if (fscanf(fp
, "%lu", &tmp
) == 1) {
4266 mmap_min_addr
= tmp
;
4267 qemu_log_mask(CPU_LOG_PAGE
, "host mmap_min_addr=0x%lx\n", mmap_min_addr
);
4274 * Prepare copy of argv vector for target.
4276 target_argc
= argc
- optind
;
4277 target_argv
= calloc(target_argc
+ 1, sizeof (char *));
4278 if (target_argv
== NULL
) {
4279 (void) fprintf(stderr
, "Unable to allocate memory for target_argv\n");
4284 * If argv0 is specified (using '-0' switch) we replace
4285 * argv[0] pointer with the given one.
4288 if (argv0
!= NULL
) {
4289 target_argv
[i
++] = strdup(argv0
);
4291 for (; i
< target_argc
; i
++) {
4292 target_argv
[i
] = strdup(argv
[optind
+ i
]);
4294 target_argv
[target_argc
] = NULL
;
4296 ts
= g_new0(TaskState
, 1);
4297 init_task_state(ts
);
4298 /* build Task State */
4304 execfd
= qemu_getauxval(AT_EXECFD
);
4306 execfd
= open(filename
, O_RDONLY
);
4308 printf("Error while loading %s: %s\n", filename
, strerror(errno
));
4309 _exit(EXIT_FAILURE
);
4313 ret
= loader_exec(execfd
, filename
, target_argv
, target_environ
, regs
,
4316 printf("Error while loading %s: %s\n", filename
, strerror(-ret
));
4317 _exit(EXIT_FAILURE
);
4320 for (wrk
= target_environ
; *wrk
; wrk
++) {
4324 free(target_environ
);
4326 if (qemu_loglevel_mask(CPU_LOG_PAGE
)) {
4327 qemu_log("guest_base 0x%lx\n", guest_base
);
4330 qemu_log("start_brk 0x" TARGET_ABI_FMT_lx
"\n", info
->start_brk
);
4331 qemu_log("end_code 0x" TARGET_ABI_FMT_lx
"\n", info
->end_code
);
4332 qemu_log("start_code 0x" TARGET_ABI_FMT_lx
"\n",
4334 qemu_log("start_data 0x" TARGET_ABI_FMT_lx
"\n",
4336 qemu_log("end_data 0x" TARGET_ABI_FMT_lx
"\n", info
->end_data
);
4337 qemu_log("start_stack 0x" TARGET_ABI_FMT_lx
"\n",
4339 qemu_log("brk 0x" TARGET_ABI_FMT_lx
"\n", info
->brk
);
4340 qemu_log("entry 0x" TARGET_ABI_FMT_lx
"\n", info
->entry
);
4343 target_set_brk(info
->brk
);
4347 /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
4348 generating the prologue until now so that the prologue can take
4349 the real value of GUEST_BASE into account. */
4350 tcg_prologue_init(&tcg_ctx
);
4352 #if defined(TARGET_I386)
4353 env
->cr
[0] = CR0_PG_MASK
| CR0_WP_MASK
| CR0_PE_MASK
;
4354 env
->hflags
|= HF_PE_MASK
| HF_CPL_MASK
;
4355 if (env
->features
[FEAT_1_EDX
] & CPUID_SSE
) {
4356 env
->cr
[4] |= CR4_OSFXSR_MASK
;
4357 env
->hflags
|= HF_OSFXSR_MASK
;
4359 #ifndef TARGET_ABI32
4360 /* enable 64 bit mode if possible */
4361 if (!(env
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_LM
)) {
4362 fprintf(stderr
, "The selected x86 CPU does not support 64 bit mode\n");
4365 env
->cr
[4] |= CR4_PAE_MASK
;
4366 env
->efer
|= MSR_EFER_LMA
| MSR_EFER_LME
;
4367 env
->hflags
|= HF_LMA_MASK
;
4370 /* flags setup : we activate the IRQs by default as in user mode */
4371 env
->eflags
|= IF_MASK
;
4373 /* linux register setup */
4374 #ifndef TARGET_ABI32
4375 env
->regs
[R_EAX
] = regs
->rax
;
4376 env
->regs
[R_EBX
] = regs
->rbx
;
4377 env
->regs
[R_ECX
] = regs
->rcx
;
4378 env
->regs
[R_EDX
] = regs
->rdx
;
4379 env
->regs
[R_ESI
] = regs
->rsi
;
4380 env
->regs
[R_EDI
] = regs
->rdi
;
4381 env
->regs
[R_EBP
] = regs
->rbp
;
4382 env
->regs
[R_ESP
] = regs
->rsp
;
4383 env
->eip
= regs
->rip
;
4385 env
->regs
[R_EAX
] = regs
->eax
;
4386 env
->regs
[R_EBX
] = regs
->ebx
;
4387 env
->regs
[R_ECX
] = regs
->ecx
;
4388 env
->regs
[R_EDX
] = regs
->edx
;
4389 env
->regs
[R_ESI
] = regs
->esi
;
4390 env
->regs
[R_EDI
] = regs
->edi
;
4391 env
->regs
[R_EBP
] = regs
->ebp
;
4392 env
->regs
[R_ESP
] = regs
->esp
;
4393 env
->eip
= regs
->eip
;
4396 /* linux interrupt setup */
4397 #ifndef TARGET_ABI32
4398 env
->idt
.limit
= 511;
4400 env
->idt
.limit
= 255;
4402 env
->idt
.base
= target_mmap(0, sizeof(uint64_t) * (env
->idt
.limit
+ 1),
4403 PROT_READ
|PROT_WRITE
,
4404 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4405 idt_table
= g2h(env
->idt
.base
);
4428 /* linux segment setup */
4430 uint64_t *gdt_table
;
4431 env
->gdt
.base
= target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES
,
4432 PROT_READ
|PROT_WRITE
,
4433 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4434 env
->gdt
.limit
= sizeof(uint64_t) * TARGET_GDT_ENTRIES
- 1;
4435 gdt_table
= g2h(env
->gdt
.base
);
4437 write_dt(&gdt_table
[__USER_CS
>> 3], 0, 0xfffff,
4438 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4439 (3 << DESC_DPL_SHIFT
) | (0xa << DESC_TYPE_SHIFT
));
4441 /* 64 bit code segment */
4442 write_dt(&gdt_table
[__USER_CS
>> 3], 0, 0xfffff,
4443 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4445 (3 << DESC_DPL_SHIFT
) | (0xa << DESC_TYPE_SHIFT
));
4447 write_dt(&gdt_table
[__USER_DS
>> 3], 0, 0xfffff,
4448 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4449 (3 << DESC_DPL_SHIFT
) | (0x2 << DESC_TYPE_SHIFT
));
4451 cpu_x86_load_seg(env
, R_CS
, __USER_CS
);
4452 cpu_x86_load_seg(env
, R_SS
, __USER_DS
);
4454 cpu_x86_load_seg(env
, R_DS
, __USER_DS
);
4455 cpu_x86_load_seg(env
, R_ES
, __USER_DS
);
4456 cpu_x86_load_seg(env
, R_FS
, __USER_DS
);
4457 cpu_x86_load_seg(env
, R_GS
, __USER_DS
);
4458 /* This hack makes Wine work... */
4459 env
->segs
[R_FS
].selector
= 0;
4461 cpu_x86_load_seg(env
, R_DS
, 0);
4462 cpu_x86_load_seg(env
, R_ES
, 0);
4463 cpu_x86_load_seg(env
, R_FS
, 0);
4464 cpu_x86_load_seg(env
, R_GS
, 0);
4466 #elif defined(TARGET_AARCH64)
4470 if (!(arm_feature(env
, ARM_FEATURE_AARCH64
))) {
4472 "The selected ARM CPU does not support 64 bit mode\n");
4476 for (i
= 0; i
< 31; i
++) {
4477 env
->xregs
[i
] = regs
->regs
[i
];
4480 env
->xregs
[31] = regs
->sp
;
4482 #elif defined(TARGET_ARM)
4485 cpsr_write(env
, regs
->uregs
[16], CPSR_USER
| CPSR_EXEC
,
4487 for(i
= 0; i
< 16; i
++) {
4488 env
->regs
[i
] = regs
->uregs
[i
];
4490 #ifdef TARGET_WORDS_BIGENDIAN
4492 if (EF_ARM_EABI_VERSION(info
->elf_flags
) >= EF_ARM_EABI_VER4
4493 && (info
->elf_flags
& EF_ARM_BE8
)) {
4494 env
->uncached_cpsr
|= CPSR_E
;
4495 env
->cp15
.sctlr_el
[1] |= SCTLR_E0E
;
4497 env
->cp15
.sctlr_el
[1] |= SCTLR_B
;
4501 #elif defined(TARGET_UNICORE32)
4504 cpu_asr_write(env
, regs
->uregs
[32], 0xffffffff);
4505 for (i
= 0; i
< 32; i
++) {
4506 env
->regs
[i
] = regs
->uregs
[i
];
4509 #elif defined(TARGET_SPARC)
4513 env
->npc
= regs
->npc
;
4515 for(i
= 0; i
< 8; i
++)
4516 env
->gregs
[i
] = regs
->u_regs
[i
];
4517 for(i
= 0; i
< 8; i
++)
4518 env
->regwptr
[i
] = regs
->u_regs
[i
+ 8];
4520 #elif defined(TARGET_PPC)
4524 #if defined(TARGET_PPC64)
4525 #if defined(TARGET_ABI32)
4526 env
->msr
&= ~((target_ulong
)1 << MSR_SF
);
4528 env
->msr
|= (target_ulong
)1 << MSR_SF
;
4531 env
->nip
= regs
->nip
;
4532 for(i
= 0; i
< 32; i
++) {
4533 env
->gpr
[i
] = regs
->gpr
[i
];
4536 #elif defined(TARGET_M68K)
4539 env
->dregs
[0] = regs
->d0
;
4540 env
->dregs
[1] = regs
->d1
;
4541 env
->dregs
[2] = regs
->d2
;
4542 env
->dregs
[3] = regs
->d3
;
4543 env
->dregs
[4] = regs
->d4
;
4544 env
->dregs
[5] = regs
->d5
;
4545 env
->dregs
[6] = regs
->d6
;
4546 env
->dregs
[7] = regs
->d7
;
4547 env
->aregs
[0] = regs
->a0
;
4548 env
->aregs
[1] = regs
->a1
;
4549 env
->aregs
[2] = regs
->a2
;
4550 env
->aregs
[3] = regs
->a3
;
4551 env
->aregs
[4] = regs
->a4
;
4552 env
->aregs
[5] = regs
->a5
;
4553 env
->aregs
[6] = regs
->a6
;
4554 env
->aregs
[7] = regs
->usp
;
4556 ts
->sim_syscalls
= 1;
4558 #elif defined(TARGET_MICROBLAZE)
4560 env
->regs
[0] = regs
->r0
;
4561 env
->regs
[1] = regs
->r1
;
4562 env
->regs
[2] = regs
->r2
;
4563 env
->regs
[3] = regs
->r3
;
4564 env
->regs
[4] = regs
->r4
;
4565 env
->regs
[5] = regs
->r5
;
4566 env
->regs
[6] = regs
->r6
;
4567 env
->regs
[7] = regs
->r7
;
4568 env
->regs
[8] = regs
->r8
;
4569 env
->regs
[9] = regs
->r9
;
4570 env
->regs
[10] = regs
->r10
;
4571 env
->regs
[11] = regs
->r11
;
4572 env
->regs
[12] = regs
->r12
;
4573 env
->regs
[13] = regs
->r13
;
4574 env
->regs
[14] = regs
->r14
;
4575 env
->regs
[15] = regs
->r15
;
4576 env
->regs
[16] = regs
->r16
;
4577 env
->regs
[17] = regs
->r17
;
4578 env
->regs
[18] = regs
->r18
;
4579 env
->regs
[19] = regs
->r19
;
4580 env
->regs
[20] = regs
->r20
;
4581 env
->regs
[21] = regs
->r21
;
4582 env
->regs
[22] = regs
->r22
;
4583 env
->regs
[23] = regs
->r23
;
4584 env
->regs
[24] = regs
->r24
;
4585 env
->regs
[25] = regs
->r25
;
4586 env
->regs
[26] = regs
->r26
;
4587 env
->regs
[27] = regs
->r27
;
4588 env
->regs
[28] = regs
->r28
;
4589 env
->regs
[29] = regs
->r29
;
4590 env
->regs
[30] = regs
->r30
;
4591 env
->regs
[31] = regs
->r31
;
4592 env
->sregs
[SR_PC
] = regs
->pc
;
4594 #elif defined(TARGET_MIPS)
4598 for(i
= 0; i
< 32; i
++) {
4599 env
->active_tc
.gpr
[i
] = regs
->regs
[i
];
4601 env
->active_tc
.PC
= regs
->cp0_epc
& ~(target_ulong
)1;
4602 if (regs
->cp0_epc
& 1) {
4603 env
->hflags
|= MIPS_HFLAG_M16
;
4606 #elif defined(TARGET_OPENRISC)
4610 for (i
= 0; i
< 32; i
++) {
4611 env
->gpr
[i
] = regs
->gpr
[i
];
4617 #elif defined(TARGET_SH4)
4621 for(i
= 0; i
< 16; i
++) {
4622 env
->gregs
[i
] = regs
->regs
[i
];
4626 #elif defined(TARGET_ALPHA)
4630 for(i
= 0; i
< 28; i
++) {
4631 env
->ir
[i
] = ((abi_ulong
*)regs
)[i
];
4633 env
->ir
[IR_SP
] = regs
->usp
;
4636 #elif defined(TARGET_CRIS)
4638 env
->regs
[0] = regs
->r0
;
4639 env
->regs
[1] = regs
->r1
;
4640 env
->regs
[2] = regs
->r2
;
4641 env
->regs
[3] = regs
->r3
;
4642 env
->regs
[4] = regs
->r4
;
4643 env
->regs
[5] = regs
->r5
;
4644 env
->regs
[6] = regs
->r6
;
4645 env
->regs
[7] = regs
->r7
;
4646 env
->regs
[8] = regs
->r8
;
4647 env
->regs
[9] = regs
->r9
;
4648 env
->regs
[10] = regs
->r10
;
4649 env
->regs
[11] = regs
->r11
;
4650 env
->regs
[12] = regs
->r12
;
4651 env
->regs
[13] = regs
->r13
;
4652 env
->regs
[14] = info
->start_stack
;
4653 env
->regs
[15] = regs
->acr
;
4654 env
->pc
= regs
->erp
;
4656 #elif defined(TARGET_S390X)
4659 for (i
= 0; i
< 16; i
++) {
4660 env
->regs
[i
] = regs
->gprs
[i
];
4662 env
->psw
.mask
= regs
->psw
.mask
;
4663 env
->psw
.addr
= regs
->psw
.addr
;
4665 #elif defined(TARGET_TILEGX)
4668 for (i
= 0; i
< TILEGX_R_COUNT
; i
++) {
4669 env
->regs
[i
] = regs
->regs
[i
];
4671 for (i
= 0; i
< TILEGX_SPR_COUNT
; i
++) {
4677 #error unsupported target CPU
4680 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4681 ts
->stack_base
= info
->start_stack
;
4682 ts
->heap_base
= info
->brk
;
4683 /* This will be filled in on the first SYS_HEAPINFO call. */
4688 if (gdbserver_start(gdbstub_port
) < 0) {
4689 fprintf(stderr
, "qemu: could not open gdbserver on port %d\n",
4693 gdb_handlesig(cpu
, 0);