/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include <sys/syscall.h>
#include <sys/resource.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
static const char *filename;
static const char *argv0;
static int gdbstub_port;
static envlist_t *envlist;
static const char *cpu_model;
unsigned long mmap_min_addr;
unsigned long guest_base;
#define EXCP_DUMP(env, fmt, ...)                                        \
do {                                                                    \
    CPUState *cs = ENV_GET_CPU(env);                                    \
    fprintf(stderr, fmt, ## __VA_ARGS__);                               \
    cpu_dump_state(cs, stderr, fprintf, 0);                             \
    if (qemu_log_separate()) {                                          \
        qemu_log(fmt, ## __VA_ARGS__);                                  \
        log_cpu_state(cs, 0);                                           \
    }                                                                   \
} while (0)
#if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
/*
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
 *
 * This way we will never overlap with our own libraries or binaries or stack
 * or anything else that QEMU maps.
 */
# ifdef TARGET_MIPS
/* MIPS only supports 31 bits of virtual address space for user space */
unsigned long reserved_va = 0x77000000;
# else
unsigned long reserved_va = 0xf7000000;
# endif
#else
unsigned long reserved_va;
#endif
static void usage(int exitcode);

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;

/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
   we allocate a bigger stack.  Need a better solution, for example
   by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;
void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}
#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)
{
    return -1;
}
#endif
/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work.  */
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
static int pending_cpus;
/* Make sure everything is in a consistent state for calling fork().  */
void fork_start(void)
{
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    pthread_mutex_lock(&exclusive_lock);
    mmap_fork_start();
}

void fork_end(int child)
{
    mmap_fork_end(child);
    if (child) {
        CPUState *cpu, *next_cpu;
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads.  */
        CPU_FOREACH_SAFE(cpu, next_cpu) {
            if (cpu != thread_cpu) {
                QTAILQ_REMOVE(&cpus, cpu, node);
            }
        }
        pthread_mutex_init(&exclusive_lock, NULL);
        pthread_mutex_init(&cpu_list_mutex, NULL);
        pthread_cond_init(&exclusive_cond, NULL);
        pthread_cond_init(&exclusive_resume, NULL);
        qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
        gdbserver_fork(thread_cpu);
    } else {
        pthread_mutex_unlock(&exclusive_lock);
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
    }
}
/* Wait for pending exclusive operations to complete.  The exclusive lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
    }
}

/* Start an exclusive operation.
   Must only be called from outside cpu_arm_exec.  */
static inline void start_exclusive(void)
{
    CPUState *other_cpu;

    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();

    pending_cpus = 1;
    /* Make all other cpus stop executing.  */
    CPU_FOREACH(other_cpu) {
        if (other_cpu->running) {
            pending_cpus++;
            cpu_exit(other_cpu);
        }
    }
    if (pending_cpus > 1) {
        pthread_cond_wait(&exclusive_cond, &exclusive_lock);
    }
}

/* Finish an exclusive operation.  */
static inline void __attribute__((unused)) end_exclusive(void)
{
    pending_cpus = 0;
    pthread_cond_broadcast(&exclusive_resume);
    pthread_mutex_unlock(&exclusive_lock);
}

/* Wait for exclusive ops to finish, and begin cpu execution.  */
static inline void cpu_exec_start(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();
    cpu->running = true;
    pthread_mutex_unlock(&exclusive_lock);
}

/* Mark cpu as not executing, and release pending exclusive ops.  */
static inline void cpu_exec_end(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    cpu->running = false;
    if (pending_cpus > 1) {
        pending_cpus--;
        if (pending_cpus == 1) {
            pthread_cond_signal(&exclusive_cond);
        }
    }
    exclusive_idle();
    pthread_mutex_unlock(&exclusive_lock);
}

void cpu_list_lock(void)
{
    pthread_mutex_lock(&cpu_list_mutex);
}

void cpu_list_unlock(void)
{
    pthread_mutex_unlock(&cpu_list_mutex);
}
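/*
 * Illustrative sketch (not part of the original file): a target helper
 * that must update guest memory atomically with respect to every other
 * vCPU thread would bracket the access with start_exclusive() and
 * end_exclusive().  The helper name and the CPUArchState parameter are
 * hypothetical; only the bracketing pattern is what the helpers above
 * actually provide.
 */
#if 0
static void emulate_atomic_increment(CPUArchState *env, abi_ulong addr)
{
    uint32_t val;

    start_exclusive();              /* stop all other vCPU threads */
    if (!get_user_u32(val, addr)) { /* read-modify-write is now safe */
        put_user_u32(val + 1, addr);
    }
    end_exclusive();                /* wake the stopped vCPUs again */
}
#endif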
/***********************************************************/
/* CPUX86 core interface */

uint64_t cpu_get_tsc(CPUX86State *env)
{
    return cpu_get_host_ticks();
}

static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
                     int flags)
{
    unsigned int e1, e2;
    uint32_t *p;

    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
    e2 |= flags;
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}

static uint64_t *idt_table;

#ifdef TARGET_X86_64
static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;

    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
    p[2] = tswap32(addr >> 32);
    p[3] = 0;
}

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
}
#else
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;

    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate(idt_table + n, 0, dpl, 0, 0);
}
#endif
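/*
 * Illustrative sketch (hypothetical, not taken verbatim from this file):
 * because only the DPL is honoured in user-mode emulation, IDT setup
 * reduces to marking which vectors guest user code may raise directly.
 * A typical arrangement is kernel-only fault vectors plus a DPL-3 gate
 * for the legacy int $0x80 Linux syscall entry:
 */
#if 0
static void init_example_idt(void)
{
    set_idt(0, 0);       /* #DE divide error: kernel-only              */
    set_idt(13, 0);      /* #GP general protection fault: kernel-only  */
    set_idt(0x80, 3);    /* int $0x80: reachable from user mode (DPL 3) */
}
#endif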
void cpu_loop(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_siginfo_t info;

    trapnr = cpu_x86_exec(cs);

    /* linux syscall from int $0x80 */
    env->regs[R_EAX] = do_syscall(env,

    /* linux syscall from syscall instruction */
    env->regs[R_EAX] = do_syscall(env,

    info.si_signo = TARGET_SIGBUS;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._sigfault._addr = 0;
    queue_signal(env, info.si_signo, &info);

    /* XXX: potential problem if ABI32 */
#ifndef TARGET_X86_64
    if (env->eflags & VM_MASK) {
        handle_vm86_fault(env);

    info.si_signo = TARGET_SIGSEGV;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._sigfault._addr = 0;
    queue_signal(env, info.si_signo, &info);

    info.si_signo = TARGET_SIGSEGV;
    if (!(env->error_code & 1))
        info.si_code = TARGET_SEGV_MAPERR;
    else
        info.si_code = TARGET_SEGV_ACCERR;
    info._sifields._sigfault._addr = env->cr[2];
    queue_signal(env, info.si_signo, &info);

#ifndef TARGET_X86_64
    if (env->eflags & VM_MASK) {
        handle_vm86_trap(env, trapnr);

    /* division by zero */
    info.si_signo = TARGET_SIGFPE;
    info.si_code = TARGET_FPE_INTDIV;
    info._sifields._sigfault._addr = env->eip;
    queue_signal(env, info.si_signo, &info);

#ifndef TARGET_X86_64
    if (env->eflags & VM_MASK) {
        handle_vm86_trap(env, trapnr);

    info.si_signo = TARGET_SIGTRAP;
    if (trapnr == EXCP01_DB) {
        info.si_code = TARGET_TRAP_BRKPT;
        info._sifields._sigfault._addr = env->eip;
    } else {
        info.si_code = TARGET_SI_KERNEL;
        info._sifields._sigfault._addr = 0;
    }
    queue_signal(env, info.si_signo, &info);

#ifndef TARGET_X86_64
    if (env->eflags & VM_MASK) {
        handle_vm86_trap(env, trapnr);

    info.si_signo = TARGET_SIGSEGV;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._sigfault._addr = 0;
    queue_signal(env, info.si_signo, &info);

    info.si_signo = TARGET_SIGILL;
    info.si_code = TARGET_ILL_ILLOPN;
    info._sifields._sigfault._addr = env->eip;
    queue_signal(env, info.si_signo, &info);

    /* just indicate that signals should be handled asap */

    sig = gdb_handlesig(cs, TARGET_SIGTRAP);
    info.si_code = TARGET_TRAP_BRKPT;
    queue_signal(env, info.si_signo, &info);

    pc = env->segs[R_CS].base + env->eip;
    EXCP_DUMP(env, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",

    process_pending_signals(env);
}
#define get_user_code_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_code_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define put_user_data_u32(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap32(__x);                         \
        }                                               \
        put_user_u32(__x, (gaddr));                     \
    })

#define put_user_data_u16(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap16(__x);                         \
        }                                               \
        put_user_u16(__x, (gaddr));                     \
    })
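/*
 * Illustrative usage (hypothetical helper, not in the original file):
 * the *_code_* accessors swap according to SCTLR.B (instruction
 * endianness) while the *_data_* accessors follow the data endianness
 * reported by arm_cpu_bswap_data(), so callers can read guest memory
 * without open-coding the byte swapping:
 */
#if 0
static abi_long fetch_guest_insn(CPUARMState *env, uint32_t *insn)
{
    /* Read the 32-bit instruction at the guest PC, swapped as needed;
       returns 0 on success, non-zero if the access faulted.  */
    return get_user_code_u32(*insn, env->regs[15], env);
}
#endif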
/* Commpage handling -- there is no commpage for AArch64 */

#ifndef TARGET_AARCH64
/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 *
 * Input:
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * Output:
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
    uint64_t oldval, newval, val;
    uint32_t addr, cpsr;
    target_siginfo_t info;

    /* Based on the 32 bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations. However things like ldrex/strex are much harder so
       there's not much point trying.  */
    start_exclusive();
    cpsr = cpsr_read(env);
    addr = env->regs[2];

    if (get_user_u64(oldval, env->regs[0])) {
        env->exception.vaddress = env->regs[0];
        goto segv;
    }

    if (get_user_u64(newval, env->regs[1])) {
        env->exception.vaddress = env->regs[1];
        goto segv;
    }

    if (get_user_u64(val, addr)) {
        env->exception.vaddress = addr;
        goto segv;
    }

    if (val == oldval) {
        val = newval;

        if (put_user_u64(val, addr)) {
            env->exception.vaddress = addr;
            goto segv;
        }

        env->regs[0] = 0;
        cpsr |= CPSR_C;
    } else {
        env->regs[0] = -1;
        cpsr &= ~CPSR_C;
    }

    cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
    end_exclusive();
    return;

segv:
    end_exclusive();
    /* We get the PC of the entry address - which is as good as anything,
       on a real kernel what you get depends on which mode it uses. */
    info.si_signo = TARGET_SIGSEGV;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->exception.vaddress;
    queue_signal(env, info.si_signo, &info);
}
/* Handle a jump to the kernel code page.  */
static int do_kernel_trap(CPUARMState *env)
{
    uint32_t addr;
    uint32_t cpsr;
    uint32_t val;

    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        /* ??? No-op. Will need to do better for SMP.  */
        break;
    case 0xffff0fc0: /* __kernel_cmpxchg */
        /* XXX: This only works between threads, not between processes.
           It's probably possible to implement this with native host
           operations. However things like ldrex/strex are much harder so
           there's not much point trying.  */
        cpsr = cpsr_read(env);
        addr = env->regs[2];
        /* FIXME: This should SEGV if the access fails.  */
        if (get_user_u32(val, addr)) {
            val = ~env->regs[0];
        }
        if (val == env->regs[0]) {
            val = env->regs[1];
            /* FIXME: Check for segfaults.  */
            put_user_u32(val, addr);
        }
        cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
        break;
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = cpu_get_tls(env);
        break;
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);
        break;
    default:
        return 1;
    }

    /* Jump back to the caller.  */
    addr = env->regs[14];
    env->regs[15] = addr;

    return 0;
}
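/*
 * Illustrative sketch (hypothetical, not part of the original file): how
 * guest code reaches the __kernel_cmpxchg64 helper emulated above.  The
 * commpage address and the pointer-based calling convention come from the
 * kernel_user_helpers documentation quoted earlier; the wrapper itself is
 * made up for illustration.
 */
#if 0
typedef int (*kuser_cmpxchg64_fn)(const uint64_t *oldval,
                                  const uint64_t *newval,
                                  volatile uint64_t *ptr);
#define __kuser_cmpxchg64 ((kuser_cmpxchg64_fn)0xffff0f60)

static int guest_cas64(volatile uint64_t *ptr,
                       uint64_t expected, uint64_t desired)
{
    /* Returns 0 if *ptr was updated to 'desired', non-zero otherwise. */
    return __kuser_cmpxchg64(&expected, &desired, ptr);
}
#endif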
/* Store exclusive handling for AArch32 */
static int do_strex(CPUARMState *env)
{
    uint64_t val;
    int size;
    int rc = 1;
    int segv = 0;
    uint32_t addr;

    if (env->exclusive_addr != env->exclusive_test) {
        goto fail;
    }
    /* We know we're always AArch32 so the address is in uint32_t range
     * unless it was the -1 exclusive-monitor-lost value (which won't
     * match exclusive_test above).
     */
    assert(extract64(env->exclusive_addr, 32, 32) == 0);
    addr = env->exclusive_addr;
    size = env->exclusive_info & 0xf;
    switch (size) {
    case 0:
        segv = get_user_u8(val, addr);
        break;
    case 1:
        segv = get_user_data_u16(val, addr, env);
        break;
    case 2:
    case 3:
        segv = get_user_data_u32(val, addr, env);
        break;
    }
    if (segv) {
        env->exception.vaddress = addr;
        goto done;
    }
    if (size == 3) {
        uint32_t valhi;
        segv = get_user_data_u32(valhi, addr + 4, env);
        if (segv) {
            env->exception.vaddress = addr + 4;
            goto done;
        }
        if (arm_cpu_bswap_data(env)) {
            val = deposit64((uint64_t)valhi, 32, 32, val);
        } else {
            val = deposit64(val, 32, 32, valhi);
        }
    }
    if (val != env->exclusive_val) {
        goto fail;
    }

    val = env->regs[(env->exclusive_info >> 8) & 0xf];
    switch (size) {
    case 0:
        segv = put_user_u8(val, addr);
        break;
    case 1:
        segv = put_user_data_u16(val, addr, env);
        break;
    case 2:
    case 3:
        segv = put_user_data_u32(val, addr, env);
        break;
    }
    if (segv) {
        env->exception.vaddress = addr;
        goto done;
    }
    if (size == 3) {
        val = env->regs[(env->exclusive_info >> 12) & 0xf];
        segv = put_user_data_u32(val, addr + 4, env);
        if (segv) {
            env->exception.vaddress = addr + 4;
            goto done;
        }
    }
    rc = 0;
fail:
    env->regs[(env->exclusive_info >> 4) & 0xf] = rc;
done:
    return segv;
}
void cpu_loop(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    unsigned int n, insn;
    target_siginfo_t info;

    trapnr = cpu_arm_exec(cs);

    TaskState *ts = cs->opaque;

    /* we handle the FPU emulation here, as Linux */
    /* we get the opcode */
    /* FIXME - what to do if get_user() fails? */
    get_user_code_u32(opcode, env->regs[15], env);

    rc = EmulateAll(opcode, &ts->fpa, env);
    if (rc == 0) { /* illegal instruction */
        info.si_signo = TARGET_SIGILL;
        info.si_code = TARGET_ILL_ILLOPN;
        info._sifields._sigfault._addr = env->regs[15];
        queue_signal(env, info.si_signo, &info);
    } else if (rc < 0) { /* FP exception */

        /* translate softfloat flags to FPSR flags */
        if (-rc & float_flag_invalid)
        if (-rc & float_flag_divbyzero)
        if (-rc & float_flag_overflow)
        if (-rc & float_flag_underflow)
        if (-rc & float_flag_inexact)

        FPSR fpsr = ts->fpa.fpsr;
        //printf("fpsr 0x%x, arm_fpe 0x%x\n", fpsr, arm_fpe);

        if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
            info.si_signo = TARGET_SIGFPE;

            /* ordered by priority, least first */
            if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES;
            if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND;
            if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF;
            if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV;
            if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV;

            info._sifields._sigfault._addr = env->regs[15];
            queue_signal(env, info.si_signo, &info);

        /* accumulate unenabled exceptions */
        if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC))
        if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC))
        if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC))
        if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC))
        if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC))

    } else { /* everything OK */

    if (trapnr == EXCP_BKPT) {
        /* FIXME - what to do if get_user() fails? */
        get_user_code_u16(insn, env->regs[15], env);

        /* FIXME - what to do if get_user() fails? */
        get_user_code_u32(insn, env->regs[15], env);
        n = (insn & 0xf) | ((insn >> 4) & 0xff0);

    /* FIXME - what to do if get_user() fails? */
    get_user_code_u16(insn, env->regs[15] - 2, env);

    /* FIXME - what to do if get_user() fails? */
    get_user_code_u32(insn, env->regs[15] - 4, env);

    if (n == ARM_NR_cacheflush) {
    } else if (n == ARM_NR_semihosting
               || n == ARM_NR_thumb_semihosting) {
        env->regs[0] = do_arm_semihosting(env);
    } else if (n == 0 || n >= ARM_SYSCALL_BASE || env->thumb) {
        if (env->thumb || n == 0) {
        n -= ARM_SYSCALL_BASE;
        if (n > ARM_NR_BASE) {
        case ARM_NR_cacheflush:
        cpu_set_tls(env, env->regs[0]);
        case ARM_NR_breakpoint:
        env->regs[15] -= env->thumb ? 2 : 4;
        gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
        env->regs[0] = -TARGET_ENOSYS;
        env->regs[0] = do_syscall(env,

    /* just indicate that signals should be handled asap */

    if (!do_strex(env)) {
    /* fall through for segv */
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        addr = env->exception.vaddress;
        info.si_signo = TARGET_SIGSEGV;
        /* XXX: check env->error_code */
        info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = addr;
        queue_signal(env, info.si_signo, &info);

    sig = gdb_handlesig(cs, TARGET_SIGTRAP);
    info.si_code = TARGET_TRAP_BRKPT;
    queue_signal(env, info.si_signo, &info);

    case EXCP_KERNEL_TRAP:
        if (do_kernel_trap(env))

    EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);

    process_pending_signals(env);
}
/* Handle AArch64 store-release exclusive
 *
 * rs = register in which to return the status result
 * rt = register that is stored
 * rt2 = second register stored (for STP)
 */
static int do_strex_a64(CPUARMState *env)
{
    /* size | (is_pair << 2) | (rs << 4) | (rt << 9) | (rt2 << 14) */
    size = extract32(env->exclusive_info, 0, 2);
    is_pair = extract32(env->exclusive_info, 2, 1);
    rs = extract32(env->exclusive_info, 4, 5);
    rt = extract32(env->exclusive_info, 9, 5);
    rt2 = extract32(env->exclusive_info, 14, 5);

    addr = env->exclusive_addr;

    if (addr != env->exclusive_test) {

    segv = get_user_u8(val, addr);
    segv = get_user_u16(val, addr);
    segv = get_user_u32(val, addr);
    segv = get_user_u64(val, addr);

    env->exception.vaddress = addr;

    if (val != env->exclusive_val) {

    segv = get_user_u32(val, addr + 4);
    segv = get_user_u64(val, addr + 8);

    env->exception.vaddress = addr + (size == 2 ? 4 : 8);

    if (val != env->exclusive_high) {

    /* handle the zero register */
    val = rt == 31 ? 0 : env->xregs[rt];

    segv = put_user_u8(val, addr);
    segv = put_user_u16(val, addr);
    segv = put_user_u32(val, addr);
    segv = put_user_u64(val, addr);

    /* handle the zero register */
    val = rt2 == 31 ? 0 : env->xregs[rt2];

    segv = put_user_u32(val, addr + 4);
    segv = put_user_u64(val, addr + 8);

    env->exception.vaddress = addr + (size == 2 ? 4 : 8);

    /* rs == 31 encodes a write to the ZR, thus throwing away
     * the status return. This is rather silly but valid.
     */
    env->xregs[rs] = rc;

    /* instruction faulted, PC does not advance */
    /* either way a strex releases any exclusive lock we have */
    env->exclusive_addr = -1;
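/*
 * Worked example (added for illustration, not part of the original file):
 * the translator packs the store-exclusive operands into exclusive_info
 * exactly as the decode above expects, i.e.
 * size | (is_pair << 2) | (rs << 4) | (rt << 9) | (rt2 << 14).
 * For a hypothetical "stxp w1, x2, x3, [x0]" (64-bit pair store with the
 * status written to w1) that is:
 *
 *     3 | (1 << 2) | (1 << 4) | (2 << 9) | (3 << 14) == 0x0000c417
 */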
/* AArch64 main loop */
void cpu_loop(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    target_siginfo_t info;

    trapnr = cpu_arm_exec(cs);

    env->xregs[0] = do_syscall(env,

    case EXCP_INTERRUPT:
        /* just indicate that signals should be handled asap */

    info.si_signo = TARGET_SIGILL;
    info.si_code = TARGET_ILL_ILLOPN;
    info._sifields._sigfault._addr = env->pc;
    queue_signal(env, info.si_signo, &info);

    if (!do_strex_a64(env)) {
    /* fall through for segv */
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        info.si_signo = TARGET_SIGSEGV;
        /* XXX: check env->error_code */
        info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = env->exception.vaddress;
        queue_signal(env, info.si_signo, &info);

    sig = gdb_handlesig(cs, TARGET_SIGTRAP);
    info.si_signo = sig;
    info.si_code = TARGET_TRAP_BRKPT;
    queue_signal(env, info.si_signo, &info);

    env->xregs[0] = do_arm_semihosting(env);

    EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);

    process_pending_signals(env);
    /* Exception return on AArch64 always clears the exclusive monitor,
     * so any return to running guest code implies this.
     * A strex (successful or otherwise) also clears the monitor, so
     * we don't need to special-case EXCP_STREX.
     */
    env->exclusive_addr = -1;
}
#endif /* ndef TARGET_ABI32 */
#ifdef TARGET_UNICORE32

void cpu_loop(CPUUniCore32State *env)
{
    CPUState *cs = CPU(uc32_env_get_cpu(env));
    unsigned int n, insn;
    target_siginfo_t info;

    trapnr = uc32_cpu_exec(cs);

    case UC32_EXCP_PRIV:
        get_user_u32(insn, env->regs[31] - 4);
        n = insn & 0xffffff;

        if (n >= UC32_SYSCALL_BASE) {
            n -= UC32_SYSCALL_BASE;
            if (n == UC32_SYSCALL_NR_set_tls) {
                cpu_set_tls(env, env->regs[0]);
            env->regs[0] = do_syscall(env,

    case UC32_EXCP_DTRAP:
    case UC32_EXCP_ITRAP:
        info.si_signo = TARGET_SIGSEGV;
        /* XXX: check env->error_code */
        info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = env->cp0.c4_faultaddr;
        queue_signal(env, info.si_signo, &info);

    case EXCP_INTERRUPT:
        /* just indicate that signals should be handled asap */

    sig = gdb_handlesig(cs, TARGET_SIGTRAP);
    info.si_signo = sig;
    info.si_code = TARGET_TRAP_BRKPT;
    queue_signal(env, info.si_signo, &info);

    process_pending_signals(env);

    EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
}
#define SPARC64_STACK_BIAS 2047

/* WARNING: dealing with register windows _is_ complicated. More info
   can be found at http://www.sics.se/~psm/sparcstack.html */
static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
{
    index = (index + cwp * 16) % (16 * env->nwindows);
    /* wrap handling : if cwp is on the last window, then we use the
       registers 'after' the end */
    if (index < 8 && env->cwp == env->nwindows - 1) {
        index += 16 * env->nwindows;
    }
    return index;
}
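/*
 * Worked example (added for illustration, not part of the original file):
 * with env->nwindows == 8 the window registers occupy 16 * 8 == 128
 * regbase[] slots.  get_reg_index(env, 7, 0) yields
 * (0 + 7 * 16) % 128 == 112.  get_reg_index(env, 0, 0) normally yields 0,
 * but when env->cwp is the last window (7) the wrap test above relocates
 * it to 0 + 16 * 8 == 128, i.e. into the spare slots kept after the
 * regular windows (the registers "after the end" mentioned above).
 */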
/* save the register window 'cwp1' */
static inline void save_window_offset(CPUSPARCState *env, int cwp1)
{
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
    sp_ptr += SPARC64_STACK_BIAS;
#endif
#if defined(DEBUG_WIN)
    printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",
#endif
    for (i = 0; i < 16; i++) {
        /* FIXME - what to do if put_user() fails? */
        put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);
    }
}

static void save_window(CPUSPARCState *env)
{
#ifndef TARGET_SPARC64
    unsigned int new_wim;
    new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
#else
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
#endif
}

static void restore_window(CPUSPARCState *env)
{
#ifndef TARGET_SPARC64
    unsigned int new_wim;
#endif
    unsigned int i, cwp1;

#ifndef TARGET_SPARC64
    new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);
#endif

    /* restore the invalid window */
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
    sp_ptr += SPARC64_STACK_BIAS;
#endif
#if defined(DEBUG_WIN)
    printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n",
#endif
    for (i = 0; i < 16; i++) {
        /* FIXME - what to do if get_user() fails? */
        get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);
    }
#ifdef TARGET_SPARC64
    if (env->cleanwin < env->nwindows - 1)
#endif
}

static void flush_windows(CPUSPARCState *env)
{
    /* if restore would invoke restore_window(), then we can stop */
    cwp1 = cpu_cwp_inc(env, env->cwp + offset);
#ifndef TARGET_SPARC64
    if (env->wim & (1 << cwp1))
#else
    if (env->canrestore == 0)
#endif
    save_window_offset(env, cwp1);

    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
#ifndef TARGET_SPARC64
    /* set wim so that restore will reload the registers */
    env->wim = 1 << cwp1;
#endif
#if defined(DEBUG_WIN)
    printf("flush_windows: nb=%d\n", offset - 1);
#endif
}
void cpu_loop(CPUSPARCState *env)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    target_siginfo_t info;

    trapnr = cpu_sparc_exec(cs);

    /* Compute PSR before exposing state.  */
    if (env->cc_op != CC_OP_FLAGS) {

#ifndef TARGET_SPARC64
    ret = do_syscall(env, env->gregs[1],
                     env->regwptr[0], env->regwptr[1],
                     env->regwptr[2], env->regwptr[3],
                     env->regwptr[4], env->regwptr[5],
    if ((abi_ulong)ret >= (abi_ulong)(-515)) {
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
        env->xcc |= PSR_CARRY;
#else
        env->psr |= PSR_CARRY;
#endif
    } else {
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
        env->xcc &= ~PSR_CARRY;
#else
        env->psr &= ~PSR_CARRY;
#endif
    }
    env->regwptr[0] = ret;
    /* next instruction */
    env->npc = env->npc + 4;

    case 0x83: /* flush windows */
        /* next instruction */
        env->npc = env->npc + 4;

#ifndef TARGET_SPARC64
    case TT_WIN_OVF: /* window overflow */
    case TT_WIN_UNF: /* window underflow */
        restore_window(env);

        info.si_signo = TARGET_SIGSEGV;
        /* XXX: check env->error_code */
        info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = env->mmuregs[4];
        queue_signal(env, info.si_signo, &info);

    case TT_SPILL: /* window overflow */
    case TT_FILL: /* window underflow */
        restore_window(env);

        info.si_signo = TARGET_SIGSEGV;
        /* XXX: check env->error_code */
        info.si_code = TARGET_SEGV_MAPERR;
        if (trapnr == TT_DFAULT)
            info._sifields._sigfault._addr = env->dmmuregs[4];
        else
            info._sifields._sigfault._addr = cpu_tsptr(env)->tpc;
        queue_signal(env, info.si_signo, &info);

#ifndef TARGET_ABI32
    sparc64_get_context(env);
    sparc64_set_context(env);

    case EXCP_INTERRUPT:
        /* just indicate that signals should be handled asap */

    info.si_signo = TARGET_SIGILL;
    info.si_code = TARGET_ILL_ILLOPC;
    info._sifields._sigfault._addr = env->pc;
    queue_signal(env, info.si_signo, &info);

    sig = gdb_handlesig(cs, TARGET_SIGTRAP);
    info.si_signo = sig;
    info.si_code = TARGET_TRAP_BRKPT;
    queue_signal(env, info.si_signo, &info);

    printf("Unhandled trap: 0x%x\n", trapnr);
    cpu_dump_state(cs, stderr, fprintf, 0);

    process_pending_signals(env);
}
static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env)
{
    return cpu_get_host_ticks();
}

uint64_t cpu_ppc_load_tbl(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env);
}

uint32_t cpu_ppc_load_tbu(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env) >> 32;
}

uint64_t cpu_ppc_load_atbl(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env);
}

uint32_t cpu_ppc_load_atbu(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env) >> 32;
}

uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env)
    __attribute__ (( alias ("cpu_ppc_load_tbu") ));

uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env)
{
    return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
}

/* XXX: to be fixed */
int ppc_dcr_read(ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
{
    return -1;
}

int ppc_dcr_write(ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
{
    return -1;
}
static int do_store_exclusive(CPUPPCState *env)
{
    target_ulong page_addr;
    target_ulong val, val2 __attribute__((unused)) = 0;

    addr = env->reserve_ea;
    page_addr = addr & TARGET_PAGE_MASK;

    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {

    int reg = env->reserve_info & 0x1f;
    int size = env->reserve_info >> 5;

    if (addr == env->reserve_addr) {
        case 1: segv = get_user_u8(val, addr); break;
        case 2: segv = get_user_u16(val, addr); break;
        case 4: segv = get_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
        case 8: segv = get_user_u64(val, addr); break;
        segv = get_user_u64(val, addr);
        segv = get_user_u64(val2, addr + 8);
#endif
        if (!segv && val == env->reserve_val) {
            val = env->gpr[reg];
            case 1: segv = put_user_u8(val, addr); break;
            case 2: segv = put_user_u16(val, addr); break;
            case 4: segv = put_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
            case 8: segv = put_user_u64(val, addr); break;
            if (val2 == env->reserve_val2) {
                val = env->gpr[reg + 1];
                val2 = env->gpr[reg + 1];
                segv = put_user_u64(val, addr);
                segv = put_user_u64(val2, addr + 8);
#endif
        }
    }
    env->crf[0] = (stored << 1) | xer_so;
    env->reserve_addr = (target_ulong)-1;
}
1586 void cpu_loop(CPUPPCState
*env
)
1588 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
1589 target_siginfo_t info
;
1595 trapnr
= cpu_ppc_exec(cs
);
1598 case POWERPC_EXCP_NONE
:
1601 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1602 cpu_abort(cs
, "Critical interrupt while in user mode. "
1605 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1606 cpu_abort(cs
, "Machine check exception while in user mode. "
1609 case POWERPC_EXCP_DSI
: /* Data storage exception */
1610 EXCP_DUMP(env
, "Invalid data memory access: 0x" TARGET_FMT_lx
"\n",
1612 /* XXX: check this. Seems bugged */
1613 switch (env
->error_code
& 0xFF000000) {
1615 info
.si_signo
= TARGET_SIGSEGV
;
1617 info
.si_code
= TARGET_SEGV_MAPERR
;
1620 info
.si_signo
= TARGET_SIGILL
;
1622 info
.si_code
= TARGET_ILL_ILLADR
;
1625 info
.si_signo
= TARGET_SIGSEGV
;
1627 info
.si_code
= TARGET_SEGV_ACCERR
;
1630 /* Let's send a regular segfault... */
1631 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1633 info
.si_signo
= TARGET_SIGSEGV
;
1635 info
.si_code
= TARGET_SEGV_MAPERR
;
1638 info
._sifields
._sigfault
._addr
= env
->nip
;
1639 queue_signal(env
, info
.si_signo
, &info
);
1641 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1642 EXCP_DUMP(env
, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1643 "\n", env
->spr
[SPR_SRR0
]);
1644 /* XXX: check this */
1645 switch (env
->error_code
& 0xFF000000) {
1647 info
.si_signo
= TARGET_SIGSEGV
;
1649 info
.si_code
= TARGET_SEGV_MAPERR
;
1653 info
.si_signo
= TARGET_SIGSEGV
;
1655 info
.si_code
= TARGET_SEGV_ACCERR
;
1658 /* Let's send a regular segfault... */
1659 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1661 info
.si_signo
= TARGET_SIGSEGV
;
1663 info
.si_code
= TARGET_SEGV_MAPERR
;
1666 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1667 queue_signal(env
, info
.si_signo
, &info
);
1669 case POWERPC_EXCP_EXTERNAL
: /* External input */
1670 cpu_abort(cs
, "External interrupt while in user mode. "
1673 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1674 EXCP_DUMP(env
, "Unaligned memory access\n");
1675 /* XXX: check this */
1676 info
.si_signo
= TARGET_SIGBUS
;
1678 info
.si_code
= TARGET_BUS_ADRALN
;
1679 info
._sifields
._sigfault
._addr
= env
->nip
;
1680 queue_signal(env
, info
.si_signo
, &info
);
1682 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1683 /* XXX: check this */
1684 switch (env
->error_code
& ~0xF) {
1685 case POWERPC_EXCP_FP
:
1686 EXCP_DUMP(env
, "Floating point program exception\n");
1687 info
.si_signo
= TARGET_SIGFPE
;
1689 switch (env
->error_code
& 0xF) {
1690 case POWERPC_EXCP_FP_OX
:
1691 info
.si_code
= TARGET_FPE_FLTOVF
;
1693 case POWERPC_EXCP_FP_UX
:
1694 info
.si_code
= TARGET_FPE_FLTUND
;
1696 case POWERPC_EXCP_FP_ZX
:
1697 case POWERPC_EXCP_FP_VXZDZ
:
1698 info
.si_code
= TARGET_FPE_FLTDIV
;
1700 case POWERPC_EXCP_FP_XX
:
1701 info
.si_code
= TARGET_FPE_FLTRES
;
1703 case POWERPC_EXCP_FP_VXSOFT
:
1704 info
.si_code
= TARGET_FPE_FLTINV
;
1706 case POWERPC_EXCP_FP_VXSNAN
:
1707 case POWERPC_EXCP_FP_VXISI
:
1708 case POWERPC_EXCP_FP_VXIDI
:
1709 case POWERPC_EXCP_FP_VXIMZ
:
1710 case POWERPC_EXCP_FP_VXVC
:
1711 case POWERPC_EXCP_FP_VXSQRT
:
1712 case POWERPC_EXCP_FP_VXCVI
:
1713 info
.si_code
= TARGET_FPE_FLTSUB
;
1716 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1721 case POWERPC_EXCP_INVAL
:
1722 EXCP_DUMP(env
, "Invalid instruction\n");
1723 info
.si_signo
= TARGET_SIGILL
;
1725 switch (env
->error_code
& 0xF) {
1726 case POWERPC_EXCP_INVAL_INVAL
:
1727 info
.si_code
= TARGET_ILL_ILLOPC
;
1729 case POWERPC_EXCP_INVAL_LSWX
:
1730 info
.si_code
= TARGET_ILL_ILLOPN
;
1732 case POWERPC_EXCP_INVAL_SPR
:
1733 info
.si_code
= TARGET_ILL_PRVREG
;
1735 case POWERPC_EXCP_INVAL_FP
:
1736 info
.si_code
= TARGET_ILL_COPROC
;
1739 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1740 env
->error_code
& 0xF);
1741 info
.si_code
= TARGET_ILL_ILLADR
;
1745 case POWERPC_EXCP_PRIV
:
1746 EXCP_DUMP(env
, "Privilege violation\n");
1747 info
.si_signo
= TARGET_SIGILL
;
1749 switch (env
->error_code
& 0xF) {
1750 case POWERPC_EXCP_PRIV_OPC
:
1751 info
.si_code
= TARGET_ILL_PRVOPC
;
1753 case POWERPC_EXCP_PRIV_REG
:
1754 info
.si_code
= TARGET_ILL_PRVREG
;
1757 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1758 env
->error_code
& 0xF);
1759 info
.si_code
= TARGET_ILL_PRVOPC
;
1763 case POWERPC_EXCP_TRAP
:
1764 cpu_abort(cs
, "Tried to call a TRAP\n");
1767 /* Should not happen ! */
1768 cpu_abort(cs
, "Unknown program exception (%02x)\n",
1772 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1773 queue_signal(env
, info
.si_signo
, &info
);
1775 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1776 EXCP_DUMP(env
, "No floating point allowed\n");
1777 info
.si_signo
= TARGET_SIGILL
;
1779 info
.si_code
= TARGET_ILL_COPROC
;
1780 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1781 queue_signal(env
, info
.si_signo
, &info
);
1783 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1784 cpu_abort(cs
, "Syscall exception while in user mode. "
1787 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1788 EXCP_DUMP(env
, "No APU instruction allowed\n");
1789 info
.si_signo
= TARGET_SIGILL
;
1791 info
.si_code
= TARGET_ILL_COPROC
;
1792 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1793 queue_signal(env
, info
.si_signo
, &info
);
1795 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1796 cpu_abort(cs
, "Decrementer interrupt while in user mode. "
1799 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1800 cpu_abort(cs
, "Fix interval timer interrupt while in user mode. "
1803 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1804 cpu_abort(cs
, "Watchdog timer interrupt while in user mode. "
1807 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1808 cpu_abort(cs
, "Data TLB exception while in user mode. "
1811 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1812 cpu_abort(cs
, "Instruction TLB exception while in user mode. "
1815 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1816 EXCP_DUMP(env
, "No SPE/floating-point instruction allowed\n");
1817 info
.si_signo
= TARGET_SIGILL
;
1819 info
.si_code
= TARGET_ILL_COPROC
;
1820 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1821 queue_signal(env
, info
.si_signo
, &info
);
1823 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1824 cpu_abort(cs
, "Embedded floating-point data IRQ not handled\n");
1826 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1827 cpu_abort(cs
, "Embedded floating-point round IRQ not handled\n");
1829 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1830 cpu_abort(cs
, "Performance monitor exception not handled\n");
1832 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1833 cpu_abort(cs
, "Doorbell interrupt while in user mode. "
1836 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1837 cpu_abort(cs
, "Doorbell critical interrupt while in user mode. "
1840 case POWERPC_EXCP_RESET
: /* System reset exception */
1841 cpu_abort(cs
, "Reset interrupt while in user mode. "
1844 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1845 cpu_abort(cs
, "Data segment exception while in user mode. "
1848 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1849 cpu_abort(cs
, "Instruction segment exception "
1850 "while in user mode. Aborting\n");
1852 /* PowerPC 64 with hypervisor mode support */
1853 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1854 cpu_abort(cs
, "Hypervisor decrementer interrupt "
1855 "while in user mode. Aborting\n");
1857 case POWERPC_EXCP_TRACE
: /* Trace exception */
1859 * we use this exception to emulate step-by-step execution mode.
1862 /* PowerPC 64 with hypervisor mode support */
1863 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1864 cpu_abort(cs
, "Hypervisor data storage exception "
1865 "while in user mode. Aborting\n");
1867 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1868 cpu_abort(cs
, "Hypervisor instruction storage exception "
1869 "while in user mode. Aborting\n");
1871 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1872 cpu_abort(cs
, "Hypervisor data segment exception "
1873 "while in user mode. Aborting\n");
1875 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1876 cpu_abort(cs
, "Hypervisor instruction segment exception "
1877 "while in user mode. Aborting\n");
1879 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1880 EXCP_DUMP(env
, "No Altivec instructions allowed\n");
1881 info
.si_signo
= TARGET_SIGILL
;
1883 info
.si_code
= TARGET_ILL_COPROC
;
1884 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1885 queue_signal(env
, info
.si_signo
, &info
);
1887 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1888 cpu_abort(cs
, "Programmable interval timer interrupt "
1889 "while in user mode. Aborting\n");
1891 case POWERPC_EXCP_IO
: /* IO error exception */
1892 cpu_abort(cs
, "IO error exception while in user mode. "
1895 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1896 cpu_abort(cs
, "Run mode exception while in user mode. "
1899 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1900 cpu_abort(cs
, "Emulation trap exception not handled\n");
1902 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1903 cpu_abort(cs
, "Instruction fetch TLB exception "
1904 "while in user-mode. Aborting");
1906 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1907 cpu_abort(cs
, "Data load TLB exception while in user-mode. "
1910 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
1911 cpu_abort(cs
, "Data store TLB exception while in user-mode. "
1914 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
1915 cpu_abort(cs
, "Floating-point assist exception not handled\n");
1917 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1918 cpu_abort(cs
, "Instruction address breakpoint exception "
1921 case POWERPC_EXCP_SMI
: /* System management interrupt */
1922 cpu_abort(cs
, "System management interrupt while in user mode. "
1925 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1926 cpu_abort(cs
, "Thermal interrupt interrupt while in user mode. "
1929 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
1930 cpu_abort(cs
, "Performance monitor exception not handled\n");
1932 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1933 cpu_abort(cs
, "Vector assist exception not handled\n");
1935 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
1936 cpu_abort(cs
, "Soft patch exception not handled\n");
1938 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1939 cpu_abort(cs
, "Maintenance exception while in user mode. "
1942 case POWERPC_EXCP_STOP
: /* stop translation */
1943 /* We did invalidate the instruction cache. Go on */
1945 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
1946 /* We just stopped because of a branch. Go on */
1948 case POWERPC_EXCP_SYSCALL_USER
:
1949 /* system call in user-mode emulation */
1951 * PPC ABI uses overflow flag in cr0 to signal an error
1954 env
->crf
[0] &= ~0x1;
1955 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
1956 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
1958 if (ret
== (target_ulong
)(-TARGET_QEMU_ESIGRETURN
)) {
1959 /* Returning from a successful sigreturn syscall.
1960 Avoid corrupting register state. */
1963 if (ret
> (target_ulong
)(-515)) {
1969 case POWERPC_EXCP_STCX
:
1970 if (do_store_exclusive(env
)) {
1971 info
.si_signo
= TARGET_SIGSEGV
;
1973 info
.si_code
= TARGET_SEGV_MAPERR
;
1974 info
._sifields
._sigfault
._addr
= env
->nip
;
1975 queue_signal(env
, info
.si_signo
, &info
);
1982 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1984 info
.si_signo
= sig
;
1986 info
.si_code
= TARGET_TRAP_BRKPT
;
1987 queue_signal(env
, info
.si_signo
, &info
);
1991 case EXCP_INTERRUPT
:
1992 /* just indicate that signals should be handled asap */
1995 cpu_abort(cs
, "Unknown exception 0x%d. Aborting\n", trapnr
);
1998 process_pending_signals(env
);
2005 # ifdef TARGET_ABI_MIPSO32
2006 # define MIPS_SYS(name, args) args,
2007 static const uint8_t mips_syscall_args
[] = {
2008 MIPS_SYS(sys_syscall
, 8) /* 4000 */
2009 MIPS_SYS(sys_exit
, 1)
2010 MIPS_SYS(sys_fork
, 0)
2011 MIPS_SYS(sys_read
, 3)
2012 MIPS_SYS(sys_write
, 3)
2013 MIPS_SYS(sys_open
, 3) /* 4005 */
2014 MIPS_SYS(sys_close
, 1)
2015 MIPS_SYS(sys_waitpid
, 3)
2016 MIPS_SYS(sys_creat
, 2)
2017 MIPS_SYS(sys_link
, 2)
2018 MIPS_SYS(sys_unlink
, 1) /* 4010 */
2019 MIPS_SYS(sys_execve
, 0)
2020 MIPS_SYS(sys_chdir
, 1)
2021 MIPS_SYS(sys_time
, 1)
2022 MIPS_SYS(sys_mknod
, 3)
2023 MIPS_SYS(sys_chmod
, 2) /* 4015 */
2024 MIPS_SYS(sys_lchown
, 3)
2025 MIPS_SYS(sys_ni_syscall
, 0)
2026 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_stat */
2027 MIPS_SYS(sys_lseek
, 3)
2028 MIPS_SYS(sys_getpid
, 0) /* 4020 */
2029 MIPS_SYS(sys_mount
, 5)
2030 MIPS_SYS(sys_umount
, 1)
2031 MIPS_SYS(sys_setuid
, 1)
2032 MIPS_SYS(sys_getuid
, 0)
2033 MIPS_SYS(sys_stime
, 1) /* 4025 */
2034 MIPS_SYS(sys_ptrace
, 4)
2035 MIPS_SYS(sys_alarm
, 1)
2036 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_fstat */
2037 MIPS_SYS(sys_pause
, 0)
2038 MIPS_SYS(sys_utime
, 2) /* 4030 */
2039 MIPS_SYS(sys_ni_syscall
, 0)
2040 MIPS_SYS(sys_ni_syscall
, 0)
2041 MIPS_SYS(sys_access
, 2)
2042 MIPS_SYS(sys_nice
, 1)
2043 MIPS_SYS(sys_ni_syscall
, 0) /* 4035 */
2044 MIPS_SYS(sys_sync
, 0)
2045 MIPS_SYS(sys_kill
, 2)
2046 MIPS_SYS(sys_rename
, 2)
2047 MIPS_SYS(sys_mkdir
, 2)
2048 MIPS_SYS(sys_rmdir
, 1) /* 4040 */
2049 MIPS_SYS(sys_dup
, 1)
2050 MIPS_SYS(sys_pipe
, 0)
2051 MIPS_SYS(sys_times
, 1)
2052 MIPS_SYS(sys_ni_syscall
, 0)
2053 MIPS_SYS(sys_brk
, 1) /* 4045 */
2054 MIPS_SYS(sys_setgid
, 1)
2055 MIPS_SYS(sys_getgid
, 0)
2056 MIPS_SYS(sys_ni_syscall
, 0) /* was signal(2) */
2057 MIPS_SYS(sys_geteuid
, 0)
2058 MIPS_SYS(sys_getegid
, 0) /* 4050 */
2059 MIPS_SYS(sys_acct
, 0)
2060 MIPS_SYS(sys_umount2
, 2)
2061 MIPS_SYS(sys_ni_syscall
, 0)
2062 MIPS_SYS(sys_ioctl
, 3)
2063 MIPS_SYS(sys_fcntl
, 3) /* 4055 */
2064 MIPS_SYS(sys_ni_syscall
, 2)
2065 MIPS_SYS(sys_setpgid
, 2)
2066 MIPS_SYS(sys_ni_syscall
, 0)
2067 MIPS_SYS(sys_olduname
, 1)
2068 MIPS_SYS(sys_umask
, 1) /* 4060 */
2069 MIPS_SYS(sys_chroot
, 1)
2070 MIPS_SYS(sys_ustat
, 2)
2071 MIPS_SYS(sys_dup2
, 2)
2072 MIPS_SYS(sys_getppid
, 0)
2073 MIPS_SYS(sys_getpgrp
, 0) /* 4065 */
2074 MIPS_SYS(sys_setsid
, 0)
2075 MIPS_SYS(sys_sigaction
, 3)
2076 MIPS_SYS(sys_sgetmask
, 0)
2077 MIPS_SYS(sys_ssetmask
, 1)
2078 MIPS_SYS(sys_setreuid
, 2) /* 4070 */
2079 MIPS_SYS(sys_setregid
, 2)
2080 MIPS_SYS(sys_sigsuspend
, 0)
2081 MIPS_SYS(sys_sigpending
, 1)
2082 MIPS_SYS(sys_sethostname
, 2)
2083 MIPS_SYS(sys_setrlimit
, 2) /* 4075 */
2084 MIPS_SYS(sys_getrlimit
, 2)
2085 MIPS_SYS(sys_getrusage
, 2)
2086 MIPS_SYS(sys_gettimeofday
, 2)
2087 MIPS_SYS(sys_settimeofday
, 2)
2088 MIPS_SYS(sys_getgroups
, 2) /* 4080 */
2089 MIPS_SYS(sys_setgroups
, 2)
2090 MIPS_SYS(sys_ni_syscall
, 0) /* old_select */
2091 MIPS_SYS(sys_symlink
, 2)
2092 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_lstat */
2093 MIPS_SYS(sys_readlink
, 3) /* 4085 */
2094 MIPS_SYS(sys_uselib
, 1)
2095 MIPS_SYS(sys_swapon
, 2)
2096 MIPS_SYS(sys_reboot
, 3)
2097 MIPS_SYS(old_readdir
, 3)
2098 MIPS_SYS(old_mmap
, 6) /* 4090 */
2099 MIPS_SYS(sys_munmap
, 2)
2100 MIPS_SYS(sys_truncate
, 2)
2101 MIPS_SYS(sys_ftruncate
, 2)
2102 MIPS_SYS(sys_fchmod
, 2)
2103 MIPS_SYS(sys_fchown
, 3) /* 4095 */
2104 MIPS_SYS(sys_getpriority
, 2)
2105 MIPS_SYS(sys_setpriority
, 3)
2106 MIPS_SYS(sys_ni_syscall
, 0)
2107 MIPS_SYS(sys_statfs
, 2)
2108 MIPS_SYS(sys_fstatfs
, 2) /* 4100 */
2109 MIPS_SYS(sys_ni_syscall
, 0) /* was ioperm(2) */
2110 MIPS_SYS(sys_socketcall
, 2)
2111 MIPS_SYS(sys_syslog
, 3)
2112 MIPS_SYS(sys_setitimer
, 3)
2113 MIPS_SYS(sys_getitimer
, 2) /* 4105 */
2114 MIPS_SYS(sys_newstat
, 2)
2115 MIPS_SYS(sys_newlstat
, 2)
2116 MIPS_SYS(sys_newfstat
, 2)
2117 MIPS_SYS(sys_uname
, 1)
2118 MIPS_SYS(sys_ni_syscall
, 0) /* 4110 was iopl(2) */
2119 MIPS_SYS(sys_vhangup
, 0)
2120 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_idle() */
2121 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_vm86 */
2122 MIPS_SYS(sys_wait4
, 4)
2123 MIPS_SYS(sys_swapoff
, 1) /* 4115 */
2124 MIPS_SYS(sys_sysinfo
, 1)
2125 MIPS_SYS(sys_ipc
, 6)
2126 MIPS_SYS(sys_fsync
, 1)
2127 MIPS_SYS(sys_sigreturn
, 0)
2128 MIPS_SYS(sys_clone
, 6) /* 4120 */
2129 MIPS_SYS(sys_setdomainname
, 2)
2130 MIPS_SYS(sys_newuname
, 1)
2131 MIPS_SYS(sys_ni_syscall
, 0) /* sys_modify_ldt */
2132 MIPS_SYS(sys_adjtimex
, 1)
2133 MIPS_SYS(sys_mprotect
, 3) /* 4125 */
2134 MIPS_SYS(sys_sigprocmask
, 3)
2135 MIPS_SYS(sys_ni_syscall
, 0) /* was create_module */
2136 MIPS_SYS(sys_init_module
, 5)
2137 MIPS_SYS(sys_delete_module
, 1)
2138 MIPS_SYS(sys_ni_syscall
, 0) /* 4130 was get_kernel_syms */
2139 MIPS_SYS(sys_quotactl
, 0)
2140 MIPS_SYS(sys_getpgid
, 1)
2141 MIPS_SYS(sys_fchdir
, 1)
2142 MIPS_SYS(sys_bdflush
, 2)
2143 MIPS_SYS(sys_sysfs
, 3) /* 4135 */
2144 MIPS_SYS(sys_personality
, 1)
2145 MIPS_SYS(sys_ni_syscall
, 0) /* for afs_syscall */
2146 MIPS_SYS(sys_setfsuid
, 1)
2147 MIPS_SYS(sys_setfsgid
, 1)
2148 MIPS_SYS(sys_llseek
, 5) /* 4140 */
2149 MIPS_SYS(sys_getdents
, 3)
2150 MIPS_SYS(sys_select
, 5)
2151 MIPS_SYS(sys_flock
, 2)
2152 MIPS_SYS(sys_msync
, 3)
2153 MIPS_SYS(sys_readv
, 3) /* 4145 */
2154 MIPS_SYS(sys_writev
, 3)
2155 MIPS_SYS(sys_cacheflush
, 3)
2156 MIPS_SYS(sys_cachectl
, 3)
2157 MIPS_SYS(sys_sysmips
, 4)
2158 MIPS_SYS(sys_ni_syscall
, 0) /* 4150 */
2159 MIPS_SYS(sys_getsid
, 1)
2160 MIPS_SYS(sys_fdatasync
, 0)
2161 MIPS_SYS(sys_sysctl
, 1)
2162 MIPS_SYS(sys_mlock
, 2)
2163 MIPS_SYS(sys_munlock
, 2) /* 4155 */
2164 MIPS_SYS(sys_mlockall
, 1)
2165 MIPS_SYS(sys_munlockall
, 0)
2166 MIPS_SYS(sys_sched_setparam
, 2)
2167 MIPS_SYS(sys_sched_getparam
, 2)
2168 MIPS_SYS(sys_sched_setscheduler
, 3) /* 4160 */
2169 MIPS_SYS(sys_sched_getscheduler
, 1)
2170 MIPS_SYS(sys_sched_yield
, 0)
2171 MIPS_SYS(sys_sched_get_priority_max
, 1)
2172 MIPS_SYS(sys_sched_get_priority_min
, 1)
2173 MIPS_SYS(sys_sched_rr_get_interval
, 2) /* 4165 */
2174 MIPS_SYS(sys_nanosleep
, 2)
2175 MIPS_SYS(sys_mremap
, 5)
2176 MIPS_SYS(sys_accept
, 3)
2177 MIPS_SYS(sys_bind
, 3)
2178 MIPS_SYS(sys_connect
, 3) /* 4170 */
2179 MIPS_SYS(sys_getpeername
, 3)
2180 MIPS_SYS(sys_getsockname
, 3)
2181 MIPS_SYS(sys_getsockopt
, 5)
2182 MIPS_SYS(sys_listen
, 2)
2183 MIPS_SYS(sys_recv
, 4) /* 4175 */
2184 MIPS_SYS(sys_recvfrom
, 6)
2185 MIPS_SYS(sys_recvmsg
, 3)
2186 MIPS_SYS(sys_send
, 4)
2187 MIPS_SYS(sys_sendmsg
, 3)
2188 MIPS_SYS(sys_sendto
, 6) /* 4180 */
2189 MIPS_SYS(sys_setsockopt
, 5)
2190 MIPS_SYS(sys_shutdown
, 2)
2191 MIPS_SYS(sys_socket
, 3)
2192 MIPS_SYS(sys_socketpair
, 4)
2193 MIPS_SYS(sys_setresuid
, 3) /* 4185 */
2194 MIPS_SYS(sys_getresuid
, 3)
2195 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_query_module */
2196 MIPS_SYS(sys_poll
, 3)
2197 MIPS_SYS(sys_nfsservctl
, 3)
2198 MIPS_SYS(sys_setresgid
, 3) /* 4190 */
2199 MIPS_SYS(sys_getresgid
, 3)
2200 MIPS_SYS(sys_prctl
, 5)
2201 MIPS_SYS(sys_rt_sigreturn
, 0)
2202 MIPS_SYS(sys_rt_sigaction
, 4)
2203 MIPS_SYS(sys_rt_sigprocmask
, 4) /* 4195 */
2204 MIPS_SYS(sys_rt_sigpending
, 2)
2205 MIPS_SYS(sys_rt_sigtimedwait
, 4)
2206 MIPS_SYS(sys_rt_sigqueueinfo
, 3)
2207 MIPS_SYS(sys_rt_sigsuspend
, 0)
2208 MIPS_SYS(sys_pread64
, 6) /* 4200 */
2209 MIPS_SYS(sys_pwrite64
, 6)
2210 MIPS_SYS(sys_chown
, 3)
2211 MIPS_SYS(sys_getcwd
, 2)
2212 MIPS_SYS(sys_capget
, 2)
2213 MIPS_SYS(sys_capset
, 2) /* 4205 */
2214 MIPS_SYS(sys_sigaltstack
, 2)
2215 MIPS_SYS(sys_sendfile
, 4)
2216 MIPS_SYS(sys_ni_syscall
, 0)
2217 MIPS_SYS(sys_ni_syscall
, 0)
2218 MIPS_SYS(sys_mmap2
, 6) /* 4210 */
2219 MIPS_SYS(sys_truncate64
, 4)
2220 MIPS_SYS(sys_ftruncate64
, 4)
2221 MIPS_SYS(sys_stat64
, 2)
2222 MIPS_SYS(sys_lstat64
, 2)
2223 MIPS_SYS(sys_fstat64
, 2) /* 4215 */
2224 MIPS_SYS(sys_pivot_root
, 2)
2225 MIPS_SYS(sys_mincore
, 3)
2226 MIPS_SYS(sys_madvise
, 3)
2227 MIPS_SYS(sys_getdents64
, 3)
2228 MIPS_SYS(sys_fcntl64
, 3) /* 4220 */
2229 MIPS_SYS(sys_ni_syscall
, 0)
2230 MIPS_SYS(sys_gettid
, 0)
2231 MIPS_SYS(sys_readahead
, 5)
2232 MIPS_SYS(sys_setxattr
, 5)
2233 MIPS_SYS(sys_lsetxattr
, 5) /* 4225 */
2234 MIPS_SYS(sys_fsetxattr
, 5)
2235 MIPS_SYS(sys_getxattr
, 4)
2236 MIPS_SYS(sys_lgetxattr
, 4)
2237 MIPS_SYS(sys_fgetxattr
, 4)
2238 MIPS_SYS(sys_listxattr
, 3) /* 4230 */
2239 MIPS_SYS(sys_llistxattr
, 3)
2240 MIPS_SYS(sys_flistxattr
, 3)
2241 MIPS_SYS(sys_removexattr
, 2)
2242 MIPS_SYS(sys_lremovexattr
, 2)
2243 MIPS_SYS(sys_fremovexattr
, 2) /* 4235 */
2244 MIPS_SYS(sys_tkill
, 2)
2245 MIPS_SYS(sys_sendfile64
, 5)
2246 MIPS_SYS(sys_futex
, 6)
2247 MIPS_SYS(sys_sched_setaffinity
, 3)
2248 MIPS_SYS(sys_sched_getaffinity
, 3) /* 4240 */
2249 MIPS_SYS(sys_io_setup
, 2)
2250 MIPS_SYS(sys_io_destroy
, 1)
2251 MIPS_SYS(sys_io_getevents
, 5)
2252 MIPS_SYS(sys_io_submit
, 3)
2253 MIPS_SYS(sys_io_cancel
, 3) /* 4245 */
2254 MIPS_SYS(sys_exit_group
, 1)
2255 MIPS_SYS(sys_lookup_dcookie
, 3)
2256 MIPS_SYS(sys_epoll_create
, 1)
2257 MIPS_SYS(sys_epoll_ctl
, 4)
2258 MIPS_SYS(sys_epoll_wait
, 3) /* 4250 */
2259 MIPS_SYS(sys_remap_file_pages
, 5)
2260 MIPS_SYS(sys_set_tid_address
, 1)
2261 MIPS_SYS(sys_restart_syscall
, 0)
2262 MIPS_SYS(sys_fadvise64_64
, 7)
2263 MIPS_SYS(sys_statfs64
, 3) /* 4255 */
2264 MIPS_SYS(sys_fstatfs64
, 2)
2265 MIPS_SYS(sys_timer_create
, 3)
2266 MIPS_SYS(sys_timer_settime
, 4)
2267 MIPS_SYS(sys_timer_gettime
, 2)
2268 MIPS_SYS(sys_timer_getoverrun
, 1) /* 4260 */
2269 MIPS_SYS(sys_timer_delete
, 1)
2270 MIPS_SYS(sys_clock_settime
, 2)
2271 MIPS_SYS(sys_clock_gettime
, 2)
2272 MIPS_SYS(sys_clock_getres
, 2)
2273 MIPS_SYS(sys_clock_nanosleep
, 4) /* 4265 */
2274 MIPS_SYS(sys_tgkill
, 3)
2275 MIPS_SYS(sys_utimes
, 2)
2276 MIPS_SYS(sys_mbind
, 4)
2277 MIPS_SYS(sys_ni_syscall
, 0) /* sys_get_mempolicy */
2278 MIPS_SYS(sys_ni_syscall
, 0) /* 4270 sys_set_mempolicy */
2279 MIPS_SYS(sys_mq_open
, 4)
2280 MIPS_SYS(sys_mq_unlink
, 1)
2281 MIPS_SYS(sys_mq_timedsend
, 5)
2282 MIPS_SYS(sys_mq_timedreceive
, 5)
2283 MIPS_SYS(sys_mq_notify
, 2) /* 4275 */
2284 MIPS_SYS(sys_mq_getsetattr
, 3)
2285 MIPS_SYS(sys_ni_syscall
, 0) /* sys_vserver */
2286 MIPS_SYS(sys_waitid
, 4)
2287 MIPS_SYS(sys_ni_syscall
, 0) /* available, was setaltroot */
2288 MIPS_SYS(sys_add_key
, 5)
2289 MIPS_SYS(sys_request_key
, 4)
2290 MIPS_SYS(sys_keyctl
, 5)
2291 MIPS_SYS(sys_set_thread_area
, 1)
2292 MIPS_SYS(sys_inotify_init
, 0)
2293 MIPS_SYS(sys_inotify_add_watch
, 3) /* 4285 */
2294 MIPS_SYS(sys_inotify_rm_watch
, 2)
2295 MIPS_SYS(sys_migrate_pages
, 4)
2296 MIPS_SYS(sys_openat
, 4)
2297 MIPS_SYS(sys_mkdirat
, 3)
2298 MIPS_SYS(sys_mknodat
, 4) /* 4290 */
2299 MIPS_SYS(sys_fchownat
, 5)
2300 MIPS_SYS(sys_futimesat
, 3)
2301 MIPS_SYS(sys_fstatat64
, 4)
2302 MIPS_SYS(sys_unlinkat
, 3)
2303 MIPS_SYS(sys_renameat
, 4) /* 4295 */
2304 MIPS_SYS(sys_linkat
, 5)
2305 MIPS_SYS(sys_symlinkat
, 3)
2306 MIPS_SYS(sys_readlinkat
, 4)
2307 MIPS_SYS(sys_fchmodat
, 3)
2308 MIPS_SYS(sys_faccessat
, 3) /* 4300 */
2309 MIPS_SYS(sys_pselect6
, 6)
2310 MIPS_SYS(sys_ppoll
, 5)
2311 MIPS_SYS(sys_unshare
, 1)
2312 MIPS_SYS(sys_splice
, 6)
2313 MIPS_SYS(sys_sync_file_range
, 7) /* 4305 */
2314 MIPS_SYS(sys_tee
, 4)
2315 MIPS_SYS(sys_vmsplice
, 4)
2316 MIPS_SYS(sys_move_pages
, 6)
2317 MIPS_SYS(sys_set_robust_list
, 2)
2318 MIPS_SYS(sys_get_robust_list
, 3) /* 4310 */
2319 MIPS_SYS(sys_kexec_load
, 4)
2320 MIPS_SYS(sys_getcpu
, 3)
2321 MIPS_SYS(sys_epoll_pwait
, 6)
2322 MIPS_SYS(sys_ioprio_set
, 3)
2323 MIPS_SYS(sys_ioprio_get
, 2)
2324 MIPS_SYS(sys_utimensat
, 4)
2325 MIPS_SYS(sys_signalfd
, 3)
2326 MIPS_SYS(sys_ni_syscall
, 0) /* was timerfd */
2327 MIPS_SYS(sys_eventfd
, 1)
2328 MIPS_SYS(sys_fallocate
, 6) /* 4320 */
2329 MIPS_SYS(sys_timerfd_create
, 2)
2330 MIPS_SYS(sys_timerfd_gettime
, 2)
2331 MIPS_SYS(sys_timerfd_settime
, 4)
2332 MIPS_SYS(sys_signalfd4
, 4)
2333 MIPS_SYS(sys_eventfd2
, 2) /* 4325 */
2334 MIPS_SYS(sys_epoll_create1
, 1)
2335 MIPS_SYS(sys_dup3
, 3)
2336 MIPS_SYS(sys_pipe2
, 2)
2337 MIPS_SYS(sys_inotify_init1
, 1)
2338 MIPS_SYS(sys_preadv
, 6) /* 4330 */
2339 MIPS_SYS(sys_pwritev
, 6)
2340 MIPS_SYS(sys_rt_tgsigqueueinfo
, 4)
2341 MIPS_SYS(sys_perf_event_open
, 5)
2342 MIPS_SYS(sys_accept4
, 4)
2343 MIPS_SYS(sys_recvmmsg
, 5) /* 4335 */
2344 MIPS_SYS(sys_fanotify_init
, 2)
2345 MIPS_SYS(sys_fanotify_mark
, 6)
2346 MIPS_SYS(sys_prlimit64
, 4)
2347 MIPS_SYS(sys_name_to_handle_at
, 5)
2348 MIPS_SYS(sys_open_by_handle_at
, 3) /* 4340 */
2349 MIPS_SYS(sys_clock_adjtime
, 2)
2350 MIPS_SYS(sys_syncfs
, 1)
2355 static int do_store_exclusive(CPUMIPSState
*env
)
2358 target_ulong page_addr
;
2366 page_addr
= addr
& TARGET_PAGE_MASK
;
2369 flags
= page_get_flags(page_addr
);
2370 if ((flags
& PAGE_READ
) == 0) {
2373 reg
= env
->llreg
& 0x1f;
2374 d
= (env
->llreg
& 0x20) != 0;
2376 segv
= get_user_s64(val
, addr
);
2378 segv
= get_user_s32(val
, addr
);
2381 if (val
!= env
->llval
) {
2382 env
->active_tc
.gpr
[reg
] = 0;
2385 segv
= put_user_u64(env
->llnewval
, addr
);
2387 segv
= put_user_u32(env
->llnewval
, addr
);
2390 env
->active_tc
.gpr
[reg
] = 1;
2397 env
->active_tc
.PC
+= 4;
2410 static int do_break(CPUMIPSState
*env
, target_siginfo_t
*info
,
2418 info
->si_signo
= TARGET_SIGFPE
;
2420 info
->si_code
= (code
== BRK_OVERFLOW
) ? FPE_INTOVF
: FPE_INTDIV
;
2421 queue_signal(env
, info
->si_signo
, &*info
);
2425 info
->si_signo
= TARGET_SIGTRAP
;
2427 queue_signal(env
, info
->si_signo
, &*info
);
void cpu_loop(CPUMIPSState *env)
    CPUState *cs = CPU(mips_env_get_cpu(env));
    target_siginfo_t info;
# ifdef TARGET_ABI_MIPSO32
    unsigned int syscall_num;
        trapnr = cpu_mips_exec(cs);
            env->active_tc.PC += 4;
# ifdef TARGET_ABI_MIPSO32
            syscall_num = env->active_tc.gpr[2] - 4000;
            if (syscall_num >= sizeof(mips_syscall_args)) {
                ret = -TARGET_ENOSYS;
                abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;
                nb_args = mips_syscall_args[syscall_num];
                sp_reg = env->active_tc.gpr[29];
                /* these arguments are taken from the stack */
                if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) {
                if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) {
                if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) {
                if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) {
                ret = do_syscall(env, env->active_tc.gpr[2],
                                 env->active_tc.gpr[4],
                                 env->active_tc.gpr[5],
                                 env->active_tc.gpr[6],
                                 env->active_tc.gpr[7],
                                 arg5, arg6, arg7, arg8);
            ret = do_syscall(env, env->active_tc.gpr[2],
                             env->active_tc.gpr[4], env->active_tc.gpr[5],
                             env->active_tc.gpr[6], env->active_tc.gpr[7],
                             env->active_tc.gpr[8], env->active_tc.gpr[9],
                             env->active_tc.gpr[10], env->active_tc.gpr[11]);
            if (ret == -TARGET_QEMU_ESIGRETURN) {
                /* Returning from a successful sigreturn syscall.
                   Avoid clobbering register state. */
            if ((abi_ulong)ret >= (abi_ulong)-1133) {
                env->active_tc.gpr[7] = 1; /* error flag */
                env->active_tc.gpr[7] = 0; /* error flag */
            env->active_tc.gpr[2] = ret;
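            /*
             * O32 result convention handled above: the syscall result goes in
             * v0 (gpr[2]) and the error flag in a3 (gpr[7]).  Return values in
             * the window [-1133, -1] are treated as failures, so a3 is set to
             * 1 and v0 ends up carrying the error code; otherwise a3 is
             * cleared.  (1133 presumably corresponds to the largest errno used
             * by the MIPS ABI, playing the same role as the -4095 cutoff seen
             * on other targets.)
             */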
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->CP0_BadVAddr;
            queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGILL;
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                    info.si_signo = sig;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
            if (do_store_exclusive(env)) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->active_tc.PC;
                queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            queue_signal(env, info.si_signo, &info);
            /* The code below was inspired by the MIPS Linux kernel trap
             * handling code in arch/mips/kernel/traps.c.
             */
                abi_ulong trap_instr;
                if (env->hflags & MIPS_HFLAG_M16) {
                    if (env->insn_flags & ASE_MICROMIPS) {
                        /* microMIPS mode */
                        ret = get_user_u16(trap_instr, env->active_tc.PC);
                        if ((trap_instr >> 10) == 0x11) {
                            /* 16-bit instruction */
                            code = trap_instr & 0xf;
                            /* 32-bit instruction */
                            ret = get_user_u16(instr_lo,
                                               env->active_tc.PC + 2);
                            trap_instr = (trap_instr << 16) | instr_lo;
                            code = ((trap_instr >> 6) & ((1 << 20) - 1));
                            /* Unfortunately, microMIPS also suffers from
                               the old assembler bug...  */
                            if (code >= (1 << 10)) {
                        ret = get_user_u16(trap_instr, env->active_tc.PC);
                        code = (trap_instr >> 6) & 0x3f;
                    ret = get_user_u32(trap_instr, env->active_tc.PC);
                /* As described in the original Linux kernel code, the
                 * below checks on 'code' are to work around an old
                 * assembly bug.
                 */
                code = ((trap_instr >> 6) & ((1 << 20) - 1));
                if (code >= (1 << 10)) {
                if (do_break(env, &info, code) != 0) {
                abi_ulong trap_instr;
                unsigned int code = 0;
                if (env->hflags & MIPS_HFLAG_M16) {
                    /* microMIPS mode */
                    ret = get_user_u16(instr[0], env->active_tc.PC) ||
                          get_user_u16(instr[1], env->active_tc.PC + 2);
                    trap_instr = (instr[0] << 16) | instr[1];
                    ret = get_user_u32(trap_instr, env->active_tc.PC);
                /* The immediate versions don't provide a code.  */
                if (!(trap_instr & 0xFC000000)) {
                    if (env->hflags & MIPS_HFLAG_M16) {
                        /* microMIPS mode */
                        code = ((trap_instr >> 12) & ((1 << 4) - 1));
                        code = ((trap_instr >> 6) & ((1 << 10) - 1));
                if (do_break(env, &info, code) != 0) {
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
        process_pending_signals(env);
#ifdef TARGET_OPENRISC

void cpu_loop(CPUOpenRISCState *env)
    CPUState *cs = CPU(openrisc_env_get_cpu(env));
        trapnr = cpu_openrisc_exec(cs);
            qemu_log_mask(CPU_LOG_INT, "\nReset request, exit, pc is %#x\n", env->pc);
            qemu_log_mask(CPU_LOG_INT, "\nBus error, exit, pc is %#x\n", env->pc);
            gdbsig = TARGET_SIGBUS;
            cpu_dump_state(cs, stderr, fprintf, 0);
            gdbsig = TARGET_SIGSEGV;
            qemu_log_mask(CPU_LOG_INT, "\nTick time interrupt, pc is %#x\n", env->pc);
            qemu_log_mask(CPU_LOG_INT, "\nAlignment, pc is %#x\n", env->pc);
            gdbsig = TARGET_SIGBUS;
            qemu_log_mask(CPU_LOG_INT, "\nIllegal instruction, pc is %#x\n", env->pc);
            gdbsig = TARGET_SIGILL;
            qemu_log_mask(CPU_LOG_INT, "\nExternal interrupt, pc is %#x\n", env->pc);
            qemu_log_mask(CPU_LOG_INT, "\nTLB miss\n");
            qemu_log_mask(CPU_LOG_INT, "\nRange\n");
            gdbsig = TARGET_SIGSEGV;
            env->pc += 4;   /* 0xc00; */
            env->gpr[11] = do_syscall(env,
                                      env->gpr[11], /* return value       */
                                      env->gpr[3],  /* r3 - r7 are params */
            qemu_log_mask(CPU_LOG_INT, "\nFloating point error\n");
            qemu_log_mask(CPU_LOG_INT, "\nTrap\n");
            gdbsig = TARGET_SIGTRAP;
            qemu_log_mask(CPU_LOG_INT, "\nNR\n");
            EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n",
            gdbsig = TARGET_SIGILL;
            gdb_handlesig(cs, gdbsig);
            if (gdbsig != TARGET_SIGTRAP) {
        process_pending_signals(env);

#endif /* TARGET_OPENRISC */
void cpu_loop(CPUSH4State *env)
    CPUState *cs = CPU(sh_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_sh4_exec(cs);
            ret = do_syscall(env,
            env->gregs[0] = ret;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->tea;
            queue_signal(env, info.si_signo, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
void cpu_loop(CPUCRISState *env)
    CPUState *cs = CPU(cris_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_cris_exec(cs);
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->pregs[PR_EDA];
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            ret = do_syscall(env,
            env->regs[10] = ret;
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
#ifdef TARGET_MICROBLAZE
void cpu_loop(CPUMBState *env)
    CPUState *cs = CPU(mb_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_mb_exec(cs);
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            /* Return address is 4 bytes after the call.  */
            env->sregs[SR_PC] = env->regs[14];
            ret = do_syscall(env,
            env->regs[17] = env->sregs[SR_PC] + 4;
            if (env->iflags & D_FLAG) {
                env->sregs[SR_ESR] |= 1 << 12;
                env->sregs[SR_PC] -= 4;
                /* FIXME: if branch was immed, replay the imm as well.  */
            env->iflags &= ~(IMM_FLAG | D_FLAG);
            switch (env->sregs[SR_ESR] & 31) {
            case ESR_EC_DIVZERO:
                info.si_signo = TARGET_SIGFPE;
                info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, &info);
                info.si_signo = TARGET_SIGFPE;
                if (env->sregs[SR_FSR] & FSR_IO) {
                    info.si_code = TARGET_FPE_FLTINV;
                if (env->sregs[SR_FSR] & FSR_DZ) {
                    info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, &info);
                printf("Unhandled hw-exception: 0x%x\n",
                       env->sregs[SR_ESR] & ESR_EC_MASK);
                cpu_dump_state(cs, stderr, fprintf, 0);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
void cpu_loop(CPUM68KState *env)
    CPUState *cs = CPU(m68k_env_get_cpu(env));
    target_siginfo_t info;
    TaskState *ts = cs->opaque;
        trapnr = cpu_m68k_exec(cs);
            if (ts->sim_syscalls) {
                get_user_u16(nr, env->pc + 2);
                do_m68k_simcall(env, nr);
        case EXCP_HALT_INSN:
            /* Semihosting syscall.  */
            do_m68k_semihosting(env, env->dregs[0]);
        case EXCP_UNSUPPORTED:
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            ts->sim_syscalls = 0;
            env->dregs[0] = do_syscall(env,
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->mmu.ar;
            queue_signal(env, info.si_signo, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
        process_pending_signals(env);

#endif /* TARGET_M68K */
static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)
    target_ulong addr, val, tmp;
    target_siginfo_t info;
    addr = env->lock_addr;
    tmp = env->lock_st_addr;
    env->lock_addr = -1;
    env->lock_st_addr = 0;
    if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
    if (val == env->lock_value) {
        if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {
    info.si_signo = TARGET_SIGSEGV;
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, TARGET_SIGSEGV, &info);
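/*
 * Sketch of the Alpha store-conditional emulation above (the helper is shown
 * only in part): judging by the field names, the load-locked instruction
 * leaves its address and value in env->lock_addr / env->lock_value and the
 * store-conditional records its own target in env->lock_st_addr before
 * trapping here.  The helper drops the reservation (lock_addr = -1),
 * re-reads the location and lets the store proceed only while the value
 * still equals lock_value; a faulting access queues SIGSEGV with
 * TARGET_SEGV_MAPERR instead, as shown at the end.
 */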
void cpu_loop(CPUAlphaState *env)
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_alpha_exec(cs);
        /* All of the traps imply a transition through PALcode, which
           implies an REI instruction has been executed.  Which means
           that the intr_flag should be cleared.  */
            fprintf(stderr, "Reset requested. Exit\n");
            fprintf(stderr, "Machine check exception. Exit\n");
        case EXCP_SMP_INTERRUPT:
        case EXCP_CLK_INTERRUPT:
        case EXCP_DEV_INTERRUPT:
            fprintf(stderr, "External interrupt. Exit\n");
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
                            ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_FLTINV;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            /* No-op.  Linux simply re-enables the FPU.  */
            env->lock_addr = -1;
            switch (env->error_code) {
                info.si_signo = TARGET_SIGTRAP;
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
                info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
                trapnr = env->ir[IR_V0];
                sysret = do_syscall(env, trapnr,
                                    env->ir[IR_A0], env->ir[IR_A1],
                                    env->ir[IR_A2], env->ir[IR_A3],
                                    env->ir[IR_A4], env->ir[IR_A5],
                if (trapnr == TARGET_NR_sigreturn
                    || trapnr == TARGET_NR_rt_sigreturn) {
                /* Syscall writes 0 to V0 to bypass error check, similar
                   to how this is handled internal to Linux kernel.
                   (Ab)use trapnr temporarily as boolean indicating error.  */
                trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
                env->ir[IR_V0] = (trapnr ? -sysret : sysret);
                env->ir[IR_A3] = trapnr;
                /* ??? We can probably elide the code using page_unprotect
                   that is checking for self-modifying code.  Instead we
                   could simply call tb_flush here.  Until we work out the
                   changes required to turn off the extra write protection,
                   this can be a no-op.  */
                /* Handled in the translator for usermode.  */
                /* Handled in the translator for usermode.  */
                info.si_signo = TARGET_SIGFPE;
                switch (env->ir[IR_A0]) {
                case TARGET_GEN_INTOVF:
                    info.si_code = TARGET_FPE_INTOVF;
                case TARGET_GEN_INTDIV:
                    info.si_code = TARGET_FPE_INTDIV;
                case TARGET_GEN_FLTOVF:
                    info.si_code = TARGET_FPE_FLTOVF;
                case TARGET_GEN_FLTUND:
                    info.si_code = TARGET_FPE_FLTUND;
                case TARGET_GEN_FLTINV:
                    info.si_code = TARGET_FPE_FLTINV;
                case TARGET_GEN_FLTINE:
                    info.si_code = TARGET_FPE_FLTRES;
                case TARGET_GEN_ROPRAND:
                    info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
            info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
            if (info.si_signo) {
                env->lock_addr = -1;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);

#endif /* TARGET_ALPHA */
void cpu_loop(CPUS390XState *env)
    CPUState *cs = CPU(s390_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_s390x_exec(cs);
        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            n = env->int_svc_code;
                /* syscalls > 255 */
            env->psw.addr += env->int_svc_ilen;
            env->regs[2] = do_syscall(env, n, env->regs[2], env->regs[3],
                                      env->regs[4], env->regs[5],
                                      env->regs[6], env->regs[7], 0, 0);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            n = TARGET_TRAP_BRKPT;
            n = env->int_pgm_code;
            case PGM_PRIVILEGED:
                sig = TARGET_SIGILL;
                n = TARGET_ILL_ILLOPC;
            case PGM_PROTECTION:
            case PGM_ADDRESSING:
                sig = TARGET_SIGSEGV;
                /* XXX: check env->error_code */
                n = TARGET_SEGV_MAPERR;
                addr = env->__excp_addr;
            case PGM_SPECIFICATION:
            case PGM_SPECIAL_OP:
                sig = TARGET_SIGILL;
                n = TARGET_ILL_ILLOPN;
            case PGM_FIXPT_OVERFLOW:
                sig = TARGET_SIGFPE;
                n = TARGET_FPE_INTOVF;
            case PGM_FIXPT_DIVIDE:
                sig = TARGET_SIGFPE;
                n = TARGET_FPE_INTDIV;
                n = (env->fpc >> 8) & 0xff;
                    /* compare-and-trap */
                    /* An IEEE exception, simulated or otherwise.  */
                        n = TARGET_FPE_FLTINV;
                    } else if (n & 0x40) {
                        n = TARGET_FPE_FLTDIV;
                    } else if (n & 0x20) {
                        n = TARGET_FPE_FLTOVF;
                    } else if (n & 0x10) {
                        n = TARGET_FPE_FLTUND;
                    } else if (n & 0x08) {
                        n = TARGET_FPE_FLTRES;
                        /* ??? Quantum exception; BFP, DFP error.  */
                    sig = TARGET_SIGFPE;
            fprintf(stderr, "Unhandled program exception: %#x\n", n);
            cpu_dump_state(cs, stderr, fprintf, 0);
            addr = env->psw.addr;
            info.si_signo = sig;
            info._sifields._sigfault._addr = addr;
            queue_signal(env, info.si_signo, &info);
            fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);

#endif /* TARGET_S390X */
#ifdef TARGET_TILEGX

static void gen_sigill_reg(CPUTLGState *env)
    target_siginfo_t info;
    info.si_signo = TARGET_SIGILL;
    info.si_code = TARGET_ILL_PRVREG;
    info._sifields._sigfault._addr = env->pc;
    queue_signal(env, info.si_signo, &info);

static void do_signal(CPUTLGState *env, int signo, int sigcode)
    target_siginfo_t info;
    info.si_signo = signo;
    info._sifields._sigfault._addr = env->pc;
    if (signo == TARGET_SIGSEGV) {
        /* The passed in sigcode is a dummy; check for a page mapping
           and pass either MAPERR or ACCERR.  */
        target_ulong addr = env->excaddr;
        info._sifields._sigfault._addr = addr;
        if (page_check_range(addr, 1, PAGE_VALID) < 0) {
            sigcode = TARGET_SEGV_MAPERR;
            sigcode = TARGET_SEGV_ACCERR;
    info.si_code = sigcode;
    queue_signal(env, info.si_signo, &info);

static void gen_sigsegv_maperr(CPUTLGState *env, target_ulong addr)
    env->excaddr = addr;
    do_signal(env, TARGET_SIGSEGV, 0);

static void set_regval(CPUTLGState *env, uint8_t reg, uint64_t val)
    if (unlikely(reg >= TILEGX_R_COUNT)) {
        gen_sigill_reg(env);
        g_assert_not_reached();
    env->regs[reg] = val;

/*
 * Compare the 8-byte contents of the CmpValue SPR with the 8-byte value in
 * memory at the address held in the first source register. If the values are
 * not equal, then no memory operation is performed. If the values are equal,
 * the 8-byte quantity from the second source register is written into memory
 * at the address held in the first source register. In either case, the result
 * of the instruction is the value read from memory. The compare and write to
 * memory are atomic and thus can be used for synchronization purposes. This
 * instruction only operates for addresses aligned to a 8-byte boundary.
 * Unaligned memory access causes an Unaligned Data Reference interrupt.
 *
 * Functional Description (64-bit)
 *       uint64_t memVal = memoryReadDoubleWord (rf[SrcA]);
 *       rf[Dest] = memVal;
 *       if (memVal == SPR[CmpValueSPR])
 *           memoryWriteDoubleWord (rf[SrcA], rf[SrcB]);
 *
 * Functional Description (32-bit)
 *       uint64_t memVal = signExtend32 (memoryReadWord (rf[SrcA]));
 *       rf[Dest] = memVal;
 *       if (memVal == signExtend32 (SPR[CmpValueSPR]))
 *           memoryWriteWord (rf[SrcA], rf[SrcB]);
 *
 * This function also processes exch and exch4 which need not process SPR.
 */
static void do_exch(CPUTLGState *env, bool quad, bool cmp)
    target_long val, sprval;
    addr = env->atomic_srca;
    if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
        goto sigsegv_maperr;
        sprval = env->spregs[TILEGX_SPR_CMPEXCH];
        sprval = sextract64(env->spregs[TILEGX_SPR_CMPEXCH], 0, 32);
    if (!cmp || val == sprval) {
        target_long valb = env->atomic_srcb;
        if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
            goto sigsegv_maperr;
    set_regval(env, env->atomic_dstr, val);
    gen_sigsegv_maperr(env, addr);
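/*
 * Summary of do_exch() above: the translator apparently stashes the operands
 * of the trapping instruction in env->atomic_srca / atomic_srcb / atomic_dstr,
 * and this helper performs the memory half of exch/cmpexch.  It loads the old
 * value from atomic_srca (32- or 64-bit according to 'quad'), compares it with
 * the CMPEXCH special-purpose register only when 'cmp' is set, conditionally
 * writes atomic_srcb back, and always hands the loaded value to the
 * destination register through set_regval().  Faulting guest accesses take
 * the sigsegv_maperr path instead.
 */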
static void do_fetch(CPUTLGState *env, int trapnr, bool quad)
    target_long val, valb;
    addr = env->atomic_srca;
    valb = env->atomic_srcb;
    if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
        goto sigsegv_maperr;
    case TILEGX_EXCP_OPCODE_FETCHADD:
    case TILEGX_EXCP_OPCODE_FETCHADD4:
    case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
    case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
        if ((int32_t)valb < 0) {
    case TILEGX_EXCP_OPCODE_FETCHAND:
    case TILEGX_EXCP_OPCODE_FETCHAND4:
    case TILEGX_EXCP_OPCODE_FETCHOR:
    case TILEGX_EXCP_OPCODE_FETCHOR4:
        g_assert_not_reached();
    if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
        goto sigsegv_maperr;
    set_regval(env, env->atomic_dstr, val);
    gen_sigsegv_maperr(env, addr);
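/*
 * do_fetch() above follows the same pattern for the fetch-and-op family:
 * load the old value, derive the new one from atomic_srcb according to the
 * trapping opcode (add / and / or; the arithmetic itself is not shown above),
 * store it back and return the old value in the destination register.  The
 * "GEZ" variants appear to skip the store whenever the result would be
 * negative, which is what checks of the (int32_t)valb < 0 kind are testing.
 */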
void cpu_loop(CPUTLGState *env)
    CPUState *cs = CPU(tilegx_env_get_cpu(env));
        trapnr = cpu_tilegx_exec(cs);
        case TILEGX_EXCP_SYSCALL:
            env->regs[TILEGX_R_RE] = do_syscall(env, env->regs[TILEGX_R_NR],
                                                env->regs[0], env->regs[1],
                                                env->regs[2], env->regs[3],
                                                env->regs[4], env->regs[5],
                                                env->regs[6], env->regs[7]);
            env->regs[TILEGX_R_ERR] = TILEGX_IS_ERRNO(env->regs[TILEGX_R_RE])
                                      ? - env->regs[TILEGX_R_RE]
        case TILEGX_EXCP_OPCODE_EXCH:
            do_exch(env, true, false);
        case TILEGX_EXCP_OPCODE_EXCH4:
            do_exch(env, false, false);
        case TILEGX_EXCP_OPCODE_CMPEXCH:
            do_exch(env, true, true);
        case TILEGX_EXCP_OPCODE_CMPEXCH4:
            do_exch(env, false, true);
        case TILEGX_EXCP_OPCODE_FETCHADD:
        case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
        case TILEGX_EXCP_OPCODE_FETCHAND:
        case TILEGX_EXCP_OPCODE_FETCHOR:
            do_fetch(env, trapnr, true);
        case TILEGX_EXCP_OPCODE_FETCHADD4:
        case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
        case TILEGX_EXCP_OPCODE_FETCHAND4:
        case TILEGX_EXCP_OPCODE_FETCHOR4:
            do_fetch(env, trapnr, false);
        case TILEGX_EXCP_SIGNAL:
            do_signal(env, env->signo, env->sigcode);
        case TILEGX_EXCP_REG_IDN_ACCESS:
        case TILEGX_EXCP_REG_UDN_ACCESS:
            gen_sigill_reg(env);
            fprintf(stderr, "trapnr is %d[0x%x].\n", trapnr, trapnr);
            g_assert_not_reached();
        process_pending_signals(env);
THREAD CPUState *thread_cpu;

void task_settid(TaskState *ts)
    if (ts->ts_tid == 0) {
        ts->ts_tid = (pid_t)syscall(SYS_gettid);

void stop_all_tasks(void)
    /*
     * We trust that when using NPTL, start_exclusive()
     * handles thread stopping correctly.
     */

/* Assumes contents are already zeroed.  */
void init_task_state(TaskState *ts)
    ts->first_free = ts->sigqueue_table;
    for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
        ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1];
    ts->sigqueue_table[i].next = NULL;

CPUArchState *cpu_copy(CPUArchState *env)
    CPUState *cpu = ENV_GET_CPU(env);
    CPUState *new_cpu = cpu_init(cpu_model);
    CPUArchState *new_env = new_cpu->env_ptr;
    /* Reset non arch specific state */
    memcpy(new_env, env, sizeof(CPUArchState));
    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_cpu->breakpoints);
    QTAILQ_INIT(&new_cpu->watchpoints);
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
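/*
 * cpu_copy() gives a newly created guest thread its own CPU state: a fresh
 * CPUState is initialised from the same cpu_model, the architectural state is
 * copied wholesale with memcpy(), and existing breakpoints and watchpoints
 * are re-inserted on the new CPU so gdbstub debugging keeps working after the
 * guest clones a thread.  (The call sites are outside this part of the file.)
 */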
static void handle_arg_help(const char *arg)
    usage(EXIT_SUCCESS);

static void handle_arg_log(const char *arg)
    mask = qemu_str_to_log_mask(arg);
        qemu_print_log_usage(stdout);

static void handle_arg_log_filename(const char *arg)
    qemu_set_log_filename(arg);

static void handle_arg_set_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_setenv(envlist, token) != 0) {
            usage(EXIT_FAILURE);

static void handle_arg_unset_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_unsetenv(envlist, token) != 0) {
            usage(EXIT_FAILURE);

static void handle_arg_argv0(const char *arg)
    argv0 = strdup(arg);

static void handle_arg_stack_size(const char *arg)
    guest_stack_size = strtoul(arg, &p, 0);
    if (guest_stack_size == 0) {
        usage(EXIT_FAILURE);
        guest_stack_size *= 1024 * 1024;
    } else if (*p == 'k' || *p == 'K') {
        guest_stack_size *= 1024;

static void handle_arg_ld_prefix(const char *arg)
    interp_prefix = strdup(arg);

static void handle_arg_pagesize(const char *arg)
    qemu_host_page_size = atoi(arg);
    if (qemu_host_page_size == 0 ||
        (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
        fprintf(stderr, "page size must be a power of two\n");

static void handle_arg_randseed(const char *arg)
    unsigned long long seed;
    if (parse_uint_full(arg, &seed, 0) != 0 || seed > UINT_MAX) {
        fprintf(stderr, "Invalid seed number: %s\n", arg);

static void handle_arg_gdb(const char *arg)
    gdbstub_port = atoi(arg);

static void handle_arg_uname(const char *arg)
    qemu_uname_release = strdup(arg);

static void handle_arg_cpu(const char *arg)
    cpu_model = strdup(arg);
    if (cpu_model == NULL || is_help_option(cpu_model)) {
        /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
        cpu_list(stdout, &fprintf);

static void handle_arg_guest_base(const char *arg)
    guest_base = strtol(arg, NULL, 0);
    have_guest_base = 1;

static void handle_arg_reserved_va(const char *arg)
    reserved_va = strtoul(arg, &p, 0);
        unsigned long unshifted = reserved_va;
        reserved_va <<= shift;
        if (((reserved_va >> shift) != unshifted)
#if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
            || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
            fprintf(stderr, "Reserved virtual address too big\n");
        fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
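/*
 * The -R handler above accepts a plain byte count and, in a branch not shown
 * here, a size suffix (the error message suggests the usual k/M/G multipliers,
 * i.e. a left shift by 10/20/30 bits).  The shifted value is checked for
 * overflow by shifting it back and comparing with the original, and on hosts
 * whose long is wider than the guest address space it is additionally capped
 * at 1 << TARGET_VIRT_ADDR_SPACE_BITS.  Hypothetical invocation:
 *
 *     qemu-mips -R 256M ./a.out    (reserve 256 MiB of guest address space)
 */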
static void handle_arg_singlestep(const char *arg)

static void handle_arg_strace(const char *arg)

static void handle_arg_version(const char *arg)
    printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION
           ", Copyright (c) 2003-2008 Fabrice Bellard\n");

struct qemu_argument {
    void (*handle_opt)(const char *arg);
    const char *example;

static const struct qemu_argument arg_table[] = {
    {"h",          "",                 false, handle_arg_help,
     "",           "print this help"},
    {"help",       "",                 false, handle_arg_help,
    {"g",          "QEMU_GDB",         true,  handle_arg_gdb,
     "port",       "wait gdb connection to 'port'"},
    {"L",          "QEMU_LD_PREFIX",   true,  handle_arg_ld_prefix,
     "path",       "set the elf interpreter prefix to 'path'"},
    {"s",          "QEMU_STACK_SIZE",  true,  handle_arg_stack_size,
     "size",       "set the stack size to 'size' bytes"},
    {"cpu",        "QEMU_CPU",         true,  handle_arg_cpu,
     "model",      "select CPU (-cpu help for list)"},
    {"E",          "QEMU_SET_ENV",     true,  handle_arg_set_env,
     "var=value",  "sets targets environment variable (see below)"},
    {"U",          "QEMU_UNSET_ENV",   true,  handle_arg_unset_env,
     "var",        "unsets targets environment variable (see below)"},
    {"0",          "QEMU_ARGV0",       true,  handle_arg_argv0,
     "argv0",      "forces target process argv[0] to be 'argv0'"},
    {"r",          "QEMU_UNAME",       true,  handle_arg_uname,
     "uname",      "set qemu uname release string to 'uname'"},
    {"B",          "QEMU_GUEST_BASE",  true,  handle_arg_guest_base,
     "address",    "set guest_base address to 'address'"},
    {"R",          "QEMU_RESERVED_VA", true,  handle_arg_reserved_va,
     "size",       "reserve 'size' bytes for guest virtual address space"},
    {"d",          "QEMU_LOG",         true,  handle_arg_log,
     "item[,...]", "enable logging of specified items "
     "(use '-d help' for a list of items)"},
    {"D",          "QEMU_LOG_FILENAME", true, handle_arg_log_filename,
     "logfile",    "write logs to 'logfile' (default stderr)"},
    {"p",          "QEMU_PAGESIZE",    true,  handle_arg_pagesize,
     "pagesize",   "set the host page size to 'pagesize'"},
    {"singlestep", "QEMU_SINGLESTEP",  false, handle_arg_singlestep,
     "",           "run in singlestep mode"},
    {"strace",     "QEMU_STRACE",      false, handle_arg_strace,
     "",           "log system calls"},
    {"seed",       "QEMU_RAND_SEED",   true,  handle_arg_randseed,
     "",           "Seed for pseudo-random number generator"},
    {"version",    "QEMU_VERSION",     false, handle_arg_version,
     "",           "display version information and exit"},
    {NULL, NULL, false, NULL, NULL, NULL}

static void usage(int exitcode)
    const struct qemu_argument *arginfo;
    printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n"
           "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n"
           "Options and associated environment variables:\n"
    /* Calculate column widths. We must always have at least enough space
     * for the column header.
     */
    maxarglen = strlen("Argument");
    maxenvlen = strlen("Env-variable");
    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        int arglen = strlen(arginfo->argv);
        if (arginfo->has_arg) {
            arglen += strlen(arginfo->example) + 1;
        if (strlen(arginfo->env) > maxenvlen) {
            maxenvlen = strlen(arginfo->env);
        if (arglen > maxarglen) {
    printf("%-*s %-*s Description\n", maxarglen+1, "Argument",
           maxenvlen, "Env-variable");
    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->has_arg) {
            printf("-%s %-*s %-*s %s\n", arginfo->argv,
                   (int)(maxarglen - strlen(arginfo->argv) - 1),
                   arginfo->example, maxenvlen, arginfo->env, arginfo->help);
            printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv,
                   maxenvlen, arginfo->env,
           "QEMU_LD_PREFIX = %s\n"
           "QEMU_STACK_SIZE = %ld byte\n",
           "You can use -E and -U options or the QEMU_SET_ENV and\n"
           "QEMU_UNSET_ENV environment variables to set and unset\n"
           "environment variables for the target process.\n"
           "It is possible to provide several variables by separating them\n"
           "by commas in getsubopt(3) style. Additionally it is possible to\n"
           "provide the -E and -U options multiple times.\n"
           "The following lines are equivalent:\n"
           " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
           " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
           " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
           "Note that if you provide several changes to a single variable\n"
           "the last change will stay in effect.\n");

static int parse_args(int argc, char **argv)
    const struct qemu_argument *arginfo;
    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->env == NULL) {
        r = getenv(arginfo->env);
            arginfo->handle_opt(r);
    if (optind >= argc) {
        if (!strcmp(r, "-")) {
        /* Treat --foo the same as -foo.  */
        for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
            if (!strcmp(r, arginfo->argv)) {
                if (arginfo->has_arg) {
                    if (optind >= argc) {
                        (void) fprintf(stderr,
                            "qemu: missing argument for option '%s'\n", r);
                    arginfo->handle_opt(argv[optind]);
                arginfo->handle_opt(NULL);
        /* no option matched the current argv */
        if (arginfo->handle_opt == NULL) {
            (void) fprintf(stderr, "qemu: unknown option '%s'\n", r);
    if (optind >= argc) {
        (void) fprintf(stderr, "qemu: no user program specified\n");
    filename = argv[optind];
    exec_path = argv[optind];
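/*
 * parse_args() gives every option two spellings: before scanning argv it
 * walks arg_table and feeds the value of each option's environment variable
 * (QEMU_GDB, QEMU_STRACE, ...) through the same handle_opt callback, and
 * during the argv scan a leading "--foo" is treated the same as "-foo".
 * Whatever follows the last recognised option is the guest program itself,
 * recorded in both 'filename' and 'exec_path'.
 */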
int main(int argc, char **argv, char **envp)
    struct target_pt_regs regs1, *regs = &regs1;
    struct image_info info1, *info = &info1;
    struct linux_binprm bprm;
    char **target_environ, **wrk;
    module_call_init(MODULE_INIT_QOM);
    if ((envlist = envlist_create()) == NULL) {
        (void) fprintf(stderr, "Unable to allocate envlist\n");
    /* add current environment into the list */
    for (wrk = environ; *wrk != NULL; wrk++) {
        (void) envlist_setenv(envlist, *wrk);
    /* Read the stack limit from the kernel.  If it's "unlimited",
       then we can do little else besides use the default.  */
        if (getrlimit(RLIMIT_STACK, &lim) == 0
            && lim.rlim_cur != RLIM_INFINITY
            && lim.rlim_cur == (target_long)lim.rlim_cur) {
            guest_stack_size = lim.rlim_cur;
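    /*
     * The (target_long) round-trip in the condition above keeps a large
     * 64-bit host stack limit from overflowing a 32-bit guest: the soft
     * RLIMIT_STACK value is only adopted when it is finite and survives the
     * conversion to the guest's long type unchanged.
     */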
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file (TBD) */
    optind = parse_args(argc, argv);
    memset(regs, 0, sizeof(struct target_pt_regs));
    /* Zero out image_info */
    memset(info, 0, sizeof(struct image_info));
    memset(&bprm, 0, sizeof (bprm));
    /* Scan interp_prefix dir for replacement files. */
    init_paths(interp_prefix);
    init_qemu_uname_release();
    if (cpu_model == NULL) {
#if defined(TARGET_I386)
#ifdef TARGET_X86_64
        cpu_model = "qemu64";
        cpu_model = "qemu32";
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_M68K)
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
        cpu_model = "TI UltraSparc II";
        cpu_model = "Fujitsu MB86904";
#elif defined(TARGET_MIPS)
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
#elif defined TARGET_OPENRISC
        cpu_model = "or1200";
#elif defined(TARGET_PPC)
# ifdef TARGET_PPC64
        cpu_model = "POWER8";
#elif defined TARGET_SH4
        cpu_model = TYPE_SH7785_CPU;
    /* NOTE: we need to init the CPU at this stage to get
       qemu_host_page_size */
    cpu = cpu_init(cpu_model);
        fprintf(stderr, "Unable to find CPU definition\n");
    if (getenv("QEMU_STRACE")) {
    if (getenv("QEMU_RAND_SEED")) {
        handle_arg_randseed(getenv("QEMU_RAND_SEED"));
    target_environ = envlist_to_environ(envlist, NULL);
    envlist_free(envlist);
    /*
     * Now that page sizes are configured in cpu_init() we can do
     * proper page alignment for guest_base.
     */
    guest_base = HOST_PAGE_ALIGN(guest_base);
    if (reserved_va || have_guest_base) {
        guest_base = init_guest_space(guest_base, reserved_va, 0,
        if (guest_base == (unsigned long)-1) {
            fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
                    "space for use as guest address space (check your virtual "
                    "memory ulimit setting or reserve less using -R option)\n",
        mmap_next_start = reserved_va;
    /*
     * Note: when -B or -R is in effect, init_guest_space() tries to grab one
     * contiguous host mapping for the whole guest address range up front, and
     * mmap_next_start is reset to reserved_va, presumably so that
     * mmap_find_vma() hands out guest mappings from within the reserved block.
     */
    /*
     * Read in mmap_min_addr kernel parameter.  This value is used
     * when loading the ELF image to determine whether guest_base
     * is needed.  It is also used in mmap_find_vma.
     */
        if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
            if (fscanf(fp, "%lu", &tmp) == 1) {
                mmap_min_addr = tmp;
                qemu_log_mask(CPU_LOG_PAGE, "host mmap_min_addr=0x%lx\n", mmap_min_addr);
    /*
     * Prepare copy of argv vector for target.
     */
    target_argc = argc - optind;
    target_argv = calloc(target_argc + 1, sizeof (char *));
    if (target_argv == NULL) {
        (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");
    /*
     * If argv0 is specified (using '-0' switch) we replace
     * argv[0] pointer with the given one.
     */
    if (argv0 != NULL) {
        target_argv[i++] = strdup(argv0);
    for (; i < target_argc; i++) {
        target_argv[i] = strdup(argv[optind + i]);
    target_argv[target_argc] = NULL;
    /*
     * Hypothetical example of the -0 override handled above:
     *     qemu-arm -0 ash ./busybox
     * The guest then sees argv[0] == "ash" while QEMU still loads ./busybox,
     * which is how BusyBox-style binaries pick the applet to run.
     */
    ts = g_new0(TaskState, 1);
    init_task_state(ts);
    /* build Task State */
    execfd = qemu_getauxval(AT_EXECFD);
        execfd = open(filename, O_RDONLY);
            printf("Error while loading %s: %s\n", filename, strerror(errno));
            _exit(EXIT_FAILURE);
    ret = loader_exec(execfd, filename, target_argv, target_environ, regs,
        printf("Error while loading %s: %s\n", filename, strerror(-ret));
        _exit(EXIT_FAILURE);
    for (wrk = target_environ; *wrk; wrk++) {
    free(target_environ);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        qemu_log("guest_base 0x%lx\n", guest_base);
        qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
        qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code);
        qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data);
        qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk);
        qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry);
    target_set_brk(info->brk);
    /* Now that we've loaded the binary, GUEST_BASE is fixed.  Delay
       generating the prologue until now so that the prologue can take
       the real value of GUEST_BASE into account.  */
    tcg_prologue_init(&tcg_ctx);
#if defined(TARGET_I386)
    env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
    env->hflags |= HF_PE_MASK | HF_CPL_MASK;
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        env->cr[4] |= CR4_OSFXSR_MASK;
        env->hflags |= HF_OSFXSR_MASK;
#ifndef TARGET_ABI32
    /* enable 64 bit mode if possible */
    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
    env->cr[4] |= CR4_PAE_MASK;
    env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
    env->hflags |= HF_LMA_MASK;
    /* flags setup : we activate the IRQs by default as in user mode */
    env->eflags |= IF_MASK;
    /* linux register setup */
#ifndef TARGET_ABI32
    env->regs[R_EAX] = regs->rax;
    env->regs[R_EBX] = regs->rbx;
    env->regs[R_ECX] = regs->rcx;
    env->regs[R_EDX] = regs->rdx;
    env->regs[R_ESI] = regs->rsi;
    env->regs[R_EDI] = regs->rdi;
    env->regs[R_EBP] = regs->rbp;
    env->regs[R_ESP] = regs->rsp;
    env->eip = regs->rip;
    env->regs[R_EAX] = regs->eax;
    env->regs[R_EBX] = regs->ebx;
    env->regs[R_ECX] = regs->ecx;
    env->regs[R_EDX] = regs->edx;
    env->regs[R_ESI] = regs->esi;
    env->regs[R_EDI] = regs->edi;
    env->regs[R_EBP] = regs->ebp;
    env->regs[R_ESP] = regs->esp;
    env->eip = regs->eip;
    /* linux interrupt setup */
#ifndef TARGET_ABI32
    env->idt.limit = 511;
    env->idt.limit = 255;
    env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
                                PROT_READ|PROT_WRITE,
                                MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    idt_table = g2h(env->idt.base);
    /* linux segment setup */
        uint64_t *gdt_table;
        env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
        gdt_table = g2h(env->gdt.base);
        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
        /* 64 bit code segment */
        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
        write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_FS, __USER_DS);
    cpu_x86_load_seg(env, R_GS, __USER_DS);
    /* This hack makes Wine work... */
    env->segs[R_FS].selector = 0;
    cpu_x86_load_seg(env, R_DS, 0);
    cpu_x86_load_seg(env, R_ES, 0);
    cpu_x86_load_seg(env, R_FS, 0);
    cpu_x86_load_seg(env, R_GS, 0);
    /*
     * Background for the descriptor setup above: even in user-mode emulation
     * the guest expects the flat protected-mode layout Linux provides, so a
     * small GDT is mapped in guest memory and __USER_CS/__USER_DS are written
     * as 4 GiB, DPL-3 code/data descriptors (type 0xa and 0x2 with the G and
     * B bits set).  The duplicated segment loads appear to come from a
     * 32-bit/64-bit #ifdef that is not shown here: the 32-bit ABI points
     * DS/ES/FS/GS at __USER_DS (with the FS selector zeroed for Wine), while
     * 64-bit simply loads them with 0 since segmentation is mostly ignored
     * there.
     */
#elif defined(TARGET_AARCH64)
    if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
                "The selected ARM CPU does not support 64 bit mode\n");
    for (i = 0; i < 31; i++) {
        env->xregs[i] = regs->regs[i];
    env->xregs[31] = regs->sp;
#elif defined(TARGET_ARM)
    cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->uregs[i];
#ifdef TARGET_WORDS_BIGENDIAN
    if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
        && (info->elf_flags & EF_ARM_BE8)) {
        env->uncached_cpsr |= CPSR_E;
        env->cp15.sctlr_el[1] |= SCTLR_E0E;
        env->cp15.sctlr_el[1] |= SCTLR_B;
#elif defined(TARGET_UNICORE32)
    cpu_asr_write(env, regs->uregs[32], 0xffffffff);
    for (i = 0; i < 32; i++) {
        env->regs[i] = regs->uregs[i];
#elif defined(TARGET_SPARC)
    env->npc = regs->npc;
    for (i = 0; i < 8; i++)
        env->gregs[i] = regs->u_regs[i];
    for (i = 0; i < 8; i++)
        env->regwptr[i] = regs->u_regs[i + 8];
#elif defined(TARGET_PPC)
#if defined(TARGET_PPC64)
#if defined(TARGET_ABI32)
    env->msr &= ~((target_ulong)1 << MSR_SF);
    env->msr |= (target_ulong)1 << MSR_SF;
    env->nip = regs->nip;
    for (i = 0; i < 32; i++) {
        env->gpr[i] = regs->gpr[i];
#elif defined(TARGET_M68K)
    env->dregs[0] = regs->d0;
    env->dregs[1] = regs->d1;
    env->dregs[2] = regs->d2;
    env->dregs[3] = regs->d3;
    env->dregs[4] = regs->d4;
    env->dregs[5] = regs->d5;
    env->dregs[6] = regs->d6;
    env->dregs[7] = regs->d7;
    env->aregs[0] = regs->a0;
    env->aregs[1] = regs->a1;
    env->aregs[2] = regs->a2;
    env->aregs[3] = regs->a3;
    env->aregs[4] = regs->a4;
    env->aregs[5] = regs->a5;
    env->aregs[6] = regs->a6;
    env->aregs[7] = regs->usp;
    ts->sim_syscalls = 1;
#elif defined(TARGET_MICROBLAZE)
    env->regs[0] = regs->r0;
    env->regs[1] = regs->r1;
    env->regs[2] = regs->r2;
    env->regs[3] = regs->r3;
    env->regs[4] = regs->r4;
    env->regs[5] = regs->r5;
    env->regs[6] = regs->r6;
    env->regs[7] = regs->r7;
    env->regs[8] = regs->r8;
    env->regs[9] = regs->r9;
    env->regs[10] = regs->r10;
    env->regs[11] = regs->r11;
    env->regs[12] = regs->r12;
    env->regs[13] = regs->r13;
    env->regs[14] = regs->r14;
    env->regs[15] = regs->r15;
    env->regs[16] = regs->r16;
    env->regs[17] = regs->r17;
    env->regs[18] = regs->r18;
    env->regs[19] = regs->r19;
    env->regs[20] = regs->r20;
    env->regs[21] = regs->r21;
    env->regs[22] = regs->r22;
    env->regs[23] = regs->r23;
    env->regs[24] = regs->r24;
    env->regs[25] = regs->r25;
    env->regs[26] = regs->r26;
    env->regs[27] = regs->r27;
    env->regs[28] = regs->r28;
    env->regs[29] = regs->r29;
    env->regs[30] = regs->r30;
    env->regs[31] = regs->r31;
    env->sregs[SR_PC] = regs->pc;
#elif defined(TARGET_MIPS)
    for (i = 0; i < 32; i++) {
        env->active_tc.gpr[i] = regs->regs[i];
    env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
    if (regs->cp0_epc & 1) {
        env->hflags |= MIPS_HFLAG_M16;
    /*
     * The low bit of cp0_epc is an ISA-mode marker rather than part of the
     * entry address, which is why it is masked off the starting PC above
     * and, when set, turned into MIPS_HFLAG_M16 so execution starts in
     * MIPS16/microMIPS mode.
     */
#elif defined(TARGET_OPENRISC)
    for (i = 0; i < 32; i++) {
        env->gpr[i] = regs->gpr[i];
#elif defined(TARGET_SH4)
    for (i = 0; i < 16; i++) {
        env->gregs[i] = regs->regs[i];
#elif defined(TARGET_ALPHA)
    for (i = 0; i < 28; i++) {
        env->ir[i] = ((abi_ulong *)regs)[i];
    env->ir[IR_SP] = regs->usp;
#elif defined(TARGET_CRIS)
    env->regs[0] = regs->r0;
    env->regs[1] = regs->r1;
    env->regs[2] = regs->r2;
    env->regs[3] = regs->r3;
    env->regs[4] = regs->r4;
    env->regs[5] = regs->r5;
    env->regs[6] = regs->r6;
    env->regs[7] = regs->r7;
    env->regs[8] = regs->r8;
    env->regs[9] = regs->r9;
    env->regs[10] = regs->r10;
    env->regs[11] = regs->r11;
    env->regs[12] = regs->r12;
    env->regs[13] = regs->r13;
    env->regs[14] = info->start_stack;
    env->regs[15] = regs->acr;
    env->pc = regs->erp;
#elif defined(TARGET_S390X)
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->gprs[i];
    env->psw.mask = regs->psw.mask;
    env->psw.addr = regs->psw.addr;
#elif defined(TARGET_TILEGX)
    for (i = 0; i < TILEGX_R_COUNT; i++) {
        env->regs[i] = regs->regs[i];
    for (i = 0; i < TILEGX_SPR_COUNT; i++) {
#error unsupported target CPU
#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call.  */
    if (gdbserver_start(gdbstub_port) < 0) {
        fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
    gdb_handlesig(cpu, 0);