4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include <sys/syscall.h>
22 #include <sys/resource.h>
25 #include "qemu/path.h"
26 #include "qemu/cutils.h"
27 #include "qemu/help_option.h"
29 #include "exec/exec-all.h"
31 #include "qemu/timer.h"
32 #include "qemu/envlist.h"
39 static const char *filename
;
40 static const char *argv0
;
41 static int gdbstub_port
;
42 static envlist_t
*envlist
;
43 static const char *cpu_model
;
44 unsigned long mmap_min_addr
;
45 unsigned long guest_base
;
48 #define EXCP_DUMP(env, fmt, ...) \
50 CPUState *cs = ENV_GET_CPU(env); \
51 fprintf(stderr, fmt , ## __VA_ARGS__); \
52 cpu_dump_state(cs, stderr, fprintf, 0); \
53 if (qemu_log_separate()) { \
54 qemu_log(fmt, ## __VA_ARGS__); \
55 log_cpu_state(cs, 0); \
59 #if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
61 * When running 32-on-64 we should make sure we can fit all of the possible
62 * guest address space into a contiguous chunk of virtual host memory.
64 * This way we will never overlap with our own libraries or binaries or stack
65 * or anything else that QEMU maps.
68 /* MIPS only supports 31 bits of virtual address space for user space */
69 unsigned long reserved_va
= 0x77000000;
71 unsigned long reserved_va
= 0xf7000000;
74 unsigned long reserved_va
;
77 static void usage(int exitcode
);
79 static const char *interp_prefix
= CONFIG_QEMU_INTERP_PREFIX
;
80 const char *qemu_uname_release
;
82 /* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
83 we allocate a bigger stack. Need a better solution, for example
84 by remapping the process stack directly at the right place */
85 unsigned long guest_stack_size
= 8 * 1024 * 1024UL;
87 void gemu_log(const char *fmt
, ...)
92 vfprintf(stderr
, fmt
, ap
);
96 #if defined(TARGET_I386)
97 int cpu_get_pic_interrupt(CPUX86State
*env
)
103 /***********************************************************/
104 /* Helper routines for implementing atomic operations. */
106 /* To implement exclusive operations we force all cpus to syncronise.
107 We don't require a full sync, only that no cpus are executing guest code.
108 The alternative is to map target atomic ops onto host equivalents,
109 which requires quite a lot of per host/target work. */
110 static pthread_mutex_t cpu_list_mutex
= PTHREAD_MUTEX_INITIALIZER
;
111 static pthread_mutex_t exclusive_lock
= PTHREAD_MUTEX_INITIALIZER
;
112 static pthread_cond_t exclusive_cond
= PTHREAD_COND_INITIALIZER
;
113 static pthread_cond_t exclusive_resume
= PTHREAD_COND_INITIALIZER
;
114 static int pending_cpus
;
116 /* Make sure everything is in a consistent state for calling fork(). */
117 void fork_start(void)
119 qemu_mutex_lock(&tcg_ctx
.tb_ctx
.tb_lock
);
120 pthread_mutex_lock(&exclusive_lock
);
124 void fork_end(int child
)
126 mmap_fork_end(child
);
128 CPUState
*cpu
, *next_cpu
;
129 /* Child processes created by fork() only have a single thread.
130 Discard information about the parent threads. */
131 CPU_FOREACH_SAFE(cpu
, next_cpu
) {
132 if (cpu
!= thread_cpu
) {
133 QTAILQ_REMOVE(&cpus
, thread_cpu
, node
);
137 pthread_mutex_init(&exclusive_lock
, NULL
);
138 pthread_mutex_init(&cpu_list_mutex
, NULL
);
139 pthread_cond_init(&exclusive_cond
, NULL
);
140 pthread_cond_init(&exclusive_resume
, NULL
);
141 qemu_mutex_init(&tcg_ctx
.tb_ctx
.tb_lock
);
142 gdbserver_fork(thread_cpu
);
144 pthread_mutex_unlock(&exclusive_lock
);
145 qemu_mutex_unlock(&tcg_ctx
.tb_ctx
.tb_lock
);
149 /* Wait for pending exclusive operations to complete. The exclusive lock
151 static inline void exclusive_idle(void)
153 while (pending_cpus
) {
154 pthread_cond_wait(&exclusive_resume
, &exclusive_lock
);
158 /* Start an exclusive operation.
159 Must only be called from outside cpu_arm_exec. */
160 static inline void start_exclusive(void)
164 pthread_mutex_lock(&exclusive_lock
);
168 /* Make all other cpus stop executing. */
169 CPU_FOREACH(other_cpu
) {
170 if (other_cpu
->running
) {
175 if (pending_cpus
> 1) {
176 pthread_cond_wait(&exclusive_cond
, &exclusive_lock
);
180 /* Finish an exclusive operation. */
181 static inline void __attribute__((unused
)) end_exclusive(void)
184 pthread_cond_broadcast(&exclusive_resume
);
185 pthread_mutex_unlock(&exclusive_lock
);
188 /* Wait for exclusive ops to finish, and begin cpu execution. */
189 static inline void cpu_exec_start(CPUState
*cpu
)
191 pthread_mutex_lock(&exclusive_lock
);
194 pthread_mutex_unlock(&exclusive_lock
);
197 /* Mark cpu as not executing, and release pending exclusive ops. */
198 static inline void cpu_exec_end(CPUState
*cpu
)
200 pthread_mutex_lock(&exclusive_lock
);
201 cpu
->running
= false;
202 if (pending_cpus
> 1) {
204 if (pending_cpus
== 1) {
205 pthread_cond_signal(&exclusive_cond
);
209 pthread_mutex_unlock(&exclusive_lock
);
212 void cpu_list_lock(void)
214 pthread_mutex_lock(&cpu_list_mutex
);
217 void cpu_list_unlock(void)
219 pthread_mutex_unlock(&cpu_list_mutex
);
224 /***********************************************************/
225 /* CPUX86 core interface */
227 uint64_t cpu_get_tsc(CPUX86State
*env
)
229 return cpu_get_host_ticks();
232 static void write_dt(void *ptr
, unsigned long addr
, unsigned long limit
,
237 e1
= (addr
<< 16) | (limit
& 0xffff);
238 e2
= ((addr
>> 16) & 0xff) | (addr
& 0xff000000) | (limit
& 0x000f0000);
245 static uint64_t *idt_table
;
247 static void set_gate64(void *ptr
, unsigned int type
, unsigned int dpl
,
248 uint64_t addr
, unsigned int sel
)
251 e1
= (addr
& 0xffff) | (sel
<< 16);
252 e2
= (addr
& 0xffff0000) | 0x8000 | (dpl
<< 13) | (type
<< 8);
256 p
[2] = tswap32(addr
>> 32);
259 /* only dpl matters as we do only user space emulation */
260 static void set_idt(int n
, unsigned int dpl
)
262 set_gate64(idt_table
+ n
* 2, 0, dpl
, 0, 0);
265 static void set_gate(void *ptr
, unsigned int type
, unsigned int dpl
,
266 uint32_t addr
, unsigned int sel
)
269 e1
= (addr
& 0xffff) | (sel
<< 16);
270 e2
= (addr
& 0xffff0000) | 0x8000 | (dpl
<< 13) | (type
<< 8);
276 /* only dpl matters as we do only user space emulation */
277 static void set_idt(int n
, unsigned int dpl
)
279 set_gate(idt_table
+ n
, 0, dpl
, 0, 0);
283 void cpu_loop(CPUX86State
*env
)
285 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
289 target_siginfo_t info
;
293 trapnr
= cpu_x86_exec(cs
);
297 /* linux syscall from int $0x80 */
298 ret
= do_syscall(env
,
307 if (ret
== -TARGET_ERESTARTSYS
) {
309 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
310 env
->regs
[R_EAX
] = ret
;
315 /* linux syscall from syscall instruction */
316 ret
= do_syscall(env
,
325 if (ret
== -TARGET_ERESTARTSYS
) {
327 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
328 env
->regs
[R_EAX
] = ret
;
334 info
.si_signo
= TARGET_SIGBUS
;
336 info
.si_code
= TARGET_SI_KERNEL
;
337 info
._sifields
._sigfault
._addr
= 0;
338 queue_signal(env
, info
.si_signo
, &info
);
341 /* XXX: potential problem if ABI32 */
342 #ifndef TARGET_X86_64
343 if (env
->eflags
& VM_MASK
) {
344 handle_vm86_fault(env
);
348 info
.si_signo
= TARGET_SIGSEGV
;
350 info
.si_code
= TARGET_SI_KERNEL
;
351 info
._sifields
._sigfault
._addr
= 0;
352 queue_signal(env
, info
.si_signo
, &info
);
356 info
.si_signo
= TARGET_SIGSEGV
;
358 if (!(env
->error_code
& 1))
359 info
.si_code
= TARGET_SEGV_MAPERR
;
361 info
.si_code
= TARGET_SEGV_ACCERR
;
362 info
._sifields
._sigfault
._addr
= env
->cr
[2];
363 queue_signal(env
, info
.si_signo
, &info
);
366 #ifndef TARGET_X86_64
367 if (env
->eflags
& VM_MASK
) {
368 handle_vm86_trap(env
, trapnr
);
372 /* division by zero */
373 info
.si_signo
= TARGET_SIGFPE
;
375 info
.si_code
= TARGET_FPE_INTDIV
;
376 info
._sifields
._sigfault
._addr
= env
->eip
;
377 queue_signal(env
, info
.si_signo
, &info
);
382 #ifndef TARGET_X86_64
383 if (env
->eflags
& VM_MASK
) {
384 handle_vm86_trap(env
, trapnr
);
388 info
.si_signo
= TARGET_SIGTRAP
;
390 if (trapnr
== EXCP01_DB
) {
391 info
.si_code
= TARGET_TRAP_BRKPT
;
392 info
._sifields
._sigfault
._addr
= env
->eip
;
394 info
.si_code
= TARGET_SI_KERNEL
;
395 info
._sifields
._sigfault
._addr
= 0;
397 queue_signal(env
, info
.si_signo
, &info
);
402 #ifndef TARGET_X86_64
403 if (env
->eflags
& VM_MASK
) {
404 handle_vm86_trap(env
, trapnr
);
408 info
.si_signo
= TARGET_SIGSEGV
;
410 info
.si_code
= TARGET_SI_KERNEL
;
411 info
._sifields
._sigfault
._addr
= 0;
412 queue_signal(env
, info
.si_signo
, &info
);
416 info
.si_signo
= TARGET_SIGILL
;
418 info
.si_code
= TARGET_ILL_ILLOPN
;
419 info
._sifields
._sigfault
._addr
= env
->eip
;
420 queue_signal(env
, info
.si_signo
, &info
);
423 /* just indicate that signals should be handled asap */
429 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
434 info
.si_code
= TARGET_TRAP_BRKPT
;
435 queue_signal(env
, info
.si_signo
, &info
);
440 pc
= env
->segs
[R_CS
].base
+ env
->eip
;
441 EXCP_DUMP(env
, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
445 process_pending_signals(env
);
452 #define get_user_code_u32(x, gaddr, env) \
453 ({ abi_long __r = get_user_u32((x), (gaddr)); \
454 if (!__r && bswap_code(arm_sctlr_b(env))) { \
460 #define get_user_code_u16(x, gaddr, env) \
461 ({ abi_long __r = get_user_u16((x), (gaddr)); \
462 if (!__r && bswap_code(arm_sctlr_b(env))) { \
468 #define get_user_data_u32(x, gaddr, env) \
469 ({ abi_long __r = get_user_u32((x), (gaddr)); \
470 if (!__r && arm_cpu_bswap_data(env)) { \
476 #define get_user_data_u16(x, gaddr, env) \
477 ({ abi_long __r = get_user_u16((x), (gaddr)); \
478 if (!__r && arm_cpu_bswap_data(env)) { \
484 #define put_user_data_u32(x, gaddr, env) \
485 ({ typeof(x) __x = (x); \
486 if (arm_cpu_bswap_data(env)) { \
487 __x = bswap32(__x); \
489 put_user_u32(__x, (gaddr)); \
492 #define put_user_data_u16(x, gaddr, env) \
493 ({ typeof(x) __x = (x); \
494 if (arm_cpu_bswap_data(env)) { \
495 __x = bswap16(__x); \
497 put_user_u16(__x, (gaddr)); \
501 /* Commpage handling -- there is no commpage for AArch64 */
504 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
506 * r0 = pointer to oldval
507 * r1 = pointer to newval
508 * r2 = pointer to target value
511 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
512 * C set if *ptr was changed, clear if no exchange happened
514 * Note segv's in kernel helpers are a bit tricky, we can set the
515 * data address sensibly but the PC address is just the entry point.
517 static void arm_kernel_cmpxchg64_helper(CPUARMState
*env
)
519 uint64_t oldval
, newval
, val
;
521 target_siginfo_t info
;
523 /* Based on the 32 bit code in do_kernel_trap */
525 /* XXX: This only works between threads, not between processes.
526 It's probably possible to implement this with native host
527 operations. However things like ldrex/strex are much harder so
528 there's not much point trying. */
530 cpsr
= cpsr_read(env
);
533 if (get_user_u64(oldval
, env
->regs
[0])) {
534 env
->exception
.vaddress
= env
->regs
[0];
538 if (get_user_u64(newval
, env
->regs
[1])) {
539 env
->exception
.vaddress
= env
->regs
[1];
543 if (get_user_u64(val
, addr
)) {
544 env
->exception
.vaddress
= addr
;
551 if (put_user_u64(val
, addr
)) {
552 env
->exception
.vaddress
= addr
;
562 cpsr_write(env
, cpsr
, CPSR_C
, CPSRWriteByInstr
);
568 /* We get the PC of the entry address - which is as good as anything,
569 on a real kernel what you get depends on which mode it uses. */
570 info
.si_signo
= TARGET_SIGSEGV
;
572 /* XXX: check env->error_code */
573 info
.si_code
= TARGET_SEGV_MAPERR
;
574 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
575 queue_signal(env
, info
.si_signo
, &info
);
578 /* Handle a jump to the kernel code page. */
580 do_kernel_trap(CPUARMState
*env
)
586 switch (env
->regs
[15]) {
587 case 0xffff0fa0: /* __kernel_memory_barrier */
588 /* ??? No-op. Will need to do better for SMP. */
590 case 0xffff0fc0: /* __kernel_cmpxchg */
591 /* XXX: This only works between threads, not between processes.
592 It's probably possible to implement this with native host
593 operations. However things like ldrex/strex are much harder so
594 there's not much point trying. */
596 cpsr
= cpsr_read(env
);
598 /* FIXME: This should SEGV if the access fails. */
599 if (get_user_u32(val
, addr
))
601 if (val
== env
->regs
[0]) {
603 /* FIXME: Check for segfaults. */
604 put_user_u32(val
, addr
);
611 cpsr_write(env
, cpsr
, CPSR_C
, CPSRWriteByInstr
);
614 case 0xffff0fe0: /* __kernel_get_tls */
615 env
->regs
[0] = cpu_get_tls(env
);
617 case 0xffff0f60: /* __kernel_cmpxchg64 */
618 arm_kernel_cmpxchg64_helper(env
);
624 /* Jump back to the caller. */
625 addr
= env
->regs
[14];
630 env
->regs
[15] = addr
;
635 /* Store exclusive handling for AArch32 */
636 static int do_strex(CPUARMState
*env
)
644 if (env
->exclusive_addr
!= env
->exclusive_test
) {
647 /* We know we're always AArch32 so the address is in uint32_t range
648 * unless it was the -1 exclusive-monitor-lost value (which won't
649 * match exclusive_test above).
651 assert(extract64(env
->exclusive_addr
, 32, 32) == 0);
652 addr
= env
->exclusive_addr
;
653 size
= env
->exclusive_info
& 0xf;
656 segv
= get_user_u8(val
, addr
);
659 segv
= get_user_data_u16(val
, addr
, env
);
663 segv
= get_user_data_u32(val
, addr
, env
);
669 env
->exception
.vaddress
= addr
;
674 segv
= get_user_data_u32(valhi
, addr
+ 4, env
);
676 env
->exception
.vaddress
= addr
+ 4;
679 if (arm_cpu_bswap_data(env
)) {
680 val
= deposit64((uint64_t)valhi
, 32, 32, val
);
682 val
= deposit64(val
, 32, 32, valhi
);
685 if (val
!= env
->exclusive_val
) {
689 val
= env
->regs
[(env
->exclusive_info
>> 8) & 0xf];
692 segv
= put_user_u8(val
, addr
);
695 segv
= put_user_data_u16(val
, addr
, env
);
699 segv
= put_user_data_u32(val
, addr
, env
);
703 env
->exception
.vaddress
= addr
;
707 val
= env
->regs
[(env
->exclusive_info
>> 12) & 0xf];
708 segv
= put_user_data_u32(val
, addr
+ 4, env
);
710 env
->exception
.vaddress
= addr
+ 4;
717 env
->regs
[(env
->exclusive_info
>> 4) & 0xf] = rc
;
723 void cpu_loop(CPUARMState
*env
)
725 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
727 unsigned int n
, insn
;
728 target_siginfo_t info
;
734 trapnr
= cpu_arm_exec(cs
);
739 TaskState
*ts
= cs
->opaque
;
743 /* we handle the FPU emulation here, as Linux */
744 /* we get the opcode */
745 /* FIXME - what to do if get_user() fails? */
746 get_user_code_u32(opcode
, env
->regs
[15], env
);
748 rc
= EmulateAll(opcode
, &ts
->fpa
, env
);
749 if (rc
== 0) { /* illegal instruction */
750 info
.si_signo
= TARGET_SIGILL
;
752 info
.si_code
= TARGET_ILL_ILLOPN
;
753 info
._sifields
._sigfault
._addr
= env
->regs
[15];
754 queue_signal(env
, info
.si_signo
, &info
);
755 } else if (rc
< 0) { /* FP exception */
758 /* translate softfloat flags to FPSR flags */
759 if (-rc
& float_flag_invalid
)
761 if (-rc
& float_flag_divbyzero
)
763 if (-rc
& float_flag_overflow
)
765 if (-rc
& float_flag_underflow
)
767 if (-rc
& float_flag_inexact
)
770 FPSR fpsr
= ts
->fpa
.fpsr
;
771 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
773 if (fpsr
& (arm_fpe
<< 16)) { /* exception enabled? */
774 info
.si_signo
= TARGET_SIGFPE
;
777 /* ordered by priority, least first */
778 if (arm_fpe
& BIT_IXC
) info
.si_code
= TARGET_FPE_FLTRES
;
779 if (arm_fpe
& BIT_UFC
) info
.si_code
= TARGET_FPE_FLTUND
;
780 if (arm_fpe
& BIT_OFC
) info
.si_code
= TARGET_FPE_FLTOVF
;
781 if (arm_fpe
& BIT_DZC
) info
.si_code
= TARGET_FPE_FLTDIV
;
782 if (arm_fpe
& BIT_IOC
) info
.si_code
= TARGET_FPE_FLTINV
;
784 info
._sifields
._sigfault
._addr
= env
->regs
[15];
785 queue_signal(env
, info
.si_signo
, &info
);
790 /* accumulate unenabled exceptions */
791 if ((!(fpsr
& BIT_IXE
)) && (arm_fpe
& BIT_IXC
))
793 if ((!(fpsr
& BIT_UFE
)) && (arm_fpe
& BIT_UFC
))
795 if ((!(fpsr
& BIT_OFE
)) && (arm_fpe
& BIT_OFC
))
797 if ((!(fpsr
& BIT_DZE
)) && (arm_fpe
& BIT_DZC
))
799 if ((!(fpsr
& BIT_IOE
)) && (arm_fpe
& BIT_IOC
))
802 } else { /* everything OK */
813 if (trapnr
== EXCP_BKPT
) {
815 /* FIXME - what to do if get_user() fails? */
816 get_user_code_u16(insn
, env
->regs
[15], env
);
820 /* FIXME - what to do if get_user() fails? */
821 get_user_code_u32(insn
, env
->regs
[15], env
);
822 n
= (insn
& 0xf) | ((insn
>> 4) & 0xff0);
827 /* FIXME - what to do if get_user() fails? */
828 get_user_code_u16(insn
, env
->regs
[15] - 2, env
);
831 /* FIXME - what to do if get_user() fails? */
832 get_user_code_u32(insn
, env
->regs
[15] - 4, env
);
837 if (n
== ARM_NR_cacheflush
) {
839 } else if (n
== ARM_NR_semihosting
840 || n
== ARM_NR_thumb_semihosting
) {
841 env
->regs
[0] = do_arm_semihosting (env
);
842 } else if (n
== 0 || n
>= ARM_SYSCALL_BASE
|| env
->thumb
) {
844 if (env
->thumb
|| n
== 0) {
847 n
-= ARM_SYSCALL_BASE
;
850 if ( n
> ARM_NR_BASE
) {
852 case ARM_NR_cacheflush
:
856 cpu_set_tls(env
, env
->regs
[0]);
859 case ARM_NR_breakpoint
:
860 env
->regs
[15] -= env
->thumb
? 2 : 4;
863 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
865 env
->regs
[0] = -TARGET_ENOSYS
;
869 ret
= do_syscall(env
,
878 if (ret
== -TARGET_ERESTARTSYS
) {
879 env
->regs
[15] -= env
->thumb
? 2 : 4;
880 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
890 /* just indicate that signals should be handled asap */
893 if (!do_strex(env
)) {
896 /* fall through for segv */
897 case EXCP_PREFETCH_ABORT
:
898 case EXCP_DATA_ABORT
:
899 addr
= env
->exception
.vaddress
;
901 info
.si_signo
= TARGET_SIGSEGV
;
903 /* XXX: check env->error_code */
904 info
.si_code
= TARGET_SEGV_MAPERR
;
905 info
._sifields
._sigfault
._addr
= addr
;
906 queue_signal(env
, info
.si_signo
, &info
);
914 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
919 info
.si_code
= TARGET_TRAP_BRKPT
;
920 queue_signal(env
, info
.si_signo
, &info
);
924 case EXCP_KERNEL_TRAP
:
925 if (do_kernel_trap(env
))
929 /* nothing to do here for user-mode, just resume guest code */
933 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
936 process_pending_signals(env
);
943 * Handle AArch64 store-release exclusive
945 * rs = gets the status result of store exclusive
946 * rt = is the register that is stored
947 * rt2 = is the second register store (in STP)
950 static int do_strex_a64(CPUARMState
*env
)
961 /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
962 size
= extract32(env
->exclusive_info
, 0, 2);
963 is_pair
= extract32(env
->exclusive_info
, 2, 1);
964 rs
= extract32(env
->exclusive_info
, 4, 5);
965 rt
= extract32(env
->exclusive_info
, 9, 5);
966 rt2
= extract32(env
->exclusive_info
, 14, 5);
968 addr
= env
->exclusive_addr
;
970 if (addr
!= env
->exclusive_test
) {
976 segv
= get_user_u8(val
, addr
);
979 segv
= get_user_u16(val
, addr
);
982 segv
= get_user_u32(val
, addr
);
985 segv
= get_user_u64(val
, addr
);
991 env
->exception
.vaddress
= addr
;
994 if (val
!= env
->exclusive_val
) {
999 segv
= get_user_u32(val
, addr
+ 4);
1001 segv
= get_user_u64(val
, addr
+ 8);
1004 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
1007 if (val
!= env
->exclusive_high
) {
1011 /* handle the zero register */
1012 val
= rt
== 31 ? 0 : env
->xregs
[rt
];
1015 segv
= put_user_u8(val
, addr
);
1018 segv
= put_user_u16(val
, addr
);
1021 segv
= put_user_u32(val
, addr
);
1024 segv
= put_user_u64(val
, addr
);
1031 /* handle the zero register */
1032 val
= rt2
== 31 ? 0 : env
->xregs
[rt2
];
1034 segv
= put_user_u32(val
, addr
+ 4);
1036 segv
= put_user_u64(val
, addr
+ 8);
1039 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
1046 /* rs == 31 encodes a write to the ZR, thus throwing away
1047 * the status return. This is rather silly but valid.
1050 env
->xregs
[rs
] = rc
;
1053 /* instruction faulted, PC does not advance */
1054 /* either way a strex releases any exclusive lock we have */
1055 env
->exclusive_addr
= -1;
1060 /* AArch64 main loop */
1061 void cpu_loop(CPUARMState
*env
)
1063 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
1066 target_siginfo_t info
;
1070 trapnr
= cpu_arm_exec(cs
);
1075 ret
= do_syscall(env
,
1084 if (ret
== -TARGET_ERESTARTSYS
) {
1086 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
1087 env
->xregs
[0] = ret
;
1090 case EXCP_INTERRUPT
:
1091 /* just indicate that signals should be handled asap */
1094 info
.si_signo
= TARGET_SIGILL
;
1096 info
.si_code
= TARGET_ILL_ILLOPN
;
1097 info
._sifields
._sigfault
._addr
= env
->pc
;
1098 queue_signal(env
, info
.si_signo
, &info
);
1101 if (!do_strex_a64(env
)) {
1104 /* fall through for segv */
1105 case EXCP_PREFETCH_ABORT
:
1106 case EXCP_DATA_ABORT
:
1107 info
.si_signo
= TARGET_SIGSEGV
;
1109 /* XXX: check env->error_code */
1110 info
.si_code
= TARGET_SEGV_MAPERR
;
1111 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
1112 queue_signal(env
, info
.si_signo
, &info
);
1116 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1118 info
.si_signo
= sig
;
1120 info
.si_code
= TARGET_TRAP_BRKPT
;
1121 queue_signal(env
, info
.si_signo
, &info
);
1125 env
->xregs
[0] = do_arm_semihosting(env
);
1128 /* nothing to do here for user-mode, just resume guest code */
1131 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1134 process_pending_signals(env
);
1135 /* Exception return on AArch64 always clears the exclusive monitor,
1136 * so any return to running guest code implies this.
1137 * A strex (successful or otherwise) also clears the monitor, so
1138 * we don't need to specialcase EXCP_STREX.
1140 env
->exclusive_addr
= -1;
1143 #endif /* ndef TARGET_ABI32 */
1147 #ifdef TARGET_UNICORE32
1149 void cpu_loop(CPUUniCore32State
*env
)
1151 CPUState
*cs
= CPU(uc32_env_get_cpu(env
));
1153 unsigned int n
, insn
;
1154 target_siginfo_t info
;
1158 trapnr
= uc32_cpu_exec(cs
);
1161 case UC32_EXCP_PRIV
:
1164 get_user_u32(insn
, env
->regs
[31] - 4);
1165 n
= insn
& 0xffffff;
1167 if (n
>= UC32_SYSCALL_BASE
) {
1169 n
-= UC32_SYSCALL_BASE
;
1170 if (n
== UC32_SYSCALL_NR_set_tls
) {
1171 cpu_set_tls(env
, env
->regs
[0]);
1174 env
->regs
[0] = do_syscall(env
,
1189 case UC32_EXCP_DTRAP
:
1190 case UC32_EXCP_ITRAP
:
1191 info
.si_signo
= TARGET_SIGSEGV
;
1193 /* XXX: check env->error_code */
1194 info
.si_code
= TARGET_SEGV_MAPERR
;
1195 info
._sifields
._sigfault
._addr
= env
->cp0
.c4_faultaddr
;
1196 queue_signal(env
, info
.si_signo
, &info
);
1198 case EXCP_INTERRUPT
:
1199 /* just indicate that signals should be handled asap */
1205 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1207 info
.si_signo
= sig
;
1209 info
.si_code
= TARGET_TRAP_BRKPT
;
1210 queue_signal(env
, info
.si_signo
, &info
);
1217 process_pending_signals(env
);
1221 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1227 #define SPARC64_STACK_BIAS 2047
1231 /* WARNING: dealing with register windows _is_ complicated. More info
1232 can be found at http://www.sics.se/~psm/sparcstack.html */
1233 static inline int get_reg_index(CPUSPARCState
*env
, int cwp
, int index
)
1235 index
= (index
+ cwp
* 16) % (16 * env
->nwindows
);
1236 /* wrap handling : if cwp is on the last window, then we use the
1237 registers 'after' the end */
1238 if (index
< 8 && env
->cwp
== env
->nwindows
- 1)
1239 index
+= 16 * env
->nwindows
;
1243 /* save the register window 'cwp1' */
1244 static inline void save_window_offset(CPUSPARCState
*env
, int cwp1
)
1249 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1250 #ifdef TARGET_SPARC64
1252 sp_ptr
+= SPARC64_STACK_BIAS
;
1254 #if defined(DEBUG_WIN)
1255 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" save_cwp=%d\n",
1258 for(i
= 0; i
< 16; i
++) {
1259 /* FIXME - what to do if put_user() fails? */
1260 put_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1261 sp_ptr
+= sizeof(abi_ulong
);
1265 static void save_window(CPUSPARCState
*env
)
1267 #ifndef TARGET_SPARC64
1268 unsigned int new_wim
;
1269 new_wim
= ((env
->wim
>> 1) | (env
->wim
<< (env
->nwindows
- 1))) &
1270 ((1LL << env
->nwindows
) - 1);
1271 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1274 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1280 static void restore_window(CPUSPARCState
*env
)
1282 #ifndef TARGET_SPARC64
1283 unsigned int new_wim
;
1285 unsigned int i
, cwp1
;
1288 #ifndef TARGET_SPARC64
1289 new_wim
= ((env
->wim
<< 1) | (env
->wim
>> (env
->nwindows
- 1))) &
1290 ((1LL << env
->nwindows
) - 1);
1293 /* restore the invalid window */
1294 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1295 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1296 #ifdef TARGET_SPARC64
1298 sp_ptr
+= SPARC64_STACK_BIAS
;
1300 #if defined(DEBUG_WIN)
1301 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" load_cwp=%d\n",
1304 for(i
= 0; i
< 16; i
++) {
1305 /* FIXME - what to do if get_user() fails? */
1306 get_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1307 sp_ptr
+= sizeof(abi_ulong
);
1309 #ifdef TARGET_SPARC64
1311 if (env
->cleanwin
< env
->nwindows
- 1)
1319 static void flush_windows(CPUSPARCState
*env
)
1325 /* if restore would invoke restore_window(), then we can stop */
1326 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ offset
);
1327 #ifndef TARGET_SPARC64
1328 if (env
->wim
& (1 << cwp1
))
1331 if (env
->canrestore
== 0)
1336 save_window_offset(env
, cwp1
);
1339 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1340 #ifndef TARGET_SPARC64
1341 /* set wim so that restore will reload the registers */
1342 env
->wim
= 1 << cwp1
;
1344 #if defined(DEBUG_WIN)
1345 printf("flush_windows: nb=%d\n", offset
- 1);
1349 void cpu_loop (CPUSPARCState
*env
)
1351 CPUState
*cs
= CPU(sparc_env_get_cpu(env
));
1354 target_siginfo_t info
;
1358 trapnr
= cpu_sparc_exec(cs
);
1361 /* Compute PSR before exposing state. */
1362 if (env
->cc_op
!= CC_OP_FLAGS
) {
1367 #ifndef TARGET_SPARC64
1374 ret
= do_syscall (env
, env
->gregs
[1],
1375 env
->regwptr
[0], env
->regwptr
[1],
1376 env
->regwptr
[2], env
->regwptr
[3],
1377 env
->regwptr
[4], env
->regwptr
[5],
1379 if ((abi_ulong
)ret
>= (abi_ulong
)(-515)) {
1380 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1381 env
->xcc
|= PSR_CARRY
;
1383 env
->psr
|= PSR_CARRY
;
1387 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1388 env
->xcc
&= ~PSR_CARRY
;
1390 env
->psr
&= ~PSR_CARRY
;
1393 env
->regwptr
[0] = ret
;
1394 /* next instruction */
1396 env
->npc
= env
->npc
+ 4;
1398 case 0x83: /* flush windows */
1403 /* next instruction */
1405 env
->npc
= env
->npc
+ 4;
1407 #ifndef TARGET_SPARC64
1408 case TT_WIN_OVF
: /* window overflow */
1411 case TT_WIN_UNF
: /* window underflow */
1412 restore_window(env
);
1417 info
.si_signo
= TARGET_SIGSEGV
;
1419 /* XXX: check env->error_code */
1420 info
.si_code
= TARGET_SEGV_MAPERR
;
1421 info
._sifields
._sigfault
._addr
= env
->mmuregs
[4];
1422 queue_signal(env
, info
.si_signo
, &info
);
1426 case TT_SPILL
: /* window overflow */
1429 case TT_FILL
: /* window underflow */
1430 restore_window(env
);
1435 info
.si_signo
= TARGET_SIGSEGV
;
1437 /* XXX: check env->error_code */
1438 info
.si_code
= TARGET_SEGV_MAPERR
;
1439 if (trapnr
== TT_DFAULT
)
1440 info
._sifields
._sigfault
._addr
= env
->dmmuregs
[4];
1442 info
._sifields
._sigfault
._addr
= cpu_tsptr(env
)->tpc
;
1443 queue_signal(env
, info
.si_signo
, &info
);
1446 #ifndef TARGET_ABI32
1449 sparc64_get_context(env
);
1453 sparc64_set_context(env
);
1457 case EXCP_INTERRUPT
:
1458 /* just indicate that signals should be handled asap */
1462 info
.si_signo
= TARGET_SIGILL
;
1464 info
.si_code
= TARGET_ILL_ILLOPC
;
1465 info
._sifields
._sigfault
._addr
= env
->pc
;
1466 queue_signal(env
, info
.si_signo
, &info
);
1473 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1476 info
.si_signo
= sig
;
1478 info
.si_code
= TARGET_TRAP_BRKPT
;
1479 queue_signal(env
, info
.si_signo
, &info
);
1484 printf ("Unhandled trap: 0x%x\n", trapnr
);
1485 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1488 process_pending_signals (env
);
1495 static inline uint64_t cpu_ppc_get_tb(CPUPPCState
*env
)
1497 return cpu_get_host_ticks();
1500 uint64_t cpu_ppc_load_tbl(CPUPPCState
*env
)
1502 return cpu_ppc_get_tb(env
);
1505 uint32_t cpu_ppc_load_tbu(CPUPPCState
*env
)
1507 return cpu_ppc_get_tb(env
) >> 32;
1510 uint64_t cpu_ppc_load_atbl(CPUPPCState
*env
)
1512 return cpu_ppc_get_tb(env
);
1515 uint32_t cpu_ppc_load_atbu(CPUPPCState
*env
)
1517 return cpu_ppc_get_tb(env
) >> 32;
/* PPC601 RTC upper register: reads identically to TBU, so alias the
 * implementation instead of duplicating it.  */
uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env)
    __attribute__ (( alias ("cpu_ppc_load_tbu") ));
1523 uint32_t cpu_ppc601_load_rtcl(CPUPPCState
*env
)
1525 return cpu_ppc_load_tbl(env
) & 0x3FFFFF80;
1528 /* XXX: to be fixed */
1529 int ppc_dcr_read (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t *valp
)
1534 int ppc_dcr_write (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t val
)
1539 static int do_store_exclusive(CPUPPCState
*env
)
1542 target_ulong page_addr
;
1543 target_ulong val
, val2
__attribute__((unused
)) = 0;
1547 addr
= env
->reserve_ea
;
1548 page_addr
= addr
& TARGET_PAGE_MASK
;
1551 flags
= page_get_flags(page_addr
);
1552 if ((flags
& PAGE_READ
) == 0) {
1555 int reg
= env
->reserve_info
& 0x1f;
1556 int size
= env
->reserve_info
>> 5;
1559 if (addr
== env
->reserve_addr
) {
1561 case 1: segv
= get_user_u8(val
, addr
); break;
1562 case 2: segv
= get_user_u16(val
, addr
); break;
1563 case 4: segv
= get_user_u32(val
, addr
); break;
1564 #if defined(TARGET_PPC64)
1565 case 8: segv
= get_user_u64(val
, addr
); break;
1567 segv
= get_user_u64(val
, addr
);
1569 segv
= get_user_u64(val2
, addr
+ 8);
1576 if (!segv
&& val
== env
->reserve_val
) {
1577 val
= env
->gpr
[reg
];
1579 case 1: segv
= put_user_u8(val
, addr
); break;
1580 case 2: segv
= put_user_u16(val
, addr
); break;
1581 case 4: segv
= put_user_u32(val
, addr
); break;
1582 #if defined(TARGET_PPC64)
1583 case 8: segv
= put_user_u64(val
, addr
); break;
1585 if (val2
== env
->reserve_val2
) {
1588 val
= env
->gpr
[reg
+1];
1590 val2
= env
->gpr
[reg
+1];
1592 segv
= put_user_u64(val
, addr
);
1594 segv
= put_user_u64(val2
, addr
+ 8);
1607 env
->crf
[0] = (stored
<< 1) | xer_so
;
1608 env
->reserve_addr
= (target_ulong
)-1;
1618 void cpu_loop(CPUPPCState
*env
)
1620 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
1621 target_siginfo_t info
;
1627 trapnr
= cpu_ppc_exec(cs
);
1630 case POWERPC_EXCP_NONE
:
1633 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1634 cpu_abort(cs
, "Critical interrupt while in user mode. "
1637 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1638 cpu_abort(cs
, "Machine check exception while in user mode. "
1641 case POWERPC_EXCP_DSI
: /* Data storage exception */
1642 EXCP_DUMP(env
, "Invalid data memory access: 0x" TARGET_FMT_lx
"\n",
1644 /* XXX: check this. Seems bugged */
1645 switch (env
->error_code
& 0xFF000000) {
1647 info
.si_signo
= TARGET_SIGSEGV
;
1649 info
.si_code
= TARGET_SEGV_MAPERR
;
1652 info
.si_signo
= TARGET_SIGILL
;
1654 info
.si_code
= TARGET_ILL_ILLADR
;
1657 info
.si_signo
= TARGET_SIGSEGV
;
1659 info
.si_code
= TARGET_SEGV_ACCERR
;
1662 /* Let's send a regular segfault... */
1663 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1665 info
.si_signo
= TARGET_SIGSEGV
;
1667 info
.si_code
= TARGET_SEGV_MAPERR
;
1670 info
._sifields
._sigfault
._addr
= env
->nip
;
1671 queue_signal(env
, info
.si_signo
, &info
);
1673 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1674 EXCP_DUMP(env
, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1675 "\n", env
->spr
[SPR_SRR0
]);
1676 /* XXX: check this */
1677 switch (env
->error_code
& 0xFF000000) {
1679 info
.si_signo
= TARGET_SIGSEGV
;
1681 info
.si_code
= TARGET_SEGV_MAPERR
;
1685 info
.si_signo
= TARGET_SIGSEGV
;
1687 info
.si_code
= TARGET_SEGV_ACCERR
;
1690 /* Let's send a regular segfault... */
1691 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1693 info
.si_signo
= TARGET_SIGSEGV
;
1695 info
.si_code
= TARGET_SEGV_MAPERR
;
1698 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1699 queue_signal(env
, info
.si_signo
, &info
);
1701 case POWERPC_EXCP_EXTERNAL
: /* External input */
1702 cpu_abort(cs
, "External interrupt while in user mode. "
1705 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1706 EXCP_DUMP(env
, "Unaligned memory access\n");
1707 /* XXX: check this */
1708 info
.si_signo
= TARGET_SIGBUS
;
1710 info
.si_code
= TARGET_BUS_ADRALN
;
1711 info
._sifields
._sigfault
._addr
= env
->nip
;
1712 queue_signal(env
, info
.si_signo
, &info
);
1714 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1715 /* XXX: check this */
1716 switch (env
->error_code
& ~0xF) {
1717 case POWERPC_EXCP_FP
:
1718 EXCP_DUMP(env
, "Floating point program exception\n");
1719 info
.si_signo
= TARGET_SIGFPE
;
1721 switch (env
->error_code
& 0xF) {
1722 case POWERPC_EXCP_FP_OX
:
1723 info
.si_code
= TARGET_FPE_FLTOVF
;
1725 case POWERPC_EXCP_FP_UX
:
1726 info
.si_code
= TARGET_FPE_FLTUND
;
1728 case POWERPC_EXCP_FP_ZX
:
1729 case POWERPC_EXCP_FP_VXZDZ
:
1730 info
.si_code
= TARGET_FPE_FLTDIV
;
1732 case POWERPC_EXCP_FP_XX
:
1733 info
.si_code
= TARGET_FPE_FLTRES
;
1735 case POWERPC_EXCP_FP_VXSOFT
:
1736 info
.si_code
= TARGET_FPE_FLTINV
;
1738 case POWERPC_EXCP_FP_VXSNAN
:
1739 case POWERPC_EXCP_FP_VXISI
:
1740 case POWERPC_EXCP_FP_VXIDI
:
1741 case POWERPC_EXCP_FP_VXIMZ
:
1742 case POWERPC_EXCP_FP_VXVC
:
1743 case POWERPC_EXCP_FP_VXSQRT
:
1744 case POWERPC_EXCP_FP_VXCVI
:
1745 info
.si_code
= TARGET_FPE_FLTSUB
;
1748 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1753 case POWERPC_EXCP_INVAL
:
1754 EXCP_DUMP(env
, "Invalid instruction\n");
1755 info
.si_signo
= TARGET_SIGILL
;
1757 switch (env
->error_code
& 0xF) {
1758 case POWERPC_EXCP_INVAL_INVAL
:
1759 info
.si_code
= TARGET_ILL_ILLOPC
;
1761 case POWERPC_EXCP_INVAL_LSWX
:
1762 info
.si_code
= TARGET_ILL_ILLOPN
;
1764 case POWERPC_EXCP_INVAL_SPR
:
1765 info
.si_code
= TARGET_ILL_PRVREG
;
1767 case POWERPC_EXCP_INVAL_FP
:
1768 info
.si_code
= TARGET_ILL_COPROC
;
1771 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1772 env
->error_code
& 0xF);
1773 info
.si_code
= TARGET_ILL_ILLADR
;
1777 case POWERPC_EXCP_PRIV
:
1778 EXCP_DUMP(env
, "Privilege violation\n");
1779 info
.si_signo
= TARGET_SIGILL
;
1781 switch (env
->error_code
& 0xF) {
1782 case POWERPC_EXCP_PRIV_OPC
:
1783 info
.si_code
= TARGET_ILL_PRVOPC
;
1785 case POWERPC_EXCP_PRIV_REG
:
1786 info
.si_code
= TARGET_ILL_PRVREG
;
1789 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1790 env
->error_code
& 0xF);
1791 info
.si_code
= TARGET_ILL_PRVOPC
;
1795 case POWERPC_EXCP_TRAP
:
1796 cpu_abort(cs
, "Tried to call a TRAP\n");
1799 /* Should not happen ! */
1800 cpu_abort(cs
, "Unknown program exception (%02x)\n",
1804 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1805 queue_signal(env
, info
.si_signo
, &info
);
1807 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1808 EXCP_DUMP(env
, "No floating point allowed\n");
1809 info
.si_signo
= TARGET_SIGILL
;
1811 info
.si_code
= TARGET_ILL_COPROC
;
1812 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1813 queue_signal(env
, info
.si_signo
, &info
);
1815 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1816 cpu_abort(cs
, "Syscall exception while in user mode. "
1819 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1820 EXCP_DUMP(env
, "No APU instruction allowed\n");
1821 info
.si_signo
= TARGET_SIGILL
;
1823 info
.si_code
= TARGET_ILL_COPROC
;
1824 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1825 queue_signal(env
, info
.si_signo
, &info
);
1827 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1828 cpu_abort(cs
, "Decrementer interrupt while in user mode. "
1831 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1832 cpu_abort(cs
, "Fix interval timer interrupt while in user mode. "
1835 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1836 cpu_abort(cs
, "Watchdog timer interrupt while in user mode. "
1839 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1840 cpu_abort(cs
, "Data TLB exception while in user mode. "
1843 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1844 cpu_abort(cs
, "Instruction TLB exception while in user mode. "
1847 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1848 EXCP_DUMP(env
, "No SPE/floating-point instruction allowed\n");
1849 info
.si_signo
= TARGET_SIGILL
;
1851 info
.si_code
= TARGET_ILL_COPROC
;
1852 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1853 queue_signal(env
, info
.si_signo
, &info
);
1855 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1856 cpu_abort(cs
, "Embedded floating-point data IRQ not handled\n");
1858 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1859 cpu_abort(cs
, "Embedded floating-point round IRQ not handled\n");
1861 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1862 cpu_abort(cs
, "Performance monitor exception not handled\n");
1864 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1865 cpu_abort(cs
, "Doorbell interrupt while in user mode. "
1868 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1869 cpu_abort(cs
, "Doorbell critical interrupt while in user mode. "
1872 case POWERPC_EXCP_RESET
: /* System reset exception */
1873 cpu_abort(cs
, "Reset interrupt while in user mode. "
1876 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1877 cpu_abort(cs
, "Data segment exception while in user mode. "
1880 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1881 cpu_abort(cs
, "Instruction segment exception "
1882 "while in user mode. Aborting\n");
1884 /* PowerPC 64 with hypervisor mode support */
1885 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1886 cpu_abort(cs
, "Hypervisor decrementer interrupt "
1887 "while in user mode. Aborting\n");
1889 case POWERPC_EXCP_TRACE
: /* Trace exception */
1891 * we use this exception to emulate step-by-step execution mode.
1894 /* PowerPC 64 with hypervisor mode support */
1895 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1896 cpu_abort(cs
, "Hypervisor data storage exception "
1897 "while in user mode. Aborting\n");
1899 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1900 cpu_abort(cs
, "Hypervisor instruction storage exception "
1901 "while in user mode. Aborting\n");
1903 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1904 cpu_abort(cs
, "Hypervisor data segment exception "
1905 "while in user mode. Aborting\n");
1907 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1908 cpu_abort(cs
, "Hypervisor instruction segment exception "
1909 "while in user mode. Aborting\n");
1911 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1912 EXCP_DUMP(env
, "No Altivec instructions allowed\n");
1913 info
.si_signo
= TARGET_SIGILL
;
1915 info
.si_code
= TARGET_ILL_COPROC
;
1916 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1917 queue_signal(env
, info
.si_signo
, &info
);
1919 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1920 cpu_abort(cs
, "Programmable interval timer interrupt "
1921 "while in user mode. Aborting\n");
1923 case POWERPC_EXCP_IO
: /* IO error exception */
1924 cpu_abort(cs
, "IO error exception while in user mode. "
1927 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1928 cpu_abort(cs
, "Run mode exception while in user mode. "
1931 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1932 cpu_abort(cs
, "Emulation trap exception not handled\n");
1934 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1935 cpu_abort(cs
, "Instruction fetch TLB exception "
1936 "while in user-mode. Aborting");
1938 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1939 cpu_abort(cs
, "Data load TLB exception while in user-mode. "
1942 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
1943 cpu_abort(cs
, "Data store TLB exception while in user-mode. "
1946 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
1947 cpu_abort(cs
, "Floating-point assist exception not handled\n");
1949 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1950 cpu_abort(cs
, "Instruction address breakpoint exception "
1953 case POWERPC_EXCP_SMI
: /* System management interrupt */
1954 cpu_abort(cs
, "System management interrupt while in user mode. "
1957 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1958 cpu_abort(cs
, "Thermal interrupt interrupt while in user mode. "
1961 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
1962 cpu_abort(cs
, "Performance monitor exception not handled\n");
1964 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1965 cpu_abort(cs
, "Vector assist exception not handled\n");
1967 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
1968 cpu_abort(cs
, "Soft patch exception not handled\n");
1970 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1971 cpu_abort(cs
, "Maintenance exception while in user mode. "
1974 case POWERPC_EXCP_STOP
: /* stop translation */
1975 /* We did invalidate the instruction cache. Go on */
1977 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
1978 /* We just stopped because of a branch. Go on */
1980 case POWERPC_EXCP_SYSCALL_USER
:
1981 /* system call in user-mode emulation */
1983 * PPC ABI uses overflow flag in cr0 to signal an error
1986 env
->crf
[0] &= ~0x1;
1987 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
1988 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
1990 if (ret
== (target_ulong
)(-TARGET_QEMU_ESIGRETURN
)) {
1991 /* Returning from a successful sigreturn syscall.
1992 Avoid corrupting register state. */
1995 if (ret
> (target_ulong
)(-515)) {
2001 case POWERPC_EXCP_STCX
:
2002 if (do_store_exclusive(env
)) {
2003 info
.si_signo
= TARGET_SIGSEGV
;
2005 info
.si_code
= TARGET_SEGV_MAPERR
;
2006 info
._sifields
._sigfault
._addr
= env
->nip
;
2007 queue_signal(env
, info
.si_signo
, &info
);
2014 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2016 info
.si_signo
= sig
;
2018 info
.si_code
= TARGET_TRAP_BRKPT
;
2019 queue_signal(env
, info
.si_signo
, &info
);
2023 case EXCP_INTERRUPT
:
2024 /* just indicate that signals should be handled asap */
2027 cpu_abort(cs
, "Unknown exception 0x%d. Aborting\n", trapnr
);
2030 process_pending_signals(env
);
2037 # ifdef TARGET_ABI_MIPSO32
2038 # define MIPS_SYS(name, args) args,
2039 static const uint8_t mips_syscall_args
[] = {
2040 MIPS_SYS(sys_syscall
, 8) /* 4000 */
2041 MIPS_SYS(sys_exit
, 1)
2042 MIPS_SYS(sys_fork
, 0)
2043 MIPS_SYS(sys_read
, 3)
2044 MIPS_SYS(sys_write
, 3)
2045 MIPS_SYS(sys_open
, 3) /* 4005 */
2046 MIPS_SYS(sys_close
, 1)
2047 MIPS_SYS(sys_waitpid
, 3)
2048 MIPS_SYS(sys_creat
, 2)
2049 MIPS_SYS(sys_link
, 2)
2050 MIPS_SYS(sys_unlink
, 1) /* 4010 */
2051 MIPS_SYS(sys_execve
, 0)
2052 MIPS_SYS(sys_chdir
, 1)
2053 MIPS_SYS(sys_time
, 1)
2054 MIPS_SYS(sys_mknod
, 3)
2055 MIPS_SYS(sys_chmod
, 2) /* 4015 */
2056 MIPS_SYS(sys_lchown
, 3)
2057 MIPS_SYS(sys_ni_syscall
, 0)
2058 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_stat */
2059 MIPS_SYS(sys_lseek
, 3)
2060 MIPS_SYS(sys_getpid
, 0) /* 4020 */
2061 MIPS_SYS(sys_mount
, 5)
2062 MIPS_SYS(sys_umount
, 1)
2063 MIPS_SYS(sys_setuid
, 1)
2064 MIPS_SYS(sys_getuid
, 0)
2065 MIPS_SYS(sys_stime
, 1) /* 4025 */
2066 MIPS_SYS(sys_ptrace
, 4)
2067 MIPS_SYS(sys_alarm
, 1)
2068 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_fstat */
2069 MIPS_SYS(sys_pause
, 0)
2070 MIPS_SYS(sys_utime
, 2) /* 4030 */
2071 MIPS_SYS(sys_ni_syscall
, 0)
2072 MIPS_SYS(sys_ni_syscall
, 0)
2073 MIPS_SYS(sys_access
, 2)
2074 MIPS_SYS(sys_nice
, 1)
2075 MIPS_SYS(sys_ni_syscall
, 0) /* 4035 */
2076 MIPS_SYS(sys_sync
, 0)
2077 MIPS_SYS(sys_kill
, 2)
2078 MIPS_SYS(sys_rename
, 2)
2079 MIPS_SYS(sys_mkdir
, 2)
2080 MIPS_SYS(sys_rmdir
, 1) /* 4040 */
2081 MIPS_SYS(sys_dup
, 1)
2082 MIPS_SYS(sys_pipe
, 0)
2083 MIPS_SYS(sys_times
, 1)
2084 MIPS_SYS(sys_ni_syscall
, 0)
2085 MIPS_SYS(sys_brk
, 1) /* 4045 */
2086 MIPS_SYS(sys_setgid
, 1)
2087 MIPS_SYS(sys_getgid
, 0)
2088 MIPS_SYS(sys_ni_syscall
, 0) /* was signal(2) */
2089 MIPS_SYS(sys_geteuid
, 0)
2090 MIPS_SYS(sys_getegid
, 0) /* 4050 */
2091 MIPS_SYS(sys_acct
, 0)
2092 MIPS_SYS(sys_umount2
, 2)
2093 MIPS_SYS(sys_ni_syscall
, 0)
2094 MIPS_SYS(sys_ioctl
, 3)
2095 MIPS_SYS(sys_fcntl
, 3) /* 4055 */
2096 MIPS_SYS(sys_ni_syscall
, 2)
2097 MIPS_SYS(sys_setpgid
, 2)
2098 MIPS_SYS(sys_ni_syscall
, 0)
2099 MIPS_SYS(sys_olduname
, 1)
2100 MIPS_SYS(sys_umask
, 1) /* 4060 */
2101 MIPS_SYS(sys_chroot
, 1)
2102 MIPS_SYS(sys_ustat
, 2)
2103 MIPS_SYS(sys_dup2
, 2)
2104 MIPS_SYS(sys_getppid
, 0)
2105 MIPS_SYS(sys_getpgrp
, 0) /* 4065 */
2106 MIPS_SYS(sys_setsid
, 0)
2107 MIPS_SYS(sys_sigaction
, 3)
2108 MIPS_SYS(sys_sgetmask
, 0)
2109 MIPS_SYS(sys_ssetmask
, 1)
2110 MIPS_SYS(sys_setreuid
, 2) /* 4070 */
2111 MIPS_SYS(sys_setregid
, 2)
2112 MIPS_SYS(sys_sigsuspend
, 0)
2113 MIPS_SYS(sys_sigpending
, 1)
2114 MIPS_SYS(sys_sethostname
, 2)
2115 MIPS_SYS(sys_setrlimit
, 2) /* 4075 */
2116 MIPS_SYS(sys_getrlimit
, 2)
2117 MIPS_SYS(sys_getrusage
, 2)
2118 MIPS_SYS(sys_gettimeofday
, 2)
2119 MIPS_SYS(sys_settimeofday
, 2)
2120 MIPS_SYS(sys_getgroups
, 2) /* 4080 */
2121 MIPS_SYS(sys_setgroups
, 2)
2122 MIPS_SYS(sys_ni_syscall
, 0) /* old_select */
2123 MIPS_SYS(sys_symlink
, 2)
2124 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_lstat */
2125 MIPS_SYS(sys_readlink
, 3) /* 4085 */
2126 MIPS_SYS(sys_uselib
, 1)
2127 MIPS_SYS(sys_swapon
, 2)
2128 MIPS_SYS(sys_reboot
, 3)
2129 MIPS_SYS(old_readdir
, 3)
2130 MIPS_SYS(old_mmap
, 6) /* 4090 */
2131 MIPS_SYS(sys_munmap
, 2)
2132 MIPS_SYS(sys_truncate
, 2)
2133 MIPS_SYS(sys_ftruncate
, 2)
2134 MIPS_SYS(sys_fchmod
, 2)
2135 MIPS_SYS(sys_fchown
, 3) /* 4095 */
2136 MIPS_SYS(sys_getpriority
, 2)
2137 MIPS_SYS(sys_setpriority
, 3)
2138 MIPS_SYS(sys_ni_syscall
, 0)
2139 MIPS_SYS(sys_statfs
, 2)
2140 MIPS_SYS(sys_fstatfs
, 2) /* 4100 */
2141 MIPS_SYS(sys_ni_syscall
, 0) /* was ioperm(2) */
2142 MIPS_SYS(sys_socketcall
, 2)
2143 MIPS_SYS(sys_syslog
, 3)
2144 MIPS_SYS(sys_setitimer
, 3)
2145 MIPS_SYS(sys_getitimer
, 2) /* 4105 */
2146 MIPS_SYS(sys_newstat
, 2)
2147 MIPS_SYS(sys_newlstat
, 2)
2148 MIPS_SYS(sys_newfstat
, 2)
2149 MIPS_SYS(sys_uname
, 1)
2150 MIPS_SYS(sys_ni_syscall
, 0) /* 4110 was iopl(2) */
2151 MIPS_SYS(sys_vhangup
, 0)
2152 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_idle() */
2153 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_vm86 */
2154 MIPS_SYS(sys_wait4
, 4)
2155 MIPS_SYS(sys_swapoff
, 1) /* 4115 */
2156 MIPS_SYS(sys_sysinfo
, 1)
2157 MIPS_SYS(sys_ipc
, 6)
2158 MIPS_SYS(sys_fsync
, 1)
2159 MIPS_SYS(sys_sigreturn
, 0)
2160 MIPS_SYS(sys_clone
, 6) /* 4120 */
2161 MIPS_SYS(sys_setdomainname
, 2)
2162 MIPS_SYS(sys_newuname
, 1)
2163 MIPS_SYS(sys_ni_syscall
, 0) /* sys_modify_ldt */
2164 MIPS_SYS(sys_adjtimex
, 1)
2165 MIPS_SYS(sys_mprotect
, 3) /* 4125 */
2166 MIPS_SYS(sys_sigprocmask
, 3)
2167 MIPS_SYS(sys_ni_syscall
, 0) /* was create_module */
2168 MIPS_SYS(sys_init_module
, 5)
2169 MIPS_SYS(sys_delete_module
, 1)
2170 MIPS_SYS(sys_ni_syscall
, 0) /* 4130 was get_kernel_syms */
2171 MIPS_SYS(sys_quotactl
, 0)
2172 MIPS_SYS(sys_getpgid
, 1)
2173 MIPS_SYS(sys_fchdir
, 1)
2174 MIPS_SYS(sys_bdflush
, 2)
2175 MIPS_SYS(sys_sysfs
, 3) /* 4135 */
2176 MIPS_SYS(sys_personality
, 1)
2177 MIPS_SYS(sys_ni_syscall
, 0) /* for afs_syscall */
2178 MIPS_SYS(sys_setfsuid
, 1)
2179 MIPS_SYS(sys_setfsgid
, 1)
2180 MIPS_SYS(sys_llseek
, 5) /* 4140 */
2181 MIPS_SYS(sys_getdents
, 3)
2182 MIPS_SYS(sys_select
, 5)
2183 MIPS_SYS(sys_flock
, 2)
2184 MIPS_SYS(sys_msync
, 3)
2185 MIPS_SYS(sys_readv
, 3) /* 4145 */
2186 MIPS_SYS(sys_writev
, 3)
2187 MIPS_SYS(sys_cacheflush
, 3)
2188 MIPS_SYS(sys_cachectl
, 3)
2189 MIPS_SYS(sys_sysmips
, 4)
2190 MIPS_SYS(sys_ni_syscall
, 0) /* 4150 */
2191 MIPS_SYS(sys_getsid
, 1)
2192 MIPS_SYS(sys_fdatasync
, 0)
2193 MIPS_SYS(sys_sysctl
, 1)
2194 MIPS_SYS(sys_mlock
, 2)
2195 MIPS_SYS(sys_munlock
, 2) /* 4155 */
2196 MIPS_SYS(sys_mlockall
, 1)
2197 MIPS_SYS(sys_munlockall
, 0)
2198 MIPS_SYS(sys_sched_setparam
, 2)
2199 MIPS_SYS(sys_sched_getparam
, 2)
2200 MIPS_SYS(sys_sched_setscheduler
, 3) /* 4160 */
2201 MIPS_SYS(sys_sched_getscheduler
, 1)
2202 MIPS_SYS(sys_sched_yield
, 0)
2203 MIPS_SYS(sys_sched_get_priority_max
, 1)
2204 MIPS_SYS(sys_sched_get_priority_min
, 1)
2205 MIPS_SYS(sys_sched_rr_get_interval
, 2) /* 4165 */
2206 MIPS_SYS(sys_nanosleep
, 2)
2207 MIPS_SYS(sys_mremap
, 5)
2208 MIPS_SYS(sys_accept
, 3)
2209 MIPS_SYS(sys_bind
, 3)
2210 MIPS_SYS(sys_connect
, 3) /* 4170 */
2211 MIPS_SYS(sys_getpeername
, 3)
2212 MIPS_SYS(sys_getsockname
, 3)
2213 MIPS_SYS(sys_getsockopt
, 5)
2214 MIPS_SYS(sys_listen
, 2)
2215 MIPS_SYS(sys_recv
, 4) /* 4175 */
2216 MIPS_SYS(sys_recvfrom
, 6)
2217 MIPS_SYS(sys_recvmsg
, 3)
2218 MIPS_SYS(sys_send
, 4)
2219 MIPS_SYS(sys_sendmsg
, 3)
2220 MIPS_SYS(sys_sendto
, 6) /* 4180 */
2221 MIPS_SYS(sys_setsockopt
, 5)
2222 MIPS_SYS(sys_shutdown
, 2)
2223 MIPS_SYS(sys_socket
, 3)
2224 MIPS_SYS(sys_socketpair
, 4)
2225 MIPS_SYS(sys_setresuid
, 3) /* 4185 */
2226 MIPS_SYS(sys_getresuid
, 3)
2227 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_query_module */
2228 MIPS_SYS(sys_poll
, 3)
2229 MIPS_SYS(sys_nfsservctl
, 3)
2230 MIPS_SYS(sys_setresgid
, 3) /* 4190 */
2231 MIPS_SYS(sys_getresgid
, 3)
2232 MIPS_SYS(sys_prctl
, 5)
2233 MIPS_SYS(sys_rt_sigreturn
, 0)
2234 MIPS_SYS(sys_rt_sigaction
, 4)
2235 MIPS_SYS(sys_rt_sigprocmask
, 4) /* 4195 */
2236 MIPS_SYS(sys_rt_sigpending
, 2)
2237 MIPS_SYS(sys_rt_sigtimedwait
, 4)
2238 MIPS_SYS(sys_rt_sigqueueinfo
, 3)
2239 MIPS_SYS(sys_rt_sigsuspend
, 0)
2240 MIPS_SYS(sys_pread64
, 6) /* 4200 */
2241 MIPS_SYS(sys_pwrite64
, 6)
2242 MIPS_SYS(sys_chown
, 3)
2243 MIPS_SYS(sys_getcwd
, 2)
2244 MIPS_SYS(sys_capget
, 2)
2245 MIPS_SYS(sys_capset
, 2) /* 4205 */
2246 MIPS_SYS(sys_sigaltstack
, 2)
2247 MIPS_SYS(sys_sendfile
, 4)
2248 MIPS_SYS(sys_ni_syscall
, 0)
2249 MIPS_SYS(sys_ni_syscall
, 0)
2250 MIPS_SYS(sys_mmap2
, 6) /* 4210 */
2251 MIPS_SYS(sys_truncate64
, 4)
2252 MIPS_SYS(sys_ftruncate64
, 4)
2253 MIPS_SYS(sys_stat64
, 2)
2254 MIPS_SYS(sys_lstat64
, 2)
2255 MIPS_SYS(sys_fstat64
, 2) /* 4215 */
2256 MIPS_SYS(sys_pivot_root
, 2)
2257 MIPS_SYS(sys_mincore
, 3)
2258 MIPS_SYS(sys_madvise
, 3)
2259 MIPS_SYS(sys_getdents64
, 3)
2260 MIPS_SYS(sys_fcntl64
, 3) /* 4220 */
2261 MIPS_SYS(sys_ni_syscall
, 0)
2262 MIPS_SYS(sys_gettid
, 0)
2263 MIPS_SYS(sys_readahead
, 5)
2264 MIPS_SYS(sys_setxattr
, 5)
2265 MIPS_SYS(sys_lsetxattr
, 5) /* 4225 */
2266 MIPS_SYS(sys_fsetxattr
, 5)
2267 MIPS_SYS(sys_getxattr
, 4)
2268 MIPS_SYS(sys_lgetxattr
, 4)
2269 MIPS_SYS(sys_fgetxattr
, 4)
2270 MIPS_SYS(sys_listxattr
, 3) /* 4230 */
2271 MIPS_SYS(sys_llistxattr
, 3)
2272 MIPS_SYS(sys_flistxattr
, 3)
2273 MIPS_SYS(sys_removexattr
, 2)
2274 MIPS_SYS(sys_lremovexattr
, 2)
2275 MIPS_SYS(sys_fremovexattr
, 2) /* 4235 */
2276 MIPS_SYS(sys_tkill
, 2)
2277 MIPS_SYS(sys_sendfile64
, 5)
2278 MIPS_SYS(sys_futex
, 6)
2279 MIPS_SYS(sys_sched_setaffinity
, 3)
2280 MIPS_SYS(sys_sched_getaffinity
, 3) /* 4240 */
2281 MIPS_SYS(sys_io_setup
, 2)
2282 MIPS_SYS(sys_io_destroy
, 1)
2283 MIPS_SYS(sys_io_getevents
, 5)
2284 MIPS_SYS(sys_io_submit
, 3)
2285 MIPS_SYS(sys_io_cancel
, 3) /* 4245 */
2286 MIPS_SYS(sys_exit_group
, 1)
2287 MIPS_SYS(sys_lookup_dcookie
, 3)
2288 MIPS_SYS(sys_epoll_create
, 1)
2289 MIPS_SYS(sys_epoll_ctl
, 4)
2290 MIPS_SYS(sys_epoll_wait
, 3) /* 4250 */
2291 MIPS_SYS(sys_remap_file_pages
, 5)
2292 MIPS_SYS(sys_set_tid_address
, 1)
2293 MIPS_SYS(sys_restart_syscall
, 0)
2294 MIPS_SYS(sys_fadvise64_64
, 7)
2295 MIPS_SYS(sys_statfs64
, 3) /* 4255 */
2296 MIPS_SYS(sys_fstatfs64
, 2)
2297 MIPS_SYS(sys_timer_create
, 3)
2298 MIPS_SYS(sys_timer_settime
, 4)
2299 MIPS_SYS(sys_timer_gettime
, 2)
2300 MIPS_SYS(sys_timer_getoverrun
, 1) /* 4260 */
2301 MIPS_SYS(sys_timer_delete
, 1)
2302 MIPS_SYS(sys_clock_settime
, 2)
2303 MIPS_SYS(sys_clock_gettime
, 2)
2304 MIPS_SYS(sys_clock_getres
, 2)
2305 MIPS_SYS(sys_clock_nanosleep
, 4) /* 4265 */
2306 MIPS_SYS(sys_tgkill
, 3)
2307 MIPS_SYS(sys_utimes
, 2)
2308 MIPS_SYS(sys_mbind
, 4)
2309 MIPS_SYS(sys_ni_syscall
, 0) /* sys_get_mempolicy */
2310 MIPS_SYS(sys_ni_syscall
, 0) /* 4270 sys_set_mempolicy */
2311 MIPS_SYS(sys_mq_open
, 4)
2312 MIPS_SYS(sys_mq_unlink
, 1)
2313 MIPS_SYS(sys_mq_timedsend
, 5)
2314 MIPS_SYS(sys_mq_timedreceive
, 5)
2315 MIPS_SYS(sys_mq_notify
, 2) /* 4275 */
2316 MIPS_SYS(sys_mq_getsetattr
, 3)
2317 MIPS_SYS(sys_ni_syscall
, 0) /* sys_vserver */
2318 MIPS_SYS(sys_waitid
, 4)
2319 MIPS_SYS(sys_ni_syscall
, 0) /* available, was setaltroot */
2320 MIPS_SYS(sys_add_key
, 5)
2321 MIPS_SYS(sys_request_key
, 4)
2322 MIPS_SYS(sys_keyctl
, 5)
2323 MIPS_SYS(sys_set_thread_area
, 1)
2324 MIPS_SYS(sys_inotify_init
, 0)
2325 MIPS_SYS(sys_inotify_add_watch
, 3) /* 4285 */
2326 MIPS_SYS(sys_inotify_rm_watch
, 2)
2327 MIPS_SYS(sys_migrate_pages
, 4)
2328 MIPS_SYS(sys_openat
, 4)
2329 MIPS_SYS(sys_mkdirat
, 3)
2330 MIPS_SYS(sys_mknodat
, 4) /* 4290 */
2331 MIPS_SYS(sys_fchownat
, 5)
2332 MIPS_SYS(sys_futimesat
, 3)
2333 MIPS_SYS(sys_fstatat64
, 4)
2334 MIPS_SYS(sys_unlinkat
, 3)
2335 MIPS_SYS(sys_renameat
, 4) /* 4295 */
2336 MIPS_SYS(sys_linkat
, 5)
2337 MIPS_SYS(sys_symlinkat
, 3)
2338 MIPS_SYS(sys_readlinkat
, 4)
2339 MIPS_SYS(sys_fchmodat
, 3)
2340 MIPS_SYS(sys_faccessat
, 3) /* 4300 */
2341 MIPS_SYS(sys_pselect6
, 6)
2342 MIPS_SYS(sys_ppoll
, 5)
2343 MIPS_SYS(sys_unshare
, 1)
2344 MIPS_SYS(sys_splice
, 6)
2345 MIPS_SYS(sys_sync_file_range
, 7) /* 4305 */
2346 MIPS_SYS(sys_tee
, 4)
2347 MIPS_SYS(sys_vmsplice
, 4)
2348 MIPS_SYS(sys_move_pages
, 6)
2349 MIPS_SYS(sys_set_robust_list
, 2)
2350 MIPS_SYS(sys_get_robust_list
, 3) /* 4310 */
2351 MIPS_SYS(sys_kexec_load
, 4)
2352 MIPS_SYS(sys_getcpu
, 3)
2353 MIPS_SYS(sys_epoll_pwait
, 6)
2354 MIPS_SYS(sys_ioprio_set
, 3)
2355 MIPS_SYS(sys_ioprio_get
, 2)
2356 MIPS_SYS(sys_utimensat
, 4)
2357 MIPS_SYS(sys_signalfd
, 3)
2358 MIPS_SYS(sys_ni_syscall
, 0) /* was timerfd */
2359 MIPS_SYS(sys_eventfd
, 1)
2360 MIPS_SYS(sys_fallocate
, 6) /* 4320 */
2361 MIPS_SYS(sys_timerfd_create
, 2)
2362 MIPS_SYS(sys_timerfd_gettime
, 2)
2363 MIPS_SYS(sys_timerfd_settime
, 4)
2364 MIPS_SYS(sys_signalfd4
, 4)
2365 MIPS_SYS(sys_eventfd2
, 2) /* 4325 */
2366 MIPS_SYS(sys_epoll_create1
, 1)
2367 MIPS_SYS(sys_dup3
, 3)
2368 MIPS_SYS(sys_pipe2
, 2)
2369 MIPS_SYS(sys_inotify_init1
, 1)
2370 MIPS_SYS(sys_preadv
, 6) /* 4330 */
2371 MIPS_SYS(sys_pwritev
, 6)
2372 MIPS_SYS(sys_rt_tgsigqueueinfo
, 4)
2373 MIPS_SYS(sys_perf_event_open
, 5)
2374 MIPS_SYS(sys_accept4
, 4)
2375 MIPS_SYS(sys_recvmmsg
, 5) /* 4335 */
2376 MIPS_SYS(sys_fanotify_init
, 2)
2377 MIPS_SYS(sys_fanotify_mark
, 6)
2378 MIPS_SYS(sys_prlimit64
, 4)
2379 MIPS_SYS(sys_name_to_handle_at
, 5)
2380 MIPS_SYS(sys_open_by_handle_at
, 3) /* 4340 */
2381 MIPS_SYS(sys_clock_adjtime
, 2)
2382 MIPS_SYS(sys_syncfs
, 1)
2387 static int do_store_exclusive(CPUMIPSState
*env
)
2390 target_ulong page_addr
;
2398 page_addr
= addr
& TARGET_PAGE_MASK
;
2401 flags
= page_get_flags(page_addr
);
2402 if ((flags
& PAGE_READ
) == 0) {
2405 reg
= env
->llreg
& 0x1f;
2406 d
= (env
->llreg
& 0x20) != 0;
2408 segv
= get_user_s64(val
, addr
);
2410 segv
= get_user_s32(val
, addr
);
2413 if (val
!= env
->llval
) {
2414 env
->active_tc
.gpr
[reg
] = 0;
2417 segv
= put_user_u64(env
->llnewval
, addr
);
2419 segv
= put_user_u32(env
->llnewval
, addr
);
2422 env
->active_tc
.gpr
[reg
] = 1;
2429 env
->active_tc
.PC
+= 4;
2442 static int do_break(CPUMIPSState
*env
, target_siginfo_t
*info
,
2450 info
->si_signo
= TARGET_SIGFPE
;
2452 info
->si_code
= (code
== BRK_OVERFLOW
) ? FPE_INTOVF
: FPE_INTDIV
;
2453 queue_signal(env
, info
->si_signo
, &*info
);
2457 info
->si_signo
= TARGET_SIGTRAP
;
2459 queue_signal(env
, info
->si_signo
, &*info
);
2467 void cpu_loop(CPUMIPSState
*env
)
2469 CPUState
*cs
= CPU(mips_env_get_cpu(env
));
2470 target_siginfo_t info
;
2473 # ifdef TARGET_ABI_MIPSO32
2474 unsigned int syscall_num
;
2479 trapnr
= cpu_mips_exec(cs
);
2483 env
->active_tc
.PC
+= 4;
2484 # ifdef TARGET_ABI_MIPSO32
2485 syscall_num
= env
->active_tc
.gpr
[2] - 4000;
2486 if (syscall_num
>= sizeof(mips_syscall_args
)) {
2487 ret
= -TARGET_ENOSYS
;
2491 abi_ulong arg5
= 0, arg6
= 0, arg7
= 0, arg8
= 0;
2493 nb_args
= mips_syscall_args
[syscall_num
];
2494 sp_reg
= env
->active_tc
.gpr
[29];
2496 /* these arguments are taken from the stack */
2498 if ((ret
= get_user_ual(arg8
, sp_reg
+ 28)) != 0) {
2502 if ((ret
= get_user_ual(arg7
, sp_reg
+ 24)) != 0) {
2506 if ((ret
= get_user_ual(arg6
, sp_reg
+ 20)) != 0) {
2510 if ((ret
= get_user_ual(arg5
, sp_reg
+ 16)) != 0) {
2516 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2517 env
->active_tc
.gpr
[4],
2518 env
->active_tc
.gpr
[5],
2519 env
->active_tc
.gpr
[6],
2520 env
->active_tc
.gpr
[7],
2521 arg5
, arg6
, arg7
, arg8
);
2525 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2526 env
->active_tc
.gpr
[4], env
->active_tc
.gpr
[5],
2527 env
->active_tc
.gpr
[6], env
->active_tc
.gpr
[7],
2528 env
->active_tc
.gpr
[8], env
->active_tc
.gpr
[9],
2529 env
->active_tc
.gpr
[10], env
->active_tc
.gpr
[11]);
2531 if (ret
== -TARGET_QEMU_ESIGRETURN
) {
2532 /* Returning from a successful sigreturn syscall.
2533 Avoid clobbering register state. */
2536 if ((abi_ulong
)ret
>= (abi_ulong
)-1133) {
2537 env
->active_tc
.gpr
[7] = 1; /* error flag */
2540 env
->active_tc
.gpr
[7] = 0; /* error flag */
2542 env
->active_tc
.gpr
[2] = ret
;
2548 info
.si_signo
= TARGET_SIGSEGV
;
2550 /* XXX: check env->error_code */
2551 info
.si_code
= TARGET_SEGV_MAPERR
;
2552 info
._sifields
._sigfault
._addr
= env
->CP0_BadVAddr
;
2553 queue_signal(env
, info
.si_signo
, &info
);
2557 info
.si_signo
= TARGET_SIGILL
;
2560 queue_signal(env
, info
.si_signo
, &info
);
2562 case EXCP_INTERRUPT
:
2563 /* just indicate that signals should be handled asap */
2569 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2572 info
.si_signo
= sig
;
2574 info
.si_code
= TARGET_TRAP_BRKPT
;
2575 queue_signal(env
, info
.si_signo
, &info
);
2580 if (do_store_exclusive(env
)) {
2581 info
.si_signo
= TARGET_SIGSEGV
;
2583 info
.si_code
= TARGET_SEGV_MAPERR
;
2584 info
._sifields
._sigfault
._addr
= env
->active_tc
.PC
;
2585 queue_signal(env
, info
.si_signo
, &info
);
2589 info
.si_signo
= TARGET_SIGILL
;
2591 info
.si_code
= TARGET_ILL_ILLOPC
;
2592 queue_signal(env
, info
.si_signo
, &info
);
2594 /* The code below was inspired by the MIPS Linux kernel trap
2595 * handling code in arch/mips/kernel/traps.c.
2599 abi_ulong trap_instr
;
2602 if (env
->hflags
& MIPS_HFLAG_M16
) {
2603 if (env
->insn_flags
& ASE_MICROMIPS
) {
2604 /* microMIPS mode */
2605 ret
= get_user_u16(trap_instr
, env
->active_tc
.PC
);
2610 if ((trap_instr
>> 10) == 0x11) {
2611 /* 16-bit instruction */
2612 code
= trap_instr
& 0xf;
2614 /* 32-bit instruction */
2617 ret
= get_user_u16(instr_lo
,
2618 env
->active_tc
.PC
+ 2);
2622 trap_instr
= (trap_instr
<< 16) | instr_lo
;
2623 code
= ((trap_instr
>> 6) & ((1 << 20) - 1));
2624 /* Unfortunately, microMIPS also suffers from
2625 the old assembler bug... */
2626 if (code
>= (1 << 10)) {
2632 ret
= get_user_u16(trap_instr
, env
->active_tc
.PC
);
2636 code
= (trap_instr
>> 6) & 0x3f;
2639 ret
= get_user_u32(trap_instr
, env
->active_tc
.PC
);
2644 /* As described in the original Linux kernel code, the
2645 * below checks on 'code' are to work around an old
2648 code
= ((trap_instr
>> 6) & ((1 << 20) - 1));
2649 if (code
>= (1 << 10)) {
2654 if (do_break(env
, &info
, code
) != 0) {
2661 abi_ulong trap_instr
;
2662 unsigned int code
= 0;
2664 if (env
->hflags
& MIPS_HFLAG_M16
) {
2665 /* microMIPS mode */
2668 ret
= get_user_u16(instr
[0], env
->active_tc
.PC
) ||
2669 get_user_u16(instr
[1], env
->active_tc
.PC
+ 2);
2671 trap_instr
= (instr
[0] << 16) | instr
[1];
2673 ret
= get_user_u32(trap_instr
, env
->active_tc
.PC
);
2680 /* The immediate versions don't provide a code. */
2681 if (!(trap_instr
& 0xFC000000)) {
2682 if (env
->hflags
& MIPS_HFLAG_M16
) {
2683 /* microMIPS mode */
2684 code
= ((trap_instr
>> 12) & ((1 << 4) - 1));
2686 code
= ((trap_instr
>> 6) & ((1 << 10) - 1));
2690 if (do_break(env
, &info
, code
) != 0) {
2697 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
2700 process_pending_signals(env
);
2705 #ifdef TARGET_OPENRISC
2707 void cpu_loop(CPUOpenRISCState
*env
)
2709 CPUState
*cs
= CPU(openrisc_env_get_cpu(env
));
2714 trapnr
= cpu_openrisc_exec(cs
);
2720 qemu_log_mask(CPU_LOG_INT
, "\nReset request, exit, pc is %#x\n", env
->pc
);
2724 qemu_log_mask(CPU_LOG_INT
, "\nBus error, exit, pc is %#x\n", env
->pc
);
2725 gdbsig
= TARGET_SIGBUS
;
2729 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2730 gdbsig
= TARGET_SIGSEGV
;
2733 qemu_log_mask(CPU_LOG_INT
, "\nTick time interrupt pc is %#x\n", env
->pc
);
2736 qemu_log_mask(CPU_LOG_INT
, "\nAlignment pc is %#x\n", env
->pc
);
2737 gdbsig
= TARGET_SIGBUS
;
2740 qemu_log_mask(CPU_LOG_INT
, "\nIllegal instructionpc is %#x\n", env
->pc
);
2741 gdbsig
= TARGET_SIGILL
;
2744 qemu_log_mask(CPU_LOG_INT
, "\nExternal interruptpc is %#x\n", env
->pc
);
2748 qemu_log_mask(CPU_LOG_INT
, "\nTLB miss\n");
2751 qemu_log_mask(CPU_LOG_INT
, "\nRange\n");
2752 gdbsig
= TARGET_SIGSEGV
;
2755 env
->pc
+= 4; /* 0xc00; */
2756 env
->gpr
[11] = do_syscall(env
,
2757 env
->gpr
[11], /* return value */
2758 env
->gpr
[3], /* r3 - r7 are params */
2766 qemu_log_mask(CPU_LOG_INT
, "\nFloating point error\n");
2769 qemu_log_mask(CPU_LOG_INT
, "\nTrap\n");
2770 gdbsig
= TARGET_SIGTRAP
;
2773 qemu_log_mask(CPU_LOG_INT
, "\nNR\n");
2776 EXCP_DUMP(env
, "\nqemu: unhandled CPU exception %#x - aborting\n",
2778 gdbsig
= TARGET_SIGILL
;
2782 gdb_handlesig(cs
, gdbsig
);
2783 if (gdbsig
!= TARGET_SIGTRAP
) {
2788 process_pending_signals(env
);
2792 #endif /* TARGET_OPENRISC */
2795 void cpu_loop(CPUSH4State
*env
)
2797 CPUState
*cs
= CPU(sh_env_get_cpu(env
));
2799 target_siginfo_t info
;
2803 trapnr
= cpu_sh4_exec(cs
);
2809 ret
= do_syscall(env
,
2818 env
->gregs
[0] = ret
;
2820 case EXCP_INTERRUPT
:
2821 /* just indicate that signals should be handled asap */
2827 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2830 info
.si_signo
= sig
;
2832 info
.si_code
= TARGET_TRAP_BRKPT
;
2833 queue_signal(env
, info
.si_signo
, &info
);
2839 info
.si_signo
= TARGET_SIGSEGV
;
2841 info
.si_code
= TARGET_SEGV_MAPERR
;
2842 info
._sifields
._sigfault
._addr
= env
->tea
;
2843 queue_signal(env
, info
.si_signo
, &info
);
2847 printf ("Unhandled trap: 0x%x\n", trapnr
);
2848 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2851 process_pending_signals (env
);
2857 void cpu_loop(CPUCRISState
*env
)
2859 CPUState
*cs
= CPU(cris_env_get_cpu(env
));
2861 target_siginfo_t info
;
2865 trapnr
= cpu_cris_exec(cs
);
2870 info
.si_signo
= TARGET_SIGSEGV
;
2872 /* XXX: check env->error_code */
2873 info
.si_code
= TARGET_SEGV_MAPERR
;
2874 info
._sifields
._sigfault
._addr
= env
->pregs
[PR_EDA
];
2875 queue_signal(env
, info
.si_signo
, &info
);
2878 case EXCP_INTERRUPT
:
2879 /* just indicate that signals should be handled asap */
2882 ret
= do_syscall(env
,
2891 env
->regs
[10] = ret
;
2897 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2900 info
.si_signo
= sig
;
2902 info
.si_code
= TARGET_TRAP_BRKPT
;
2903 queue_signal(env
, info
.si_signo
, &info
);
2908 printf ("Unhandled trap: 0x%x\n", trapnr
);
2909 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2912 process_pending_signals (env
);
2917 #ifdef TARGET_MICROBLAZE
2918 void cpu_loop(CPUMBState
*env
)
2920 CPUState
*cs
= CPU(mb_env_get_cpu(env
));
2922 target_siginfo_t info
;
2926 trapnr
= cpu_mb_exec(cs
);
2931 info
.si_signo
= TARGET_SIGSEGV
;
2933 /* XXX: check env->error_code */
2934 info
.si_code
= TARGET_SEGV_MAPERR
;
2935 info
._sifields
._sigfault
._addr
= 0;
2936 queue_signal(env
, info
.si_signo
, &info
);
2939 case EXCP_INTERRUPT
:
2940 /* just indicate that signals should be handled asap */
2943 /* Return address is 4 bytes after the call. */
2945 env
->sregs
[SR_PC
] = env
->regs
[14];
2946 ret
= do_syscall(env
,
2958 env
->regs
[17] = env
->sregs
[SR_PC
] + 4;
2959 if (env
->iflags
& D_FLAG
) {
2960 env
->sregs
[SR_ESR
] |= 1 << 12;
2961 env
->sregs
[SR_PC
] -= 4;
2962 /* FIXME: if branch was immed, replay the imm as well. */
2965 env
->iflags
&= ~(IMM_FLAG
| D_FLAG
);
2967 switch (env
->sregs
[SR_ESR
] & 31) {
2968 case ESR_EC_DIVZERO
:
2969 info
.si_signo
= TARGET_SIGFPE
;
2971 info
.si_code
= TARGET_FPE_FLTDIV
;
2972 info
._sifields
._sigfault
._addr
= 0;
2973 queue_signal(env
, info
.si_signo
, &info
);
2976 info
.si_signo
= TARGET_SIGFPE
;
2978 if (env
->sregs
[SR_FSR
] & FSR_IO
) {
2979 info
.si_code
= TARGET_FPE_FLTINV
;
2981 if (env
->sregs
[SR_FSR
] & FSR_DZ
) {
2982 info
.si_code
= TARGET_FPE_FLTDIV
;
2984 info
._sifields
._sigfault
._addr
= 0;
2985 queue_signal(env
, info
.si_signo
, &info
);
2988 printf ("Unhandled hw-exception: 0x%x\n",
2989 env
->sregs
[SR_ESR
] & ESR_EC_MASK
);
2990 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2999 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3002 info
.si_signo
= sig
;
3004 info
.si_code
= TARGET_TRAP_BRKPT
;
3005 queue_signal(env
, info
.si_signo
, &info
);
3010 printf ("Unhandled trap: 0x%x\n", trapnr
);
3011 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3014 process_pending_signals (env
);
3021 void cpu_loop(CPUM68KState
*env
)
3023 CPUState
*cs
= CPU(m68k_env_get_cpu(env
));
3026 target_siginfo_t info
;
3027 TaskState
*ts
= cs
->opaque
;
3031 trapnr
= cpu_m68k_exec(cs
);
3036 if (ts
->sim_syscalls
) {
3038 get_user_u16(nr
, env
->pc
+ 2);
3040 do_m68k_simcall(env
, nr
);
3046 case EXCP_HALT_INSN
:
3047 /* Semihosing syscall. */
3049 do_m68k_semihosting(env
, env
->dregs
[0]);
3053 case EXCP_UNSUPPORTED
:
3055 info
.si_signo
= TARGET_SIGILL
;
3057 info
.si_code
= TARGET_ILL_ILLOPN
;
3058 info
._sifields
._sigfault
._addr
= env
->pc
;
3059 queue_signal(env
, info
.si_signo
, &info
);
3063 ts
->sim_syscalls
= 0;
3066 env
->dregs
[0] = do_syscall(env
,
3077 case EXCP_INTERRUPT
:
3078 /* just indicate that signals should be handled asap */
3082 info
.si_signo
= TARGET_SIGSEGV
;
3084 /* XXX: check env->error_code */
3085 info
.si_code
= TARGET_SEGV_MAPERR
;
3086 info
._sifields
._sigfault
._addr
= env
->mmu
.ar
;
3087 queue_signal(env
, info
.si_signo
, &info
);
3094 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3097 info
.si_signo
= sig
;
3099 info
.si_code
= TARGET_TRAP_BRKPT
;
3100 queue_signal(env
, info
.si_signo
, &info
);
3105 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
3108 process_pending_signals(env
);
3111 #endif /* TARGET_M68K */
3114 static void do_store_exclusive(CPUAlphaState
*env
, int reg
, int quad
)
3116 target_ulong addr
, val
, tmp
;
3117 target_siginfo_t info
;
3120 addr
= env
->lock_addr
;
3121 tmp
= env
->lock_st_addr
;
3122 env
->lock_addr
= -1;
3123 env
->lock_st_addr
= 0;
3129 if (quad
? get_user_s64(val
, addr
) : get_user_s32(val
, addr
)) {
3133 if (val
== env
->lock_value
) {
3135 if (quad
? put_user_u64(tmp
, addr
) : put_user_u32(tmp
, addr
)) {
3152 info
.si_signo
= TARGET_SIGSEGV
;
3154 info
.si_code
= TARGET_SEGV_MAPERR
;
3155 info
._sifields
._sigfault
._addr
= addr
;
3156 queue_signal(env
, TARGET_SIGSEGV
, &info
);
3159 void cpu_loop(CPUAlphaState
*env
)
3161 CPUState
*cs
= CPU(alpha_env_get_cpu(env
));
3163 target_siginfo_t info
;
3168 trapnr
= cpu_alpha_exec(cs
);
3171 /* All of the traps imply a transition through PALcode, which
3172 implies an REI instruction has been executed. Which means
3173 that the intr_flag should be cleared. */
3178 fprintf(stderr
, "Reset requested. Exit\n");
3182 fprintf(stderr
, "Machine check exception. Exit\n");
3185 case EXCP_SMP_INTERRUPT
:
3186 case EXCP_CLK_INTERRUPT
:
3187 case EXCP_DEV_INTERRUPT
:
3188 fprintf(stderr
, "External interrupt. Exit\n");
3192 env
->lock_addr
= -1;
3193 info
.si_signo
= TARGET_SIGSEGV
;
3195 info
.si_code
= (page_get_flags(env
->trap_arg0
) & PAGE_VALID
3196 ? TARGET_SEGV_ACCERR
: TARGET_SEGV_MAPERR
);
3197 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
3198 queue_signal(env
, info
.si_signo
, &info
);
3201 env
->lock_addr
= -1;
3202 info
.si_signo
= TARGET_SIGBUS
;
3204 info
.si_code
= TARGET_BUS_ADRALN
;
3205 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
3206 queue_signal(env
, info
.si_signo
, &info
);
3210 env
->lock_addr
= -1;
3211 info
.si_signo
= TARGET_SIGILL
;
3213 info
.si_code
= TARGET_ILL_ILLOPC
;
3214 info
._sifields
._sigfault
._addr
= env
->pc
;
3215 queue_signal(env
, info
.si_signo
, &info
);
3218 env
->lock_addr
= -1;
3219 info
.si_signo
= TARGET_SIGFPE
;
3221 info
.si_code
= TARGET_FPE_FLTINV
;
3222 info
._sifields
._sigfault
._addr
= env
->pc
;
3223 queue_signal(env
, info
.si_signo
, &info
);
3226 /* No-op. Linux simply re-enables the FPU. */
3229 env
->lock_addr
= -1;
3230 switch (env
->error_code
) {
3233 info
.si_signo
= TARGET_SIGTRAP
;
3235 info
.si_code
= TARGET_TRAP_BRKPT
;
3236 info
._sifields
._sigfault
._addr
= env
->pc
;
3237 queue_signal(env
, info
.si_signo
, &info
);
3241 info
.si_signo
= TARGET_SIGTRAP
;
3244 info
._sifields
._sigfault
._addr
= env
->pc
;
3245 queue_signal(env
, info
.si_signo
, &info
);
3249 trapnr
= env
->ir
[IR_V0
];
3250 sysret
= do_syscall(env
, trapnr
,
3251 env
->ir
[IR_A0
], env
->ir
[IR_A1
],
3252 env
->ir
[IR_A2
], env
->ir
[IR_A3
],
3253 env
->ir
[IR_A4
], env
->ir
[IR_A5
],
3255 if (trapnr
== TARGET_NR_sigreturn
3256 || trapnr
== TARGET_NR_rt_sigreturn
) {
3259 /* Syscall writes 0 to V0 to bypass error check, similar
3260 to how this is handled internal to Linux kernel.
3261 (Ab)use trapnr temporarily as boolean indicating error. */
3262 trapnr
= (env
->ir
[IR_V0
] != 0 && sysret
< 0);
3263 env
->ir
[IR_V0
] = (trapnr
? -sysret
: sysret
);
3264 env
->ir
[IR_A3
] = trapnr
;
3268 /* ??? We can probably elide the code using page_unprotect
3269 that is checking for self-modifying code. Instead we
3270 could simply call tb_flush here. Until we work out the
3271 changes required to turn off the extra write protection,
3272 this can be a no-op. */
3276 /* Handled in the translator for usermode. */
3280 /* Handled in the translator for usermode. */
3284 info
.si_signo
= TARGET_SIGFPE
;
3285 switch (env
->ir
[IR_A0
]) {
3286 case TARGET_GEN_INTOVF
:
3287 info
.si_code
= TARGET_FPE_INTOVF
;
3289 case TARGET_GEN_INTDIV
:
3290 info
.si_code
= TARGET_FPE_INTDIV
;
3292 case TARGET_GEN_FLTOVF
:
3293 info
.si_code
= TARGET_FPE_FLTOVF
;
3295 case TARGET_GEN_FLTUND
:
3296 info
.si_code
= TARGET_FPE_FLTUND
;
3298 case TARGET_GEN_FLTINV
:
3299 info
.si_code
= TARGET_FPE_FLTINV
;
3301 case TARGET_GEN_FLTINE
:
3302 info
.si_code
= TARGET_FPE_FLTRES
;
3304 case TARGET_GEN_ROPRAND
:
3308 info
.si_signo
= TARGET_SIGTRAP
;
3313 info
._sifields
._sigfault
._addr
= env
->pc
;
3314 queue_signal(env
, info
.si_signo
, &info
);
3321 info
.si_signo
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3322 if (info
.si_signo
) {
3323 env
->lock_addr
= -1;
3325 info
.si_code
= TARGET_TRAP_BRKPT
;
3326 queue_signal(env
, info
.si_signo
, &info
);
3331 do_store_exclusive(env
, env
->error_code
, trapnr
- EXCP_STL_C
);
3333 case EXCP_INTERRUPT
:
3334 /* Just indicate that signals should be handled asap. */
3337 printf ("Unhandled trap: 0x%x\n", trapnr
);
3338 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3341 process_pending_signals (env
);
3344 #endif /* TARGET_ALPHA */
3347 void cpu_loop(CPUS390XState
*env
)
3349 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
3351 target_siginfo_t info
;
3356 trapnr
= cpu_s390x_exec(cs
);
3359 case EXCP_INTERRUPT
:
3360 /* Just indicate that signals should be handled asap. */
3364 n
= env
->int_svc_code
;
3366 /* syscalls > 255 */
3369 env
->psw
.addr
+= env
->int_svc_ilen
;
3370 env
->regs
[2] = do_syscall(env
, n
, env
->regs
[2], env
->regs
[3],
3371 env
->regs
[4], env
->regs
[5],
3372 env
->regs
[6], env
->regs
[7], 0, 0);
3376 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3378 n
= TARGET_TRAP_BRKPT
;
3383 n
= env
->int_pgm_code
;
3386 case PGM_PRIVILEGED
:
3387 sig
= TARGET_SIGILL
;
3388 n
= TARGET_ILL_ILLOPC
;
3390 case PGM_PROTECTION
:
3391 case PGM_ADDRESSING
:
3392 sig
= TARGET_SIGSEGV
;
3393 /* XXX: check env->error_code */
3394 n
= TARGET_SEGV_MAPERR
;
3395 addr
= env
->__excp_addr
;
3398 case PGM_SPECIFICATION
:
3399 case PGM_SPECIAL_OP
:
3402 sig
= TARGET_SIGILL
;
3403 n
= TARGET_ILL_ILLOPN
;
3406 case PGM_FIXPT_OVERFLOW
:
3407 sig
= TARGET_SIGFPE
;
3408 n
= TARGET_FPE_INTOVF
;
3410 case PGM_FIXPT_DIVIDE
:
3411 sig
= TARGET_SIGFPE
;
3412 n
= TARGET_FPE_INTDIV
;
3416 n
= (env
->fpc
>> 8) & 0xff;
3418 /* compare-and-trap */
3421 /* An IEEE exception, simulated or otherwise. */
3423 n
= TARGET_FPE_FLTINV
;
3424 } else if (n
& 0x40) {
3425 n
= TARGET_FPE_FLTDIV
;
3426 } else if (n
& 0x20) {
3427 n
= TARGET_FPE_FLTOVF
;
3428 } else if (n
& 0x10) {
3429 n
= TARGET_FPE_FLTUND
;
3430 } else if (n
& 0x08) {
3431 n
= TARGET_FPE_FLTRES
;
3433 /* ??? Quantum exception; BFP, DFP error. */
3436 sig
= TARGET_SIGFPE
;
3441 fprintf(stderr
, "Unhandled program exception: %#x\n", n
);
3442 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3448 addr
= env
->psw
.addr
;
3450 info
.si_signo
= sig
;
3453 info
._sifields
._sigfault
._addr
= addr
;
3454 queue_signal(env
, info
.si_signo
, &info
);
3458 fprintf(stderr
, "Unhandled trap: 0x%x\n", trapnr
);
3459 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3462 process_pending_signals (env
);
3466 #endif /* TARGET_S390X */
3468 #ifdef TARGET_TILEGX
3470 static void gen_sigill_reg(CPUTLGState
*env
)
3472 target_siginfo_t info
;
3474 info
.si_signo
= TARGET_SIGILL
;
3476 info
.si_code
= TARGET_ILL_PRVREG
;
3477 info
._sifields
._sigfault
._addr
= env
->pc
;
3478 queue_signal(env
, info
.si_signo
, &info
);
3481 static void do_signal(CPUTLGState
*env
, int signo
, int sigcode
)
3483 target_siginfo_t info
;
3485 info
.si_signo
= signo
;
3487 info
._sifields
._sigfault
._addr
= env
->pc
;
3489 if (signo
== TARGET_SIGSEGV
) {
3490 /* The passed in sigcode is a dummy; check for a page mapping
3491 and pass either MAPERR or ACCERR. */
3492 target_ulong addr
= env
->excaddr
;
3493 info
._sifields
._sigfault
._addr
= addr
;
3494 if (page_check_range(addr
, 1, PAGE_VALID
) < 0) {
3495 sigcode
= TARGET_SEGV_MAPERR
;
3497 sigcode
= TARGET_SEGV_ACCERR
;
3500 info
.si_code
= sigcode
;
3502 queue_signal(env
, info
.si_signo
, &info
);
3505 static void gen_sigsegv_maperr(CPUTLGState
*env
, target_ulong addr
)
3507 env
->excaddr
= addr
;
3508 do_signal(env
, TARGET_SIGSEGV
, 0);
3511 static void set_regval(CPUTLGState
*env
, uint8_t reg
, uint64_t val
)
3513 if (unlikely(reg
>= TILEGX_R_COUNT
)) {
3524 gen_sigill_reg(env
);
3527 g_assert_not_reached();
3530 env
->regs
[reg
] = val
;
3534 * Compare the 8-byte contents of the CmpValue SPR with the 8-byte value in
3535 * memory at the address held in the first source register. If the values are
3536 * not equal, then no memory operation is performed. If the values are equal,
3537 * the 8-byte quantity from the second source register is written into memory
3538 * at the address held in the first source register. In either case, the result
3539 * of the instruction is the value read from memory. The compare and write to
3540 * memory are atomic and thus can be used for synchronization purposes. This
3541 * instruction only operates for addresses aligned to a 8-byte boundary.
3542 * Unaligned memory access causes an Unaligned Data Reference interrupt.
3544 * Functional Description (64-bit)
3545 * uint64_t memVal = memoryReadDoubleWord (rf[SrcA]);
3546 * rf[Dest] = memVal;
3547 * if (memVal == SPR[CmpValueSPR])
3548 * memoryWriteDoubleWord (rf[SrcA], rf[SrcB]);
3550 * Functional Description (32-bit)
3551 * uint64_t memVal = signExtend32 (memoryReadWord (rf[SrcA]));
3552 * rf[Dest] = memVal;
3553 * if (memVal == signExtend32 (SPR[CmpValueSPR]))
3554 * memoryWriteWord (rf[SrcA], rf[SrcB]);
3557 * This function also processes exch and exch4 which need not process SPR.
3559 static void do_exch(CPUTLGState
*env
, bool quad
, bool cmp
)
3562 target_long val
, sprval
;
3566 addr
= env
->atomic_srca
;
3567 if (quad
? get_user_s64(val
, addr
) : get_user_s32(val
, addr
)) {
3568 goto sigsegv_maperr
;
3573 sprval
= env
->spregs
[TILEGX_SPR_CMPEXCH
];
3575 sprval
= sextract64(env
->spregs
[TILEGX_SPR_CMPEXCH
], 0, 32);
3579 if (!cmp
|| val
== sprval
) {
3580 target_long valb
= env
->atomic_srcb
;
3581 if (quad
? put_user_u64(valb
, addr
) : put_user_u32(valb
, addr
)) {
3582 goto sigsegv_maperr
;
3586 set_regval(env
, env
->atomic_dstr
, val
);
3592 gen_sigsegv_maperr(env
, addr
);
3595 static void do_fetch(CPUTLGState
*env
, int trapnr
, bool quad
)
3599 target_long val
, valb
;
3603 addr
= env
->atomic_srca
;
3604 valb
= env
->atomic_srcb
;
3605 if (quad
? get_user_s64(val
, addr
) : get_user_s32(val
, addr
)) {
3606 goto sigsegv_maperr
;
3610 case TILEGX_EXCP_OPCODE_FETCHADD
:
3611 case TILEGX_EXCP_OPCODE_FETCHADD4
:
3614 case TILEGX_EXCP_OPCODE_FETCHADDGEZ
:
3620 case TILEGX_EXCP_OPCODE_FETCHADDGEZ4
:
3622 if ((int32_t)valb
< 0) {
3626 case TILEGX_EXCP_OPCODE_FETCHAND
:
3627 case TILEGX_EXCP_OPCODE_FETCHAND4
:
3630 case TILEGX_EXCP_OPCODE_FETCHOR
:
3631 case TILEGX_EXCP_OPCODE_FETCHOR4
:
3635 g_assert_not_reached();
3639 if (quad
? put_user_u64(valb
, addr
) : put_user_u32(valb
, addr
)) {
3640 goto sigsegv_maperr
;
3644 set_regval(env
, env
->atomic_dstr
, val
);
3650 gen_sigsegv_maperr(env
, addr
);
3653 void cpu_loop(CPUTLGState
*env
)
3655 CPUState
*cs
= CPU(tilegx_env_get_cpu(env
));
3660 trapnr
= cpu_tilegx_exec(cs
);
3663 case TILEGX_EXCP_SYSCALL
:
3664 env
->regs
[TILEGX_R_RE
] = do_syscall(env
, env
->regs
[TILEGX_R_NR
],
3665 env
->regs
[0], env
->regs
[1],
3666 env
->regs
[2], env
->regs
[3],
3667 env
->regs
[4], env
->regs
[5],
3668 env
->regs
[6], env
->regs
[7]);
3669 env
->regs
[TILEGX_R_ERR
] = TILEGX_IS_ERRNO(env
->regs
[TILEGX_R_RE
])
3670 ? - env
->regs
[TILEGX_R_RE
]
3673 case TILEGX_EXCP_OPCODE_EXCH
:
3674 do_exch(env
, true, false);
3676 case TILEGX_EXCP_OPCODE_EXCH4
:
3677 do_exch(env
, false, false);
3679 case TILEGX_EXCP_OPCODE_CMPEXCH
:
3680 do_exch(env
, true, true);
3682 case TILEGX_EXCP_OPCODE_CMPEXCH4
:
3683 do_exch(env
, false, true);
3685 case TILEGX_EXCP_OPCODE_FETCHADD
:
3686 case TILEGX_EXCP_OPCODE_FETCHADDGEZ
:
3687 case TILEGX_EXCP_OPCODE_FETCHAND
:
3688 case TILEGX_EXCP_OPCODE_FETCHOR
:
3689 do_fetch(env
, trapnr
, true);
3691 case TILEGX_EXCP_OPCODE_FETCHADD4
:
3692 case TILEGX_EXCP_OPCODE_FETCHADDGEZ4
:
3693 case TILEGX_EXCP_OPCODE_FETCHAND4
:
3694 case TILEGX_EXCP_OPCODE_FETCHOR4
:
3695 do_fetch(env
, trapnr
, false);
3697 case TILEGX_EXCP_SIGNAL
:
3698 do_signal(env
, env
->signo
, env
->sigcode
);
3700 case TILEGX_EXCP_REG_IDN_ACCESS
:
3701 case TILEGX_EXCP_REG_UDN_ACCESS
:
3702 gen_sigill_reg(env
);
3705 fprintf(stderr
, "trapnr is %d[0x%x].\n", trapnr
, trapnr
);
3706 g_assert_not_reached();
3708 process_pending_signals(env
);
3714 THREAD CPUState
*thread_cpu
;
3716 void task_settid(TaskState
*ts
)
3718 if (ts
->ts_tid
== 0) {
3719 ts
->ts_tid
= (pid_t
)syscall(SYS_gettid
);
/* Stop every guest thread except the caller. */
void stop_all_tasks(void)
{
    /*
     * We trust that when using NPTL, start_exclusive()
     * handles thread stopping correctly.
     */
    start_exclusive();
}
3732 /* Assumes contents are already zeroed. */
3733 void init_task_state(TaskState
*ts
)
3738 ts
->first_free
= ts
->sigqueue_table
;
3739 for (i
= 0; i
< MAX_SIGQUEUE_SIZE
- 1; i
++) {
3740 ts
->sigqueue_table
[i
].next
= &ts
->sigqueue_table
[i
+ 1];
3742 ts
->sigqueue_table
[i
].next
= NULL
;
3745 CPUArchState
*cpu_copy(CPUArchState
*env
)
3747 CPUState
*cpu
= ENV_GET_CPU(env
);
3748 CPUState
*new_cpu
= cpu_init(cpu_model
);
3749 CPUArchState
*new_env
= new_cpu
->env_ptr
;
3753 /* Reset non arch specific state */
3756 memcpy(new_env
, env
, sizeof(CPUArchState
));
3758 /* Clone all break/watchpoints.
3759 Note: Once we support ptrace with hw-debug register access, make sure
3760 BP_CPU break/watchpoints are handled correctly on clone. */
3761 QTAILQ_INIT(&new_cpu
->breakpoints
);
3762 QTAILQ_INIT(&new_cpu
->watchpoints
);
3763 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
3764 cpu_breakpoint_insert(new_cpu
, bp
->pc
, bp
->flags
, NULL
);
3766 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
3767 cpu_watchpoint_insert(new_cpu
, wp
->vaddr
, wp
->len
, wp
->flags
, NULL
);
/* -h / -help: print usage and exit successfully. */
static void handle_arg_help(const char *arg)
{
    usage(EXIT_SUCCESS);
}
/* -d: enable the requested log items; on an unknown item print the
 * available ones and exit. */
static void handle_arg_log(const char *arg)
{
    int mask;

    mask = qemu_str_to_log_mask(arg);
    if (mask == 0) {
        qemu_print_log_usage(stdout);
        exit(EXIT_FAILURE);
    }
    qemu_log_needs_buffers();
    qemu_set_log(mask);
}
/* -D: redirect the log to the given file instead of stderr. */
static void handle_arg_log_filename(const char *arg)
{
    qemu_set_log_filename(arg);
}
3796 static void handle_arg_set_env(const char *arg
)
3798 char *r
, *p
, *token
;
3799 r
= p
= strdup(arg
);
3800 while ((token
= strsep(&p
, ",")) != NULL
) {
3801 if (envlist_setenv(envlist
, token
) != 0) {
3802 usage(EXIT_FAILURE
);
3808 static void handle_arg_unset_env(const char *arg
)
3810 char *r
, *p
, *token
;
3811 r
= p
= strdup(arg
);
3812 while ((token
= strsep(&p
, ",")) != NULL
) {
3813 if (envlist_unsetenv(envlist
, token
) != 0) {
3814 usage(EXIT_FAILURE
);
3820 static void handle_arg_argv0(const char *arg
)
3822 argv0
= strdup(arg
);
3825 static void handle_arg_stack_size(const char *arg
)
3828 guest_stack_size
= strtoul(arg
, &p
, 0);
3829 if (guest_stack_size
== 0) {
3830 usage(EXIT_FAILURE
);
3834 guest_stack_size
*= 1024 * 1024;
3835 } else if (*p
== 'k' || *p
== 'K') {
3836 guest_stack_size
*= 1024;
3840 static void handle_arg_ld_prefix(const char *arg
)
3842 interp_prefix
= strdup(arg
);
3845 static void handle_arg_pagesize(const char *arg
)
3847 qemu_host_page_size
= atoi(arg
);
3848 if (qemu_host_page_size
== 0 ||
3849 (qemu_host_page_size
& (qemu_host_page_size
- 1)) != 0) {
3850 fprintf(stderr
, "page size must be a power of two\n");
/* -seed: seed the host PRNG with a value in [0, UINT_MAX]. */
static void handle_arg_randseed(const char *arg)
{
    unsigned long long seed;

    if (parse_uint_full(arg, &seed, 0) != 0 || seed > UINT_MAX) {
        fprintf(stderr, "Invalid seed number: %s\n", arg);
        exit(EXIT_FAILURE);
    }
    srand(seed);
}
3866 static void handle_arg_gdb(const char *arg
)
3868 gdbstub_port
= atoi(arg
);
3871 static void handle_arg_uname(const char *arg
)
3873 qemu_uname_release
= strdup(arg
);
3876 static void handle_arg_cpu(const char *arg
)
3878 cpu_model
= strdup(arg
);
3879 if (cpu_model
== NULL
|| is_help_option(cpu_model
)) {
3880 /* XXX: implement xxx_cpu_list for targets that still miss it */
3881 #if defined(cpu_list)
3882 cpu_list(stdout
, &fprintf
);
3888 static void handle_arg_guest_base(const char *arg
)
3890 guest_base
= strtol(arg
, NULL
, 0);
3891 have_guest_base
= 1;
3894 static void handle_arg_reserved_va(const char *arg
)
3898 reserved_va
= strtoul(arg
, &p
, 0);
3912 unsigned long unshifted
= reserved_va
;
3914 reserved_va
<<= shift
;
3915 if (((reserved_va
>> shift
) != unshifted
)
3916 #if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
3917 || (reserved_va
> (1ul << TARGET_VIRT_ADDR_SPACE_BITS
))
3920 fprintf(stderr
, "Reserved virtual address too big\n");
3925 fprintf(stderr
, "Unrecognised -R size suffix '%s'\n", p
);
3930 static void handle_arg_singlestep(const char *arg
)
3935 static void handle_arg_strace(const char *arg
)
3940 static void handle_arg_version(const char *arg
)
3942 printf("qemu-" TARGET_NAME
" version " QEMU_VERSION QEMU_PKGVERSION
3943 ", Copyright (c) 2003-2008 Fabrice Bellard\n");
/* One command-line option: its name, matching environment variable,
 * whether it takes an argument, its handler, and help text.  Field set
 * grounded by the arg_table initializers and usage() accesses below. */
struct qemu_argument {
    const char *argv;       /* option name, without leading '-' */
    const char *env;        /* equivalent environment variable, "" if none */
    bool has_arg;           /* option consumes the following argv word */
    void (*handle_opt)(const char *arg);
    const char *example;    /* placeholder shown in usage() */
    const char *help;
};
3956 static const struct qemu_argument arg_table
[] = {
3957 {"h", "", false, handle_arg_help
,
3958 "", "print this help"},
3959 {"help", "", false, handle_arg_help
,
3961 {"g", "QEMU_GDB", true, handle_arg_gdb
,
3962 "port", "wait gdb connection to 'port'"},
3963 {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix
,
3964 "path", "set the elf interpreter prefix to 'path'"},
3965 {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size
,
3966 "size", "set the stack size to 'size' bytes"},
3967 {"cpu", "QEMU_CPU", true, handle_arg_cpu
,
3968 "model", "select CPU (-cpu help for list)"},
3969 {"E", "QEMU_SET_ENV", true, handle_arg_set_env
,
3970 "var=value", "sets targets environment variable (see below)"},
3971 {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env
,
3972 "var", "unsets targets environment variable (see below)"},
3973 {"0", "QEMU_ARGV0", true, handle_arg_argv0
,
3974 "argv0", "forces target process argv[0] to be 'argv0'"},
3975 {"r", "QEMU_UNAME", true, handle_arg_uname
,
3976 "uname", "set qemu uname release string to 'uname'"},
3977 {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base
,
3978 "address", "set guest_base address to 'address'"},
3979 {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va
,
3980 "size", "reserve 'size' bytes for guest virtual address space"},
3981 {"d", "QEMU_LOG", true, handle_arg_log
,
3982 "item[,...]", "enable logging of specified items "
3983 "(use '-d help' for a list of items)"},
3984 {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename
,
3985 "logfile", "write logs to 'logfile' (default stderr)"},
3986 {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize
,
3987 "pagesize", "set the host page size to 'pagesize'"},
3988 {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep
,
3989 "", "run in singlestep mode"},
3990 {"strace", "QEMU_STRACE", false, handle_arg_strace
,
3991 "", "log system calls"},
3992 {"seed", "QEMU_RAND_SEED", true, handle_arg_randseed
,
3993 "", "Seed for pseudo-random number generator"},
3994 {"version", "QEMU_VERSION", false, handle_arg_version
,
3995 "", "display version information and exit"},
3996 {NULL
, NULL
, false, NULL
, NULL
, NULL
}
3999 static void usage(int exitcode
)
4001 const struct qemu_argument
*arginfo
;
4005 printf("usage: qemu-" TARGET_NAME
" [options] program [arguments...]\n"
4006 "Linux CPU emulator (compiled for " TARGET_NAME
" emulation)\n"
4008 "Options and associated environment variables:\n"
4011 /* Calculate column widths. We must always have at least enough space
4012 * for the column header.
4014 maxarglen
= strlen("Argument");
4015 maxenvlen
= strlen("Env-variable");
4017 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
4018 int arglen
= strlen(arginfo
->argv
);
4019 if (arginfo
->has_arg
) {
4020 arglen
+= strlen(arginfo
->example
) + 1;
4022 if (strlen(arginfo
->env
) > maxenvlen
) {
4023 maxenvlen
= strlen(arginfo
->env
);
4025 if (arglen
> maxarglen
) {
4030 printf("%-*s %-*s Description\n", maxarglen
+1, "Argument",
4031 maxenvlen
, "Env-variable");
4033 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
4034 if (arginfo
->has_arg
) {
4035 printf("-%s %-*s %-*s %s\n", arginfo
->argv
,
4036 (int)(maxarglen
- strlen(arginfo
->argv
) - 1),
4037 arginfo
->example
, maxenvlen
, arginfo
->env
, arginfo
->help
);
4039 printf("-%-*s %-*s %s\n", maxarglen
, arginfo
->argv
,
4040 maxenvlen
, arginfo
->env
,
4047 "QEMU_LD_PREFIX = %s\n"
4048 "QEMU_STACK_SIZE = %ld byte\n",
4053 "You can use -E and -U options or the QEMU_SET_ENV and\n"
4054 "QEMU_UNSET_ENV environment variables to set and unset\n"
4055 "environment variables for the target process.\n"
4056 "It is possible to provide several variables by separating them\n"
4057 "by commas in getsubopt(3) style. Additionally it is possible to\n"
4058 "provide the -E and -U options multiple times.\n"
4059 "The following lines are equivalent:\n"
4060 " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
4061 " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
4062 " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
4063 "Note that if you provide several changes to a single variable\n"
4064 "the last change will stay in effect.\n");
4069 static int parse_args(int argc
, char **argv
)
4073 const struct qemu_argument
*arginfo
;
4075 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
4076 if (arginfo
->env
== NULL
) {
4080 r
= getenv(arginfo
->env
);
4082 arginfo
->handle_opt(r
);
4088 if (optind
>= argc
) {
4097 if (!strcmp(r
, "-")) {
4100 /* Treat --foo the same as -foo. */
4105 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
4106 if (!strcmp(r
, arginfo
->argv
)) {
4107 if (arginfo
->has_arg
) {
4108 if (optind
>= argc
) {
4109 (void) fprintf(stderr
,
4110 "qemu: missing argument for option '%s'\n", r
);
4113 arginfo
->handle_opt(argv
[optind
]);
4116 arginfo
->handle_opt(NULL
);
4122 /* no option matched the current argv */
4123 if (arginfo
->handle_opt
== NULL
) {
4124 (void) fprintf(stderr
, "qemu: unknown option '%s'\n", r
);
4129 if (optind
>= argc
) {
4130 (void) fprintf(stderr
, "qemu: no user program specified\n");
4134 filename
= argv
[optind
];
4135 exec_path
= argv
[optind
];
4140 int main(int argc
, char **argv
, char **envp
)
4142 struct target_pt_regs regs1
, *regs
= ®s1
;
4143 struct image_info info1
, *info
= &info1
;
4144 struct linux_binprm bprm
;
4149 char **target_environ
, **wrk
;
4156 module_call_init(MODULE_INIT_QOM
);
4158 if ((envlist
= envlist_create()) == NULL
) {
4159 (void) fprintf(stderr
, "Unable to allocate envlist\n");
4163 /* add current environment into the list */
4164 for (wrk
= environ
; *wrk
!= NULL
; wrk
++) {
4165 (void) envlist_setenv(envlist
, *wrk
);
4168 /* Read the stack limit from the kernel. If it's "unlimited",
4169 then we can do little else besides use the default. */
4172 if (getrlimit(RLIMIT_STACK
, &lim
) == 0
4173 && lim
.rlim_cur
!= RLIM_INFINITY
4174 && lim
.rlim_cur
== (target_long
)lim
.rlim_cur
) {
4175 guest_stack_size
= lim
.rlim_cur
;
4183 optind
= parse_args(argc
, argv
);
4186 memset(regs
, 0, sizeof(struct target_pt_regs
));
4188 /* Zero out image_info */
4189 memset(info
, 0, sizeof(struct image_info
));
4191 memset(&bprm
, 0, sizeof (bprm
));
4193 /* Scan interp_prefix dir for replacement files. */
4194 init_paths(interp_prefix
);
4196 init_qemu_uname_release();
4198 if (cpu_model
== NULL
) {
4199 #if defined(TARGET_I386)
4200 #ifdef TARGET_X86_64
4201 cpu_model
= "qemu64";
4203 cpu_model
= "qemu32";
4205 #elif defined(TARGET_ARM)
4207 #elif defined(TARGET_UNICORE32)
4209 #elif defined(TARGET_M68K)
4211 #elif defined(TARGET_SPARC)
4212 #ifdef TARGET_SPARC64
4213 cpu_model
= "TI UltraSparc II";
4215 cpu_model
= "Fujitsu MB86904";
4217 #elif defined(TARGET_MIPS)
4218 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
4223 #elif defined TARGET_OPENRISC
4224 cpu_model
= "or1200";
4225 #elif defined(TARGET_PPC)
4226 # ifdef TARGET_PPC64
4227 cpu_model
= "POWER8";
4231 #elif defined TARGET_SH4
4232 cpu_model
= TYPE_SH7785_CPU
;
4238 /* NOTE: we need to init the CPU at this stage to get
4239 qemu_host_page_size */
4240 cpu
= cpu_init(cpu_model
);
4242 fprintf(stderr
, "Unable to find CPU definition\n");
4250 if (getenv("QEMU_STRACE")) {
4254 if (getenv("QEMU_RAND_SEED")) {
4255 handle_arg_randseed(getenv("QEMU_RAND_SEED"));
4258 target_environ
= envlist_to_environ(envlist
, NULL
);
4259 envlist_free(envlist
);
4262 * Now that page sizes are configured in cpu_init() we can do
4263 * proper page alignment for guest_base.
4265 guest_base
= HOST_PAGE_ALIGN(guest_base
);
4267 if (reserved_va
|| have_guest_base
) {
4268 guest_base
= init_guest_space(guest_base
, reserved_va
, 0,
4270 if (guest_base
== (unsigned long)-1) {
4271 fprintf(stderr
, "Unable to reserve 0x%lx bytes of virtual address "
4272 "space for use as guest address space (check your virtual "
4273 "memory ulimit setting or reserve less using -R option)\n",
4279 mmap_next_start
= reserved_va
;
4284 * Read in mmap_min_addr kernel parameter. This value is used
4285 * When loading the ELF image to determine whether guest_base
4286 * is needed. It is also used in mmap_find_vma.
4291 if ((fp
= fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL
) {
4293 if (fscanf(fp
, "%lu", &tmp
) == 1) {
4294 mmap_min_addr
= tmp
;
4295 qemu_log_mask(CPU_LOG_PAGE
, "host mmap_min_addr=0x%lx\n", mmap_min_addr
);
4302 * Prepare copy of argv vector for target.
4304 target_argc
= argc
- optind
;
4305 target_argv
= calloc(target_argc
+ 1, sizeof (char *));
4306 if (target_argv
== NULL
) {
4307 (void) fprintf(stderr
, "Unable to allocate memory for target_argv\n");
4312 * If argv0 is specified (using '-0' switch) we replace
4313 * argv[0] pointer with the given one.
4316 if (argv0
!= NULL
) {
4317 target_argv
[i
++] = strdup(argv0
);
4319 for (; i
< target_argc
; i
++) {
4320 target_argv
[i
] = strdup(argv
[optind
+ i
]);
4322 target_argv
[target_argc
] = NULL
;
4324 ts
= g_new0(TaskState
, 1);
4325 init_task_state(ts
);
4326 /* build Task State */
4332 execfd
= qemu_getauxval(AT_EXECFD
);
4334 execfd
= open(filename
, O_RDONLY
);
4336 printf("Error while loading %s: %s\n", filename
, strerror(errno
));
4337 _exit(EXIT_FAILURE
);
4341 ret
= loader_exec(execfd
, filename
, target_argv
, target_environ
, regs
,
4344 printf("Error while loading %s: %s\n", filename
, strerror(-ret
));
4345 _exit(EXIT_FAILURE
);
4348 for (wrk
= target_environ
; *wrk
; wrk
++) {
4352 free(target_environ
);
4354 if (qemu_loglevel_mask(CPU_LOG_PAGE
)) {
4355 qemu_log("guest_base 0x%lx\n", guest_base
);
4358 qemu_log("start_brk 0x" TARGET_ABI_FMT_lx
"\n", info
->start_brk
);
4359 qemu_log("end_code 0x" TARGET_ABI_FMT_lx
"\n", info
->end_code
);
4360 qemu_log("start_code 0x" TARGET_ABI_FMT_lx
"\n",
4362 qemu_log("start_data 0x" TARGET_ABI_FMT_lx
"\n",
4364 qemu_log("end_data 0x" TARGET_ABI_FMT_lx
"\n", info
->end_data
);
4365 qemu_log("start_stack 0x" TARGET_ABI_FMT_lx
"\n",
4367 qemu_log("brk 0x" TARGET_ABI_FMT_lx
"\n", info
->brk
);
4368 qemu_log("entry 0x" TARGET_ABI_FMT_lx
"\n", info
->entry
);
4371 target_set_brk(info
->brk
);
4375 /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
4376 generating the prologue until now so that the prologue can take
4377 the real value of GUEST_BASE into account. */
4378 tcg_prologue_init(&tcg_ctx
);
4380 #if defined(TARGET_I386)
4381 env
->cr
[0] = CR0_PG_MASK
| CR0_WP_MASK
| CR0_PE_MASK
;
4382 env
->hflags
|= HF_PE_MASK
| HF_CPL_MASK
;
4383 if (env
->features
[FEAT_1_EDX
] & CPUID_SSE
) {
4384 env
->cr
[4] |= CR4_OSFXSR_MASK
;
4385 env
->hflags
|= HF_OSFXSR_MASK
;
4387 #ifndef TARGET_ABI32
4388 /* enable 64 bit mode if possible */
4389 if (!(env
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_LM
)) {
4390 fprintf(stderr
, "The selected x86 CPU does not support 64 bit mode\n");
4393 env
->cr
[4] |= CR4_PAE_MASK
;
4394 env
->efer
|= MSR_EFER_LMA
| MSR_EFER_LME
;
4395 env
->hflags
|= HF_LMA_MASK
;
4398 /* flags setup : we activate the IRQs by default as in user mode */
4399 env
->eflags
|= IF_MASK
;
4401 /* linux register setup */
4402 #ifndef TARGET_ABI32
4403 env
->regs
[R_EAX
] = regs
->rax
;
4404 env
->regs
[R_EBX
] = regs
->rbx
;
4405 env
->regs
[R_ECX
] = regs
->rcx
;
4406 env
->regs
[R_EDX
] = regs
->rdx
;
4407 env
->regs
[R_ESI
] = regs
->rsi
;
4408 env
->regs
[R_EDI
] = regs
->rdi
;
4409 env
->regs
[R_EBP
] = regs
->rbp
;
4410 env
->regs
[R_ESP
] = regs
->rsp
;
4411 env
->eip
= regs
->rip
;
4413 env
->regs
[R_EAX
] = regs
->eax
;
4414 env
->regs
[R_EBX
] = regs
->ebx
;
4415 env
->regs
[R_ECX
] = regs
->ecx
;
4416 env
->regs
[R_EDX
] = regs
->edx
;
4417 env
->regs
[R_ESI
] = regs
->esi
;
4418 env
->regs
[R_EDI
] = regs
->edi
;
4419 env
->regs
[R_EBP
] = regs
->ebp
;
4420 env
->regs
[R_ESP
] = regs
->esp
;
4421 env
->eip
= regs
->eip
;
4424 /* linux interrupt setup */
4425 #ifndef TARGET_ABI32
4426 env
->idt
.limit
= 511;
4428 env
->idt
.limit
= 255;
4430 env
->idt
.base
= target_mmap(0, sizeof(uint64_t) * (env
->idt
.limit
+ 1),
4431 PROT_READ
|PROT_WRITE
,
4432 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4433 idt_table
= g2h(env
->idt
.base
);
4456 /* linux segment setup */
4458 uint64_t *gdt_table
;
4459 env
->gdt
.base
= target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES
,
4460 PROT_READ
|PROT_WRITE
,
4461 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4462 env
->gdt
.limit
= sizeof(uint64_t) * TARGET_GDT_ENTRIES
- 1;
4463 gdt_table
= g2h(env
->gdt
.base
);
4465 write_dt(&gdt_table
[__USER_CS
>> 3], 0, 0xfffff,
4466 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4467 (3 << DESC_DPL_SHIFT
) | (0xa << DESC_TYPE_SHIFT
));
4469 /* 64 bit code segment */
4470 write_dt(&gdt_table
[__USER_CS
>> 3], 0, 0xfffff,
4471 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4473 (3 << DESC_DPL_SHIFT
) | (0xa << DESC_TYPE_SHIFT
));
4475 write_dt(&gdt_table
[__USER_DS
>> 3], 0, 0xfffff,
4476 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4477 (3 << DESC_DPL_SHIFT
) | (0x2 << DESC_TYPE_SHIFT
));
4479 cpu_x86_load_seg(env
, R_CS
, __USER_CS
);
4480 cpu_x86_load_seg(env
, R_SS
, __USER_DS
);
4482 cpu_x86_load_seg(env
, R_DS
, __USER_DS
);
4483 cpu_x86_load_seg(env
, R_ES
, __USER_DS
);
4484 cpu_x86_load_seg(env
, R_FS
, __USER_DS
);
4485 cpu_x86_load_seg(env
, R_GS
, __USER_DS
);
4486 /* This hack makes Wine work... */
4487 env
->segs
[R_FS
].selector
= 0;
4489 cpu_x86_load_seg(env
, R_DS
, 0);
4490 cpu_x86_load_seg(env
, R_ES
, 0);
4491 cpu_x86_load_seg(env
, R_FS
, 0);
4492 cpu_x86_load_seg(env
, R_GS
, 0);
4494 #elif defined(TARGET_AARCH64)
4498 if (!(arm_feature(env
, ARM_FEATURE_AARCH64
))) {
4500 "The selected ARM CPU does not support 64 bit mode\n");
4504 for (i
= 0; i
< 31; i
++) {
4505 env
->xregs
[i
] = regs
->regs
[i
];
4508 env
->xregs
[31] = regs
->sp
;
4510 #elif defined(TARGET_ARM)
4513 cpsr_write(env
, regs
->uregs
[16], CPSR_USER
| CPSR_EXEC
,
4515 for(i
= 0; i
< 16; i
++) {
4516 env
->regs
[i
] = regs
->uregs
[i
];
4518 #ifdef TARGET_WORDS_BIGENDIAN
4520 if (EF_ARM_EABI_VERSION(info
->elf_flags
) >= EF_ARM_EABI_VER4
4521 && (info
->elf_flags
& EF_ARM_BE8
)) {
4522 env
->uncached_cpsr
|= CPSR_E
;
4523 env
->cp15
.sctlr_el
[1] |= SCTLR_E0E
;
4525 env
->cp15
.sctlr_el
[1] |= SCTLR_B
;
4529 #elif defined(TARGET_UNICORE32)
4532 cpu_asr_write(env
, regs
->uregs
[32], 0xffffffff);
4533 for (i
= 0; i
< 32; i
++) {
4534 env
->regs
[i
] = regs
->uregs
[i
];
4537 #elif defined(TARGET_SPARC)
4541 env
->npc
= regs
->npc
;
4543 for(i
= 0; i
< 8; i
++)
4544 env
->gregs
[i
] = regs
->u_regs
[i
];
4545 for(i
= 0; i
< 8; i
++)
4546 env
->regwptr
[i
] = regs
->u_regs
[i
+ 8];
4548 #elif defined(TARGET_PPC)
4552 #if defined(TARGET_PPC64)
4553 #if defined(TARGET_ABI32)
4554 env
->msr
&= ~((target_ulong
)1 << MSR_SF
);
4556 env
->msr
|= (target_ulong
)1 << MSR_SF
;
4559 env
->nip
= regs
->nip
;
4560 for(i
= 0; i
< 32; i
++) {
4561 env
->gpr
[i
] = regs
->gpr
[i
];
4564 #elif defined(TARGET_M68K)
4567 env
->dregs
[0] = regs
->d0
;
4568 env
->dregs
[1] = regs
->d1
;
4569 env
->dregs
[2] = regs
->d2
;
4570 env
->dregs
[3] = regs
->d3
;
4571 env
->dregs
[4] = regs
->d4
;
4572 env
->dregs
[5] = regs
->d5
;
4573 env
->dregs
[6] = regs
->d6
;
4574 env
->dregs
[7] = regs
->d7
;
4575 env
->aregs
[0] = regs
->a0
;
4576 env
->aregs
[1] = regs
->a1
;
4577 env
->aregs
[2] = regs
->a2
;
4578 env
->aregs
[3] = regs
->a3
;
4579 env
->aregs
[4] = regs
->a4
;
4580 env
->aregs
[5] = regs
->a5
;
4581 env
->aregs
[6] = regs
->a6
;
4582 env
->aregs
[7] = regs
->usp
;
4584 ts
->sim_syscalls
= 1;
4586 #elif defined(TARGET_MICROBLAZE)
4588 env
->regs
[0] = regs
->r0
;
4589 env
->regs
[1] = regs
->r1
;
4590 env
->regs
[2] = regs
->r2
;
4591 env
->regs
[3] = regs
->r3
;
4592 env
->regs
[4] = regs
->r4
;
4593 env
->regs
[5] = regs
->r5
;
4594 env
->regs
[6] = regs
->r6
;
4595 env
->regs
[7] = regs
->r7
;
4596 env
->regs
[8] = regs
->r8
;
4597 env
->regs
[9] = regs
->r9
;
4598 env
->regs
[10] = regs
->r10
;
4599 env
->regs
[11] = regs
->r11
;
4600 env
->regs
[12] = regs
->r12
;
4601 env
->regs
[13] = regs
->r13
;
4602 env
->regs
[14] = regs
->r14
;
4603 env
->regs
[15] = regs
->r15
;
4604 env
->regs
[16] = regs
->r16
;
4605 env
->regs
[17] = regs
->r17
;
4606 env
->regs
[18] = regs
->r18
;
4607 env
->regs
[19] = regs
->r19
;
4608 env
->regs
[20] = regs
->r20
;
4609 env
->regs
[21] = regs
->r21
;
4610 env
->regs
[22] = regs
->r22
;
4611 env
->regs
[23] = regs
->r23
;
4612 env
->regs
[24] = regs
->r24
;
4613 env
->regs
[25] = regs
->r25
;
4614 env
->regs
[26] = regs
->r26
;
4615 env
->regs
[27] = regs
->r27
;
4616 env
->regs
[28] = regs
->r28
;
4617 env
->regs
[29] = regs
->r29
;
4618 env
->regs
[30] = regs
->r30
;
4619 env
->regs
[31] = regs
->r31
;
4620 env
->sregs
[SR_PC
] = regs
->pc
;
4622 #elif defined(TARGET_MIPS)
4626 for(i
= 0; i
< 32; i
++) {
4627 env
->active_tc
.gpr
[i
] = regs
->regs
[i
];
4629 env
->active_tc
.PC
= regs
->cp0_epc
& ~(target_ulong
)1;
4630 if (regs
->cp0_epc
& 1) {
4631 env
->hflags
|= MIPS_HFLAG_M16
;
4634 #elif defined(TARGET_OPENRISC)
4638 for (i
= 0; i
< 32; i
++) {
4639 env
->gpr
[i
] = regs
->gpr
[i
];
4645 #elif defined(TARGET_SH4)
4649 for(i
= 0; i
< 16; i
++) {
4650 env
->gregs
[i
] = regs
->regs
[i
];
4654 #elif defined(TARGET_ALPHA)
4658 for(i
= 0; i
< 28; i
++) {
4659 env
->ir
[i
] = ((abi_ulong
*)regs
)[i
];
4661 env
->ir
[IR_SP
] = regs
->usp
;
4664 #elif defined(TARGET_CRIS)
4666 env
->regs
[0] = regs
->r0
;
4667 env
->regs
[1] = regs
->r1
;
4668 env
->regs
[2] = regs
->r2
;
4669 env
->regs
[3] = regs
->r3
;
4670 env
->regs
[4] = regs
->r4
;
4671 env
->regs
[5] = regs
->r5
;
4672 env
->regs
[6] = regs
->r6
;
4673 env
->regs
[7] = regs
->r7
;
4674 env
->regs
[8] = regs
->r8
;
4675 env
->regs
[9] = regs
->r9
;
4676 env
->regs
[10] = regs
->r10
;
4677 env
->regs
[11] = regs
->r11
;
4678 env
->regs
[12] = regs
->r12
;
4679 env
->regs
[13] = regs
->r13
;
4680 env
->regs
[14] = info
->start_stack
;
4681 env
->regs
[15] = regs
->acr
;
4682 env
->pc
= regs
->erp
;
4684 #elif defined(TARGET_S390X)
4687 for (i
= 0; i
< 16; i
++) {
4688 env
->regs
[i
] = regs
->gprs
[i
];
4690 env
->psw
.mask
= regs
->psw
.mask
;
4691 env
->psw
.addr
= regs
->psw
.addr
;
4693 #elif defined(TARGET_TILEGX)
4696 for (i
= 0; i
< TILEGX_R_COUNT
; i
++) {
4697 env
->regs
[i
] = regs
->regs
[i
];
4699 for (i
= 0; i
< TILEGX_SPR_COUNT
; i
++) {
4705 #error unsupported target CPU
4708 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4709 ts
->stack_base
= info
->start_stack
;
4710 ts
->heap_base
= info
->brk
;
4711 /* This will be filled in on the first SYS_HEAPINFO call. */
4716 if (gdbserver_start(gdbstub_port
) < 0) {
4717 fprintf(stderr
, "qemu: could not open gdbserver on port %d\n",
4721 gdb_handlesig(cpu
, 0);