4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include <sys/syscall.h>
22 #include <sys/resource.h>
25 #include "qemu/path.h"
26 #include "qemu/cutils.h"
27 #include "qemu/help_option.h"
29 #include "exec/exec-all.h"
31 #include "qemu/timer.h"
32 #include "qemu/envlist.h"
39 static const char *filename
;
40 static const char *argv0
;
41 static int gdbstub_port
;
42 static envlist_t
*envlist
;
43 static const char *cpu_model
;
44 unsigned long mmap_min_addr
;
45 unsigned long guest_base
;
/* Dump a printf-style message plus the full CPU state to stderr, and
 * also to the QEMU log when logging goes to a separate file; used by the
 * per-target cpu_loop() functions on unhandled exceptions.
 * NOTE(review): extraction-damaged -- the macro's wrapper/closing
 * continuation lines are missing from this excerpt.
 */
48 #define EXCP_DUMP(env, fmt, ...) \
50 CPUState *cs = ENV_GET_CPU(env); \
51 fprintf(stderr, fmt , ## __VA_ARGS__); \
52 cpu_dump_state(cs, stderr, fprintf, 0); \
53 if (qemu_log_separate()) { \
54 qemu_log(fmt, ## __VA_ARGS__); \
55 log_cpu_state(cs, 0); \
/* Size of the contiguous guest virtual address-space reservation made
 * when running a 32-bit guest on a 64-bit host (0 = no reservation).
 * NOTE(review): extraction-damaged -- the #if/#elif/#else/#endif lines
 * selecting between the MIPS and generic values are incomplete here.
 */
59 #if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
61 * When running 32-on-64 we should make sure we can fit all of the possible
62 * guest address space into a contiguous chunk of virtual host memory.
64 * This way we will never overlap with our own libraries or binaries or stack
65 * or anything else that QEMU maps.
68 /* MIPS only supports 31 bits of virtual address space for user space */
69 unsigned long reserved_va
= 0x77000000;
71 unsigned long reserved_va
= 0xf7000000;
74 unsigned long reserved_va
;
77 static void usage(int exitcode
);
79 static const char *interp_prefix
= CONFIG_QEMU_INTERP_PREFIX
;
80 const char *qemu_uname_release
;
82 /* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
83 we allocate a bigger stack. Need a better solution, for example
84 by remapping the process stack directly at the right place */
85 unsigned long guest_stack_size
= 8 * 1024 * 1024UL;
/* printf-style logging helper: forwards the message to stderr. */
void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}
96 #if defined(TARGET_I386)
/* Return the pending PIC interrupt vector for the emulated x86 CPU.
 * NOTE(review): the function body is missing from this excerpt --
 * presumably it returns a constant in user mode; confirm upstream. */
97 int cpu_get_pic_interrupt(CPUX86State
*env
)
/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest
   code.  The alternative is to map target atomic ops onto host
   equivalents, which requires quite a lot of per host/target work.  */
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
static int pending_cpus;
116 /* Make sure everything is in a consistent state for calling fork(). */
117 void fork_start(void)
119 qemu_mutex_lock(&tcg_ctx
.tb_ctx
.tb_lock
);
120 pthread_mutex_lock(&exclusive_lock
);
124 void fork_end(int child
)
126 mmap_fork_end(child
);
128 CPUState
*cpu
, *next_cpu
;
129 /* Child processes created by fork() only have a single thread.
130 Discard information about the parent threads. */
131 CPU_FOREACH_SAFE(cpu
, next_cpu
) {
132 if (cpu
!= thread_cpu
) {
133 QTAILQ_REMOVE(&cpus
, thread_cpu
, node
);
137 pthread_mutex_init(&exclusive_lock
, NULL
);
138 pthread_mutex_init(&cpu_list_mutex
, NULL
);
139 pthread_cond_init(&exclusive_cond
, NULL
);
140 pthread_cond_init(&exclusive_resume
, NULL
);
141 qemu_mutex_init(&tcg_ctx
.tb_ctx
.tb_lock
);
142 gdbserver_fork(thread_cpu
);
144 pthread_mutex_unlock(&exclusive_lock
);
145 qemu_mutex_unlock(&tcg_ctx
.tb_ctx
.tb_lock
);
149 /* Wait for pending exclusive operations to complete. The exclusive lock
151 static inline void exclusive_idle(void)
153 while (pending_cpus
) {
154 pthread_cond_wait(&exclusive_resume
, &exclusive_lock
);
/* NOTE(review): extraction-damaged block -- the other_cpu declaration,
 * the pending_cpus accounting and the cpu_exit() call on running CPUs
 * are missing from this excerpt; compare against upstream before use. */
158 /* Start an exclusive operation.
159 Must only be called from outside cpu_arm_exec. */
160 static inline void start_exclusive(void)
164 pthread_mutex_lock(&exclusive_lock
);
168 /* Make all other cpus stop executing. */
169 CPU_FOREACH(other_cpu
) {
170 if (other_cpu
->running
) {
175 if (pending_cpus
> 1) {
176 pthread_cond_wait(&exclusive_cond
, &exclusive_lock
);
180 /* Finish an exclusive operation. */
181 static inline void __attribute__((unused
)) end_exclusive(void)
184 pthread_cond_broadcast(&exclusive_resume
);
185 pthread_mutex_unlock(&exclusive_lock
);
188 /* Wait for exclusive ops to finish, and begin cpu execution. */
189 static inline void cpu_exec_start(CPUState
*cpu
)
191 pthread_mutex_lock(&exclusive_lock
);
194 pthread_mutex_unlock(&exclusive_lock
);
197 /* Mark cpu as not executing, and release pending exclusive ops. */
198 static inline void cpu_exec_end(CPUState
*cpu
)
200 pthread_mutex_lock(&exclusive_lock
);
201 cpu
->running
= false;
202 if (pending_cpus
> 1) {
204 if (pending_cpus
== 1) {
205 pthread_cond_signal(&exclusive_cond
);
209 pthread_mutex_unlock(&exclusive_lock
);
212 void cpu_list_lock(void)
214 pthread_mutex_lock(&cpu_list_mutex
);
217 void cpu_list_unlock(void)
219 pthread_mutex_unlock(&cpu_list_mutex
);
224 /***********************************************************/
225 /* CPUX86 core interface */
227 uint64_t cpu_get_tsc(CPUX86State
*env
)
229 return cpu_get_host_ticks();
/* Pack a segment descriptor (base address, limit, flag bits) into the
   two 32-bit descriptor words at *ptr, stored in target byte order. */
static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
                     int flags)
{
    unsigned int e1, e2;
    uint32_t *p;

    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
    e2 |= flags;
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}
/* Guest IDT entries; written via the set_idt()/set_gate helpers below. */
static uint64_t *idt_table;
/* Encode a 64-bit IDT gate (type, privilege level, handler address,
   selector) into four target-order 32-bit words at *ptr. */
static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;

    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
    p[2] = tswap32(addr >> 32);
    p[3] = 0;
}
259 /* only dpl matters as we do only user space emulation */
260 static void set_idt(int n
, unsigned int dpl
)
262 set_gate64(idt_table
+ n
* 2, 0, dpl
, 0, 0);
/* Encode a 32-bit IDT gate (type, privilege level, handler address,
   selector) into two target-order 32-bit words at *ptr. */
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;

    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}
276 /* only dpl matters as we do only user space emulation */
277 static void set_idt(int n
, unsigned int dpl
)
279 set_gate(idt_table
+ n
, 0, dpl
, 0, 0);
283 void cpu_loop(CPUX86State
*env
)
285 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
288 target_siginfo_t info
;
292 trapnr
= cpu_x86_exec(cs
);
296 /* linux syscall from int $0x80 */
297 env
->regs
[R_EAX
] = do_syscall(env
,
309 /* linux syscall from syscall instruction */
310 env
->regs
[R_EAX
] = do_syscall(env
,
323 info
.si_signo
= TARGET_SIGBUS
;
325 info
.si_code
= TARGET_SI_KERNEL
;
326 info
._sifields
._sigfault
._addr
= 0;
327 queue_signal(env
, info
.si_signo
, &info
);
330 /* XXX: potential problem if ABI32 */
331 #ifndef TARGET_X86_64
332 if (env
->eflags
& VM_MASK
) {
333 handle_vm86_fault(env
);
337 info
.si_signo
= TARGET_SIGSEGV
;
339 info
.si_code
= TARGET_SI_KERNEL
;
340 info
._sifields
._sigfault
._addr
= 0;
341 queue_signal(env
, info
.si_signo
, &info
);
345 info
.si_signo
= TARGET_SIGSEGV
;
347 if (!(env
->error_code
& 1))
348 info
.si_code
= TARGET_SEGV_MAPERR
;
350 info
.si_code
= TARGET_SEGV_ACCERR
;
351 info
._sifields
._sigfault
._addr
= env
->cr
[2];
352 queue_signal(env
, info
.si_signo
, &info
);
355 #ifndef TARGET_X86_64
356 if (env
->eflags
& VM_MASK
) {
357 handle_vm86_trap(env
, trapnr
);
361 /* division by zero */
362 info
.si_signo
= TARGET_SIGFPE
;
364 info
.si_code
= TARGET_FPE_INTDIV
;
365 info
._sifields
._sigfault
._addr
= env
->eip
;
366 queue_signal(env
, info
.si_signo
, &info
);
371 #ifndef TARGET_X86_64
372 if (env
->eflags
& VM_MASK
) {
373 handle_vm86_trap(env
, trapnr
);
377 info
.si_signo
= TARGET_SIGTRAP
;
379 if (trapnr
== EXCP01_DB
) {
380 info
.si_code
= TARGET_TRAP_BRKPT
;
381 info
._sifields
._sigfault
._addr
= env
->eip
;
383 info
.si_code
= TARGET_SI_KERNEL
;
384 info
._sifields
._sigfault
._addr
= 0;
386 queue_signal(env
, info
.si_signo
, &info
);
391 #ifndef TARGET_X86_64
392 if (env
->eflags
& VM_MASK
) {
393 handle_vm86_trap(env
, trapnr
);
397 info
.si_signo
= TARGET_SIGSEGV
;
399 info
.si_code
= TARGET_SI_KERNEL
;
400 info
._sifields
._sigfault
._addr
= 0;
401 queue_signal(env
, info
.si_signo
, &info
);
405 info
.si_signo
= TARGET_SIGILL
;
407 info
.si_code
= TARGET_ILL_ILLOPN
;
408 info
._sifields
._sigfault
._addr
= env
->eip
;
409 queue_signal(env
, info
.si_signo
, &info
);
412 /* just indicate that signals should be handled asap */
418 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
423 info
.si_code
= TARGET_TRAP_BRKPT
;
424 queue_signal(env
, info
.si_signo
, &info
);
429 pc
= env
->segs
[R_CS
].base
+ env
->eip
;
430 EXCP_DUMP(env
, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
434 process_pending_signals(env
);
/* Fetch a 32-bit instruction word; byte-swaps when bswap_code(arm_sctlr_b(env)) says so. */
441 #define get_user_code_u32(x, gaddr, env) \
442 ({ abi_long __r = get_user_u32((x), (gaddr)); \
443 if (!__r && bswap_code(arm_sctlr_b(env))) { \
/* Fetch a 16-bit (Thumb) instruction halfword, with the same code byte-swap rule. */
449 #define get_user_code_u16(x, gaddr, env) \
450 ({ abi_long __r = get_user_u16((x), (gaddr)); \
451 if (!__r && bswap_code(arm_sctlr_b(env))) { \
/* Load 32-bit data; byte-swaps when arm_cpu_bswap_data(env) is true. */
457 #define get_user_data_u32(x, gaddr, env) \
458 ({ abi_long __r = get_user_u32((x), (gaddr)); \
459 if (!__r && arm_cpu_bswap_data(env)) { \
/* Load 16-bit data; byte-swaps when arm_cpu_bswap_data(env) is true. */
465 #define get_user_data_u16(x, gaddr, env) \
466 ({ abi_long __r = get_user_u16((x), (gaddr)); \
467 if (!__r && arm_cpu_bswap_data(env)) { \
/* Store 32-bit data, pre-swapping the value when data byte-swap is enabled. */
473 #define put_user_data_u32(x, gaddr, env) \
474 ({ typeof(x) __x = (x); \
475 if (arm_cpu_bswap_data(env)) { \
476 __x = bswap32(__x); \
478 put_user_u32(__x, (gaddr)); \
/* Store 16-bit data, pre-swapping the value when data byte-swap is enabled. */
481 #define put_user_data_u16(x, gaddr, env) \
482 ({ typeof(x) __x = (x); \
483 if (arm_cpu_bswap_data(env)) { \
484 __x = bswap16(__x); \
486 put_user_u16(__x, (gaddr)); \
490 /* Commpage handling -- there is no commpage for AArch64 */
493 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
495 * r0 = pointer to oldval
496 * r1 = pointer to newval
497 * r2 = pointer to target value
500 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
501 * C set if *ptr was changed, clear if no exchange happened
503 * Note segv's in kernel helpers are a bit tricky, we can set the
504 * data address sensibly but the PC address is just the entry point.
506 static void arm_kernel_cmpxchg64_helper(CPUARMState
*env
)
508 uint64_t oldval
, newval
, val
;
510 target_siginfo_t info
;
512 /* Based on the 32 bit code in do_kernel_trap */
514 /* XXX: This only works between threads, not between processes.
515 It's probably possible to implement this with native host
516 operations. However things like ldrex/strex are much harder so
517 there's not much point trying. */
519 cpsr
= cpsr_read(env
);
522 if (get_user_u64(oldval
, env
->regs
[0])) {
523 env
->exception
.vaddress
= env
->regs
[0];
527 if (get_user_u64(newval
, env
->regs
[1])) {
528 env
->exception
.vaddress
= env
->regs
[1];
532 if (get_user_u64(val
, addr
)) {
533 env
->exception
.vaddress
= addr
;
540 if (put_user_u64(val
, addr
)) {
541 env
->exception
.vaddress
= addr
;
551 cpsr_write(env
, cpsr
, CPSR_C
, CPSRWriteByInstr
);
557 /* We get the PC of the entry address - which is as good as anything,
558 on a real kernel what you get depends on which mode it uses. */
559 info
.si_signo
= TARGET_SIGSEGV
;
561 /* XXX: check env->error_code */
562 info
.si_code
= TARGET_SEGV_MAPERR
;
563 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
564 queue_signal(env
, info
.si_signo
, &info
);
567 /* Handle a jump to the kernel code page. */
569 do_kernel_trap(CPUARMState
*env
)
575 switch (env
->regs
[15]) {
576 case 0xffff0fa0: /* __kernel_memory_barrier */
577 /* ??? No-op. Will need to do better for SMP. */
579 case 0xffff0fc0: /* __kernel_cmpxchg */
580 /* XXX: This only works between threads, not between processes.
581 It's probably possible to implement this with native host
582 operations. However things like ldrex/strex are much harder so
583 there's not much point trying. */
585 cpsr
= cpsr_read(env
);
587 /* FIXME: This should SEGV if the access fails. */
588 if (get_user_u32(val
, addr
))
590 if (val
== env
->regs
[0]) {
592 /* FIXME: Check for segfaults. */
593 put_user_u32(val
, addr
);
600 cpsr_write(env
, cpsr
, CPSR_C
, CPSRWriteByInstr
);
603 case 0xffff0fe0: /* __kernel_get_tls */
604 env
->regs
[0] = cpu_get_tls(env
);
606 case 0xffff0f60: /* __kernel_cmpxchg64 */
607 arm_kernel_cmpxchg64_helper(env
);
613 /* Jump back to the caller. */
614 addr
= env
->regs
[14];
619 env
->regs
[15] = addr
;
624 /* Store exclusive handling for AArch32 */
625 static int do_strex(CPUARMState
*env
)
633 if (env
->exclusive_addr
!= env
->exclusive_test
) {
636 /* We know we're always AArch32 so the address is in uint32_t range
637 * unless it was the -1 exclusive-monitor-lost value (which won't
638 * match exclusive_test above).
640 assert(extract64(env
->exclusive_addr
, 32, 32) == 0);
641 addr
= env
->exclusive_addr
;
642 size
= env
->exclusive_info
& 0xf;
645 segv
= get_user_u8(val
, addr
);
648 segv
= get_user_data_u16(val
, addr
, env
);
652 segv
= get_user_data_u32(val
, addr
, env
);
658 env
->exception
.vaddress
= addr
;
663 segv
= get_user_data_u32(valhi
, addr
+ 4, env
);
665 env
->exception
.vaddress
= addr
+ 4;
668 if (arm_cpu_bswap_data(env
)) {
669 val
= deposit64((uint64_t)valhi
, 32, 32, val
);
671 val
= deposit64(val
, 32, 32, valhi
);
674 if (val
!= env
->exclusive_val
) {
678 val
= env
->regs
[(env
->exclusive_info
>> 8) & 0xf];
681 segv
= put_user_u8(val
, addr
);
684 segv
= put_user_data_u16(val
, addr
, env
);
688 segv
= put_user_data_u32(val
, addr
, env
);
692 env
->exception
.vaddress
= addr
;
696 val
= env
->regs
[(env
->exclusive_info
>> 12) & 0xf];
697 segv
= put_user_data_u32(val
, addr
+ 4, env
);
699 env
->exception
.vaddress
= addr
+ 4;
706 env
->regs
[(env
->exclusive_info
>> 4) & 0xf] = rc
;
712 void cpu_loop(CPUARMState
*env
)
714 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
716 unsigned int n
, insn
;
717 target_siginfo_t info
;
722 trapnr
= cpu_arm_exec(cs
);
727 TaskState
*ts
= cs
->opaque
;
731 /* we handle the FPU emulation here, as Linux */
732 /* we get the opcode */
733 /* FIXME - what to do if get_user() fails? */
734 get_user_code_u32(opcode
, env
->regs
[15], env
);
736 rc
= EmulateAll(opcode
, &ts
->fpa
, env
);
737 if (rc
== 0) { /* illegal instruction */
738 info
.si_signo
= TARGET_SIGILL
;
740 info
.si_code
= TARGET_ILL_ILLOPN
;
741 info
._sifields
._sigfault
._addr
= env
->regs
[15];
742 queue_signal(env
, info
.si_signo
, &info
);
743 } else if (rc
< 0) { /* FP exception */
746 /* translate softfloat flags to FPSR flags */
747 if (-rc
& float_flag_invalid
)
749 if (-rc
& float_flag_divbyzero
)
751 if (-rc
& float_flag_overflow
)
753 if (-rc
& float_flag_underflow
)
755 if (-rc
& float_flag_inexact
)
758 FPSR fpsr
= ts
->fpa
.fpsr
;
759 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
761 if (fpsr
& (arm_fpe
<< 16)) { /* exception enabled? */
762 info
.si_signo
= TARGET_SIGFPE
;
765 /* ordered by priority, least first */
766 if (arm_fpe
& BIT_IXC
) info
.si_code
= TARGET_FPE_FLTRES
;
767 if (arm_fpe
& BIT_UFC
) info
.si_code
= TARGET_FPE_FLTUND
;
768 if (arm_fpe
& BIT_OFC
) info
.si_code
= TARGET_FPE_FLTOVF
;
769 if (arm_fpe
& BIT_DZC
) info
.si_code
= TARGET_FPE_FLTDIV
;
770 if (arm_fpe
& BIT_IOC
) info
.si_code
= TARGET_FPE_FLTINV
;
772 info
._sifields
._sigfault
._addr
= env
->regs
[15];
773 queue_signal(env
, info
.si_signo
, &info
);
778 /* accumulate unenabled exceptions */
779 if ((!(fpsr
& BIT_IXE
)) && (arm_fpe
& BIT_IXC
))
781 if ((!(fpsr
& BIT_UFE
)) && (arm_fpe
& BIT_UFC
))
783 if ((!(fpsr
& BIT_OFE
)) && (arm_fpe
& BIT_OFC
))
785 if ((!(fpsr
& BIT_DZE
)) && (arm_fpe
& BIT_DZC
))
787 if ((!(fpsr
& BIT_IOE
)) && (arm_fpe
& BIT_IOC
))
790 } else { /* everything OK */
801 if (trapnr
== EXCP_BKPT
) {
803 /* FIXME - what to do if get_user() fails? */
804 get_user_code_u16(insn
, env
->regs
[15], env
);
808 /* FIXME - what to do if get_user() fails? */
809 get_user_code_u32(insn
, env
->regs
[15], env
);
810 n
= (insn
& 0xf) | ((insn
>> 4) & 0xff0);
815 /* FIXME - what to do if get_user() fails? */
816 get_user_code_u16(insn
, env
->regs
[15] - 2, env
);
819 /* FIXME - what to do if get_user() fails? */
820 get_user_code_u32(insn
, env
->regs
[15] - 4, env
);
825 if (n
== ARM_NR_cacheflush
) {
827 } else if (n
== ARM_NR_semihosting
828 || n
== ARM_NR_thumb_semihosting
) {
829 env
->regs
[0] = do_arm_semihosting (env
);
830 } else if (n
== 0 || n
>= ARM_SYSCALL_BASE
|| env
->thumb
) {
832 if (env
->thumb
|| n
== 0) {
835 n
-= ARM_SYSCALL_BASE
;
838 if ( n
> ARM_NR_BASE
) {
840 case ARM_NR_cacheflush
:
844 cpu_set_tls(env
, env
->regs
[0]);
847 case ARM_NR_breakpoint
:
848 env
->regs
[15] -= env
->thumb
? 2 : 4;
851 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
853 env
->regs
[0] = -TARGET_ENOSYS
;
857 env
->regs
[0] = do_syscall(env
,
873 /* just indicate that signals should be handled asap */
876 if (!do_strex(env
)) {
879 /* fall through for segv */
880 case EXCP_PREFETCH_ABORT
:
881 case EXCP_DATA_ABORT
:
882 addr
= env
->exception
.vaddress
;
884 info
.si_signo
= TARGET_SIGSEGV
;
886 /* XXX: check env->error_code */
887 info
.si_code
= TARGET_SEGV_MAPERR
;
888 info
._sifields
._sigfault
._addr
= addr
;
889 queue_signal(env
, info
.si_signo
, &info
);
897 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
902 info
.si_code
= TARGET_TRAP_BRKPT
;
903 queue_signal(env
, info
.si_signo
, &info
);
907 case EXCP_KERNEL_TRAP
:
908 if (do_kernel_trap(env
))
912 /* nothing to do here for user-mode, just resume guest code */
916 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
919 process_pending_signals(env
);
926 * Handle AArch64 store-release exclusive
928 * rs = gets the status result of store exclusive
929 * rt = is the register that is stored
930 * rt2 = is the second register store (in STP)
933 static int do_strex_a64(CPUARMState
*env
)
944 /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
945 size
= extract32(env
->exclusive_info
, 0, 2);
946 is_pair
= extract32(env
->exclusive_info
, 2, 1);
947 rs
= extract32(env
->exclusive_info
, 4, 5);
948 rt
= extract32(env
->exclusive_info
, 9, 5);
949 rt2
= extract32(env
->exclusive_info
, 14, 5);
951 addr
= env
->exclusive_addr
;
953 if (addr
!= env
->exclusive_test
) {
959 segv
= get_user_u8(val
, addr
);
962 segv
= get_user_u16(val
, addr
);
965 segv
= get_user_u32(val
, addr
);
968 segv
= get_user_u64(val
, addr
);
974 env
->exception
.vaddress
= addr
;
977 if (val
!= env
->exclusive_val
) {
982 segv
= get_user_u32(val
, addr
+ 4);
984 segv
= get_user_u64(val
, addr
+ 8);
987 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
990 if (val
!= env
->exclusive_high
) {
994 /* handle the zero register */
995 val
= rt
== 31 ? 0 : env
->xregs
[rt
];
998 segv
= put_user_u8(val
, addr
);
1001 segv
= put_user_u16(val
, addr
);
1004 segv
= put_user_u32(val
, addr
);
1007 segv
= put_user_u64(val
, addr
);
1014 /* handle the zero register */
1015 val
= rt2
== 31 ? 0 : env
->xregs
[rt2
];
1017 segv
= put_user_u32(val
, addr
+ 4);
1019 segv
= put_user_u64(val
, addr
+ 8);
1022 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
1029 /* rs == 31 encodes a write to the ZR, thus throwing away
1030 * the status return. This is rather silly but valid.
1033 env
->xregs
[rs
] = rc
;
1036 /* instruction faulted, PC does not advance */
1037 /* either way a strex releases any exclusive lock we have */
1038 env
->exclusive_addr
= -1;
1043 /* AArch64 main loop */
1044 void cpu_loop(CPUARMState
*env
)
1046 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
1048 target_siginfo_t info
;
1052 trapnr
= cpu_arm_exec(cs
);
1057 env
->xregs
[0] = do_syscall(env
,
1067 case EXCP_INTERRUPT
:
1068 /* just indicate that signals should be handled asap */
1071 info
.si_signo
= TARGET_SIGILL
;
1073 info
.si_code
= TARGET_ILL_ILLOPN
;
1074 info
._sifields
._sigfault
._addr
= env
->pc
;
1075 queue_signal(env
, info
.si_signo
, &info
);
1078 if (!do_strex_a64(env
)) {
1081 /* fall through for segv */
1082 case EXCP_PREFETCH_ABORT
:
1083 case EXCP_DATA_ABORT
:
1084 info
.si_signo
= TARGET_SIGSEGV
;
1086 /* XXX: check env->error_code */
1087 info
.si_code
= TARGET_SEGV_MAPERR
;
1088 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
1089 queue_signal(env
, info
.si_signo
, &info
);
1093 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1095 info
.si_signo
= sig
;
1097 info
.si_code
= TARGET_TRAP_BRKPT
;
1098 queue_signal(env
, info
.si_signo
, &info
);
1102 env
->xregs
[0] = do_arm_semihosting(env
);
1105 /* nothing to do here for user-mode, just resume guest code */
1108 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1111 process_pending_signals(env
);
1112 /* Exception return on AArch64 always clears the exclusive monitor,
1113 * so any return to running guest code implies this.
1114 * A strex (successful or otherwise) also clears the monitor, so
1115 * we don't need to specialcase EXCP_STREX.
1117 env
->exclusive_addr
= -1;
1120 #endif /* ndef TARGET_ABI32 */
1124 #ifdef TARGET_UNICORE32
1126 void cpu_loop(CPUUniCore32State
*env
)
1128 CPUState
*cs
= CPU(uc32_env_get_cpu(env
));
1130 unsigned int n
, insn
;
1131 target_siginfo_t info
;
1135 trapnr
= uc32_cpu_exec(cs
);
1138 case UC32_EXCP_PRIV
:
1141 get_user_u32(insn
, env
->regs
[31] - 4);
1142 n
= insn
& 0xffffff;
1144 if (n
>= UC32_SYSCALL_BASE
) {
1146 n
-= UC32_SYSCALL_BASE
;
1147 if (n
== UC32_SYSCALL_NR_set_tls
) {
1148 cpu_set_tls(env
, env
->regs
[0]);
1151 env
->regs
[0] = do_syscall(env
,
1166 case UC32_EXCP_DTRAP
:
1167 case UC32_EXCP_ITRAP
:
1168 info
.si_signo
= TARGET_SIGSEGV
;
1170 /* XXX: check env->error_code */
1171 info
.si_code
= TARGET_SEGV_MAPERR
;
1172 info
._sifields
._sigfault
._addr
= env
->cp0
.c4_faultaddr
;
1173 queue_signal(env
, info
.si_signo
, &info
);
1175 case EXCP_INTERRUPT
:
1176 /* just indicate that signals should be handled asap */
1182 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1184 info
.si_signo
= sig
;
1186 info
.si_code
= TARGET_TRAP_BRKPT
;
1187 queue_signal(env
, info
.si_signo
, &info
);
1194 process_pending_signals(env
);
1198 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1204 #define SPARC64_STACK_BIAS 2047
1208 /* WARNING: dealing with register windows _is_ complicated. More info
1209 can be found at http://www.sics.se/~psm/sparcstack.html */
1210 static inline int get_reg_index(CPUSPARCState
*env
, int cwp
, int index
)
1212 index
= (index
+ cwp
* 16) % (16 * env
->nwindows
);
1213 /* wrap handling : if cwp is on the last window, then we use the
1214 registers 'after' the end */
1215 if (index
< 8 && env
->cwp
== env
->nwindows
- 1)
1216 index
+= 16 * env
->nwindows
;
1220 /* save the register window 'cwp1' */
1221 static inline void save_window_offset(CPUSPARCState
*env
, int cwp1
)
1226 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1227 #ifdef TARGET_SPARC64
1229 sp_ptr
+= SPARC64_STACK_BIAS
;
1231 #if defined(DEBUG_WIN)
1232 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" save_cwp=%d\n",
1235 for(i
= 0; i
< 16; i
++) {
1236 /* FIXME - what to do if put_user() fails? */
1237 put_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1238 sp_ptr
+= sizeof(abi_ulong
);
1242 static void save_window(CPUSPARCState
*env
)
1244 #ifndef TARGET_SPARC64
1245 unsigned int new_wim
;
1246 new_wim
= ((env
->wim
>> 1) | (env
->wim
<< (env
->nwindows
- 1))) &
1247 ((1LL << env
->nwindows
) - 1);
1248 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1251 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1257 static void restore_window(CPUSPARCState
*env
)
1259 #ifndef TARGET_SPARC64
1260 unsigned int new_wim
;
1262 unsigned int i
, cwp1
;
1265 #ifndef TARGET_SPARC64
1266 new_wim
= ((env
->wim
<< 1) | (env
->wim
>> (env
->nwindows
- 1))) &
1267 ((1LL << env
->nwindows
) - 1);
1270 /* restore the invalid window */
1271 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1272 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1273 #ifdef TARGET_SPARC64
1275 sp_ptr
+= SPARC64_STACK_BIAS
;
1277 #if defined(DEBUG_WIN)
1278 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" load_cwp=%d\n",
1281 for(i
= 0; i
< 16; i
++) {
1282 /* FIXME - what to do if get_user() fails? */
1283 get_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1284 sp_ptr
+= sizeof(abi_ulong
);
1286 #ifdef TARGET_SPARC64
1288 if (env
->cleanwin
< env
->nwindows
- 1)
1296 static void flush_windows(CPUSPARCState
*env
)
1302 /* if restore would invoke restore_window(), then we can stop */
1303 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ offset
);
1304 #ifndef TARGET_SPARC64
1305 if (env
->wim
& (1 << cwp1
))
1308 if (env
->canrestore
== 0)
1313 save_window_offset(env
, cwp1
);
1316 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1317 #ifndef TARGET_SPARC64
1318 /* set wim so that restore will reload the registers */
1319 env
->wim
= 1 << cwp1
;
1321 #if defined(DEBUG_WIN)
1322 printf("flush_windows: nb=%d\n", offset
- 1);
1326 void cpu_loop (CPUSPARCState
*env
)
1328 CPUState
*cs
= CPU(sparc_env_get_cpu(env
));
1331 target_siginfo_t info
;
1335 trapnr
= cpu_sparc_exec(cs
);
1338 /* Compute PSR before exposing state. */
1339 if (env
->cc_op
!= CC_OP_FLAGS
) {
1344 #ifndef TARGET_SPARC64
1351 ret
= do_syscall (env
, env
->gregs
[1],
1352 env
->regwptr
[0], env
->regwptr
[1],
1353 env
->regwptr
[2], env
->regwptr
[3],
1354 env
->regwptr
[4], env
->regwptr
[5],
1356 if ((abi_ulong
)ret
>= (abi_ulong
)(-515)) {
1357 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1358 env
->xcc
|= PSR_CARRY
;
1360 env
->psr
|= PSR_CARRY
;
1364 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1365 env
->xcc
&= ~PSR_CARRY
;
1367 env
->psr
&= ~PSR_CARRY
;
1370 env
->regwptr
[0] = ret
;
1371 /* next instruction */
1373 env
->npc
= env
->npc
+ 4;
1375 case 0x83: /* flush windows */
1380 /* next instruction */
1382 env
->npc
= env
->npc
+ 4;
1384 #ifndef TARGET_SPARC64
1385 case TT_WIN_OVF
: /* window overflow */
1388 case TT_WIN_UNF
: /* window underflow */
1389 restore_window(env
);
1394 info
.si_signo
= TARGET_SIGSEGV
;
1396 /* XXX: check env->error_code */
1397 info
.si_code
= TARGET_SEGV_MAPERR
;
1398 info
._sifields
._sigfault
._addr
= env
->mmuregs
[4];
1399 queue_signal(env
, info
.si_signo
, &info
);
1403 case TT_SPILL
: /* window overflow */
1406 case TT_FILL
: /* window underflow */
1407 restore_window(env
);
1412 info
.si_signo
= TARGET_SIGSEGV
;
1414 /* XXX: check env->error_code */
1415 info
.si_code
= TARGET_SEGV_MAPERR
;
1416 if (trapnr
== TT_DFAULT
)
1417 info
._sifields
._sigfault
._addr
= env
->dmmuregs
[4];
1419 info
._sifields
._sigfault
._addr
= cpu_tsptr(env
)->tpc
;
1420 queue_signal(env
, info
.si_signo
, &info
);
1423 #ifndef TARGET_ABI32
1426 sparc64_get_context(env
);
1430 sparc64_set_context(env
);
1434 case EXCP_INTERRUPT
:
1435 /* just indicate that signals should be handled asap */
1439 info
.si_signo
= TARGET_SIGILL
;
1441 info
.si_code
= TARGET_ILL_ILLOPC
;
1442 info
._sifields
._sigfault
._addr
= env
->pc
;
1443 queue_signal(env
, info
.si_signo
, &info
);
1450 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1453 info
.si_signo
= sig
;
1455 info
.si_code
= TARGET_TRAP_BRKPT
;
1456 queue_signal(env
, info
.si_signo
, &info
);
1461 printf ("Unhandled trap: 0x%x\n", trapnr
);
1462 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1465 process_pending_signals (env
);
1472 static inline uint64_t cpu_ppc_get_tb(CPUPPCState
*env
)
1474 return cpu_get_host_ticks();
1477 uint64_t cpu_ppc_load_tbl(CPUPPCState
*env
)
1479 return cpu_ppc_get_tb(env
);
1482 uint32_t cpu_ppc_load_tbu(CPUPPCState
*env
)
1484 return cpu_ppc_get_tb(env
) >> 32;
1487 uint64_t cpu_ppc_load_atbl(CPUPPCState
*env
)
1489 return cpu_ppc_get_tb(env
);
1492 uint32_t cpu_ppc_load_atbu(CPUPPCState
*env
)
1494 return cpu_ppc_get_tb(env
) >> 32;
1497 uint32_t cpu_ppc601_load_rtcu(CPUPPCState
*env
)
1498 __attribute__ (( alias ("cpu_ppc_load_tbu") ));
1500 uint32_t cpu_ppc601_load_rtcl(CPUPPCState
*env
)
1502 return cpu_ppc_load_tbl(env
) & 0x3FFFFF80;
1505 /* XXX: to be fixed */
1506 int ppc_dcr_read (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t *valp
)
1511 int ppc_dcr_write (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t val
)
1516 static int do_store_exclusive(CPUPPCState
*env
)
1519 target_ulong page_addr
;
1520 target_ulong val
, val2
__attribute__((unused
)) = 0;
1524 addr
= env
->reserve_ea
;
1525 page_addr
= addr
& TARGET_PAGE_MASK
;
1528 flags
= page_get_flags(page_addr
);
1529 if ((flags
& PAGE_READ
) == 0) {
1532 int reg
= env
->reserve_info
& 0x1f;
1533 int size
= env
->reserve_info
>> 5;
1536 if (addr
== env
->reserve_addr
) {
1538 case 1: segv
= get_user_u8(val
, addr
); break;
1539 case 2: segv
= get_user_u16(val
, addr
); break;
1540 case 4: segv
= get_user_u32(val
, addr
); break;
1541 #if defined(TARGET_PPC64)
1542 case 8: segv
= get_user_u64(val
, addr
); break;
1544 segv
= get_user_u64(val
, addr
);
1546 segv
= get_user_u64(val2
, addr
+ 8);
1553 if (!segv
&& val
== env
->reserve_val
) {
1554 val
= env
->gpr
[reg
];
1556 case 1: segv
= put_user_u8(val
, addr
); break;
1557 case 2: segv
= put_user_u16(val
, addr
); break;
1558 case 4: segv
= put_user_u32(val
, addr
); break;
1559 #if defined(TARGET_PPC64)
1560 case 8: segv
= put_user_u64(val
, addr
); break;
1562 if (val2
== env
->reserve_val2
) {
1565 val
= env
->gpr
[reg
+1];
1567 val2
= env
->gpr
[reg
+1];
1569 segv
= put_user_u64(val
, addr
);
1571 segv
= put_user_u64(val2
, addr
+ 8);
1584 env
->crf
[0] = (stored
<< 1) | xer_so
;
1585 env
->reserve_addr
= (target_ulong
)-1;
1595 void cpu_loop(CPUPPCState
*env
)
1597 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
1598 target_siginfo_t info
;
1604 trapnr
= cpu_ppc_exec(cs
);
1607 case POWERPC_EXCP_NONE
:
1610 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1611 cpu_abort(cs
, "Critical interrupt while in user mode. "
1614 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1615 cpu_abort(cs
, "Machine check exception while in user mode. "
1618 case POWERPC_EXCP_DSI
: /* Data storage exception */
1619 EXCP_DUMP(env
, "Invalid data memory access: 0x" TARGET_FMT_lx
"\n",
1621 /* XXX: check this. Seems bugged */
1622 switch (env
->error_code
& 0xFF000000) {
1624 info
.si_signo
= TARGET_SIGSEGV
;
1626 info
.si_code
= TARGET_SEGV_MAPERR
;
1629 info
.si_signo
= TARGET_SIGILL
;
1631 info
.si_code
= TARGET_ILL_ILLADR
;
1634 info
.si_signo
= TARGET_SIGSEGV
;
1636 info
.si_code
= TARGET_SEGV_ACCERR
;
1639 /* Let's send a regular segfault... */
1640 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1642 info
.si_signo
= TARGET_SIGSEGV
;
1644 info
.si_code
= TARGET_SEGV_MAPERR
;
1647 info
._sifields
._sigfault
._addr
= env
->nip
;
1648 queue_signal(env
, info
.si_signo
, &info
);
1650 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1651 EXCP_DUMP(env
, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1652 "\n", env
->spr
[SPR_SRR0
]);
1653 /* XXX: check this */
1654 switch (env
->error_code
& 0xFF000000) {
1656 info
.si_signo
= TARGET_SIGSEGV
;
1658 info
.si_code
= TARGET_SEGV_MAPERR
;
1662 info
.si_signo
= TARGET_SIGSEGV
;
1664 info
.si_code
= TARGET_SEGV_ACCERR
;
1667 /* Let's send a regular segfault... */
1668 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1670 info
.si_signo
= TARGET_SIGSEGV
;
1672 info
.si_code
= TARGET_SEGV_MAPERR
;
1675 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1676 queue_signal(env
, info
.si_signo
, &info
);
1678 case POWERPC_EXCP_EXTERNAL
: /* External input */
1679 cpu_abort(cs
, "External interrupt while in user mode. "
1682 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1683 EXCP_DUMP(env
, "Unaligned memory access\n");
1684 /* XXX: check this */
1685 info
.si_signo
= TARGET_SIGBUS
;
1687 info
.si_code
= TARGET_BUS_ADRALN
;
1688 info
._sifields
._sigfault
._addr
= env
->nip
;
1689 queue_signal(env
, info
.si_signo
, &info
);
1691 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1692 /* XXX: check this */
1693 switch (env
->error_code
& ~0xF) {
1694 case POWERPC_EXCP_FP
:
1695 EXCP_DUMP(env
, "Floating point program exception\n");
1696 info
.si_signo
= TARGET_SIGFPE
;
1698 switch (env
->error_code
& 0xF) {
1699 case POWERPC_EXCP_FP_OX
:
1700 info
.si_code
= TARGET_FPE_FLTOVF
;
1702 case POWERPC_EXCP_FP_UX
:
1703 info
.si_code
= TARGET_FPE_FLTUND
;
1705 case POWERPC_EXCP_FP_ZX
:
1706 case POWERPC_EXCP_FP_VXZDZ
:
1707 info
.si_code
= TARGET_FPE_FLTDIV
;
1709 case POWERPC_EXCP_FP_XX
:
1710 info
.si_code
= TARGET_FPE_FLTRES
;
1712 case POWERPC_EXCP_FP_VXSOFT
:
1713 info
.si_code
= TARGET_FPE_FLTINV
;
1715 case POWERPC_EXCP_FP_VXSNAN
:
1716 case POWERPC_EXCP_FP_VXISI
:
1717 case POWERPC_EXCP_FP_VXIDI
:
1718 case POWERPC_EXCP_FP_VXIMZ
:
1719 case POWERPC_EXCP_FP_VXVC
:
1720 case POWERPC_EXCP_FP_VXSQRT
:
1721 case POWERPC_EXCP_FP_VXCVI
:
1722 info
.si_code
= TARGET_FPE_FLTSUB
;
1725 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1730 case POWERPC_EXCP_INVAL
:
1731 EXCP_DUMP(env
, "Invalid instruction\n");
1732 info
.si_signo
= TARGET_SIGILL
;
1734 switch (env
->error_code
& 0xF) {
1735 case POWERPC_EXCP_INVAL_INVAL
:
1736 info
.si_code
= TARGET_ILL_ILLOPC
;
1738 case POWERPC_EXCP_INVAL_LSWX
:
1739 info
.si_code
= TARGET_ILL_ILLOPN
;
1741 case POWERPC_EXCP_INVAL_SPR
:
1742 info
.si_code
= TARGET_ILL_PRVREG
;
1744 case POWERPC_EXCP_INVAL_FP
:
1745 info
.si_code
= TARGET_ILL_COPROC
;
1748 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1749 env
->error_code
& 0xF);
1750 info
.si_code
= TARGET_ILL_ILLADR
;
1754 case POWERPC_EXCP_PRIV
:
1755 EXCP_DUMP(env
, "Privilege violation\n");
1756 info
.si_signo
= TARGET_SIGILL
;
1758 switch (env
->error_code
& 0xF) {
1759 case POWERPC_EXCP_PRIV_OPC
:
1760 info
.si_code
= TARGET_ILL_PRVOPC
;
1762 case POWERPC_EXCP_PRIV_REG
:
1763 info
.si_code
= TARGET_ILL_PRVREG
;
1766 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1767 env
->error_code
& 0xF);
1768 info
.si_code
= TARGET_ILL_PRVOPC
;
1772 case POWERPC_EXCP_TRAP
:
1773 cpu_abort(cs
, "Tried to call a TRAP\n");
1776 /* Should not happen ! */
1777 cpu_abort(cs
, "Unknown program exception (%02x)\n",
1781 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1782 queue_signal(env
, info
.si_signo
, &info
);
1784 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1785 EXCP_DUMP(env
, "No floating point allowed\n");
1786 info
.si_signo
= TARGET_SIGILL
;
1788 info
.si_code
= TARGET_ILL_COPROC
;
1789 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1790 queue_signal(env
, info
.si_signo
, &info
);
1792 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1793 cpu_abort(cs
, "Syscall exception while in user mode. "
1796 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1797 EXCP_DUMP(env
, "No APU instruction allowed\n");
1798 info
.si_signo
= TARGET_SIGILL
;
1800 info
.si_code
= TARGET_ILL_COPROC
;
1801 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1802 queue_signal(env
, info
.si_signo
, &info
);
1804 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1805 cpu_abort(cs
, "Decrementer interrupt while in user mode. "
1808 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1809 cpu_abort(cs
, "Fix interval timer interrupt while in user mode. "
1812 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1813 cpu_abort(cs
, "Watchdog timer interrupt while in user mode. "
1816 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1817 cpu_abort(cs
, "Data TLB exception while in user mode. "
1820 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1821 cpu_abort(cs
, "Instruction TLB exception while in user mode. "
1824 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1825 EXCP_DUMP(env
, "No SPE/floating-point instruction allowed\n");
1826 info
.si_signo
= TARGET_SIGILL
;
1828 info
.si_code
= TARGET_ILL_COPROC
;
1829 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1830 queue_signal(env
, info
.si_signo
, &info
);
1832 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1833 cpu_abort(cs
, "Embedded floating-point data IRQ not handled\n");
1835 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1836 cpu_abort(cs
, "Embedded floating-point round IRQ not handled\n");
1838 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1839 cpu_abort(cs
, "Performance monitor exception not handled\n");
1841 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1842 cpu_abort(cs
, "Doorbell interrupt while in user mode. "
1845 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1846 cpu_abort(cs
, "Doorbell critical interrupt while in user mode. "
1849 case POWERPC_EXCP_RESET
: /* System reset exception */
1850 cpu_abort(cs
, "Reset interrupt while in user mode. "
1853 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1854 cpu_abort(cs
, "Data segment exception while in user mode. "
1857 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1858 cpu_abort(cs
, "Instruction segment exception "
1859 "while in user mode. Aborting\n");
1861 /* PowerPC 64 with hypervisor mode support */
1862 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1863 cpu_abort(cs
, "Hypervisor decrementer interrupt "
1864 "while in user mode. Aborting\n");
1866 case POWERPC_EXCP_TRACE
: /* Trace exception */
1868 * we use this exception to emulate step-by-step execution mode.
1871 /* PowerPC 64 with hypervisor mode support */
1872 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1873 cpu_abort(cs
, "Hypervisor data storage exception "
1874 "while in user mode. Aborting\n");
1876 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1877 cpu_abort(cs
, "Hypervisor instruction storage exception "
1878 "while in user mode. Aborting\n");
1880 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1881 cpu_abort(cs
, "Hypervisor data segment exception "
1882 "while in user mode. Aborting\n");
1884 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1885 cpu_abort(cs
, "Hypervisor instruction segment exception "
1886 "while in user mode. Aborting\n");
1888 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1889 EXCP_DUMP(env
, "No Altivec instructions allowed\n");
1890 info
.si_signo
= TARGET_SIGILL
;
1892 info
.si_code
= TARGET_ILL_COPROC
;
1893 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1894 queue_signal(env
, info
.si_signo
, &info
);
1896 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1897 cpu_abort(cs
, "Programmable interval timer interrupt "
1898 "while in user mode. Aborting\n");
1900 case POWERPC_EXCP_IO
: /* IO error exception */
1901 cpu_abort(cs
, "IO error exception while in user mode. "
1904 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1905 cpu_abort(cs
, "Run mode exception while in user mode. "
1908 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1909 cpu_abort(cs
, "Emulation trap exception not handled\n");
1911 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1912 cpu_abort(cs
, "Instruction fetch TLB exception "
1913 "while in user-mode. Aborting");
1915 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1916 cpu_abort(cs
, "Data load TLB exception while in user-mode. "
1919 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
1920 cpu_abort(cs
, "Data store TLB exception while in user-mode. "
1923 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
1924 cpu_abort(cs
, "Floating-point assist exception not handled\n");
1926 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1927 cpu_abort(cs
, "Instruction address breakpoint exception "
1930 case POWERPC_EXCP_SMI
: /* System management interrupt */
1931 cpu_abort(cs
, "System management interrupt while in user mode. "
1934 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1935 cpu_abort(cs
, "Thermal interrupt interrupt while in user mode. "
1938 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
1939 cpu_abort(cs
, "Performance monitor exception not handled\n");
1941 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1942 cpu_abort(cs
, "Vector assist exception not handled\n");
1944 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
1945 cpu_abort(cs
, "Soft patch exception not handled\n");
1947 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1948 cpu_abort(cs
, "Maintenance exception while in user mode. "
1951 case POWERPC_EXCP_STOP
: /* stop translation */
1952 /* We did invalidate the instruction cache. Go on */
1954 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
1955 /* We just stopped because of a branch. Go on */
1957 case POWERPC_EXCP_SYSCALL_USER
:
1958 /* system call in user-mode emulation */
1960 * PPC ABI uses overflow flag in cr0 to signal an error
1963 env
->crf
[0] &= ~0x1;
1964 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
1965 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
1967 if (ret
== (target_ulong
)(-TARGET_QEMU_ESIGRETURN
)) {
1968 /* Returning from a successful sigreturn syscall.
1969 Avoid corrupting register state. */
1972 if (ret
> (target_ulong
)(-515)) {
1978 case POWERPC_EXCP_STCX
:
1979 if (do_store_exclusive(env
)) {
1980 info
.si_signo
= TARGET_SIGSEGV
;
1982 info
.si_code
= TARGET_SEGV_MAPERR
;
1983 info
._sifields
._sigfault
._addr
= env
->nip
;
1984 queue_signal(env
, info
.si_signo
, &info
);
1991 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1993 info
.si_signo
= sig
;
1995 info
.si_code
= TARGET_TRAP_BRKPT
;
1996 queue_signal(env
, info
.si_signo
, &info
);
2000 case EXCP_INTERRUPT
:
2001 /* just indicate that signals should be handled asap */
2004 cpu_abort(cs
, "Unknown exception 0x%d. Aborting\n", trapnr
);
2007 process_pending_signals(env
);
2014 # ifdef TARGET_ABI_MIPSO32
2015 # define MIPS_SYS(name, args) args,
2016 static const uint8_t mips_syscall_args
[] = {
2017 MIPS_SYS(sys_syscall
, 8) /* 4000 */
2018 MIPS_SYS(sys_exit
, 1)
2019 MIPS_SYS(sys_fork
, 0)
2020 MIPS_SYS(sys_read
, 3)
2021 MIPS_SYS(sys_write
, 3)
2022 MIPS_SYS(sys_open
, 3) /* 4005 */
2023 MIPS_SYS(sys_close
, 1)
2024 MIPS_SYS(sys_waitpid
, 3)
2025 MIPS_SYS(sys_creat
, 2)
2026 MIPS_SYS(sys_link
, 2)
2027 MIPS_SYS(sys_unlink
, 1) /* 4010 */
2028 MIPS_SYS(sys_execve
, 0)
2029 MIPS_SYS(sys_chdir
, 1)
2030 MIPS_SYS(sys_time
, 1)
2031 MIPS_SYS(sys_mknod
, 3)
2032 MIPS_SYS(sys_chmod
, 2) /* 4015 */
2033 MIPS_SYS(sys_lchown
, 3)
2034 MIPS_SYS(sys_ni_syscall
, 0)
2035 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_stat */
2036 MIPS_SYS(sys_lseek
, 3)
2037 MIPS_SYS(sys_getpid
, 0) /* 4020 */
2038 MIPS_SYS(sys_mount
, 5)
2039 MIPS_SYS(sys_umount
, 1)
2040 MIPS_SYS(sys_setuid
, 1)
2041 MIPS_SYS(sys_getuid
, 0)
2042 MIPS_SYS(sys_stime
, 1) /* 4025 */
2043 MIPS_SYS(sys_ptrace
, 4)
2044 MIPS_SYS(sys_alarm
, 1)
2045 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_fstat */
2046 MIPS_SYS(sys_pause
, 0)
2047 MIPS_SYS(sys_utime
, 2) /* 4030 */
2048 MIPS_SYS(sys_ni_syscall
, 0)
2049 MIPS_SYS(sys_ni_syscall
, 0)
2050 MIPS_SYS(sys_access
, 2)
2051 MIPS_SYS(sys_nice
, 1)
2052 MIPS_SYS(sys_ni_syscall
, 0) /* 4035 */
2053 MIPS_SYS(sys_sync
, 0)
2054 MIPS_SYS(sys_kill
, 2)
2055 MIPS_SYS(sys_rename
, 2)
2056 MIPS_SYS(sys_mkdir
, 2)
2057 MIPS_SYS(sys_rmdir
, 1) /* 4040 */
2058 MIPS_SYS(sys_dup
, 1)
2059 MIPS_SYS(sys_pipe
, 0)
2060 MIPS_SYS(sys_times
, 1)
2061 MIPS_SYS(sys_ni_syscall
, 0)
2062 MIPS_SYS(sys_brk
, 1) /* 4045 */
2063 MIPS_SYS(sys_setgid
, 1)
2064 MIPS_SYS(sys_getgid
, 0)
2065 MIPS_SYS(sys_ni_syscall
, 0) /* was signal(2) */
2066 MIPS_SYS(sys_geteuid
, 0)
2067 MIPS_SYS(sys_getegid
, 0) /* 4050 */
2068 MIPS_SYS(sys_acct
, 0)
2069 MIPS_SYS(sys_umount2
, 2)
2070 MIPS_SYS(sys_ni_syscall
, 0)
2071 MIPS_SYS(sys_ioctl
, 3)
2072 MIPS_SYS(sys_fcntl
, 3) /* 4055 */
2073 MIPS_SYS(sys_ni_syscall
, 2)
2074 MIPS_SYS(sys_setpgid
, 2)
2075 MIPS_SYS(sys_ni_syscall
, 0)
2076 MIPS_SYS(sys_olduname
, 1)
2077 MIPS_SYS(sys_umask
, 1) /* 4060 */
2078 MIPS_SYS(sys_chroot
, 1)
2079 MIPS_SYS(sys_ustat
, 2)
2080 MIPS_SYS(sys_dup2
, 2)
2081 MIPS_SYS(sys_getppid
, 0)
2082 MIPS_SYS(sys_getpgrp
, 0) /* 4065 */
2083 MIPS_SYS(sys_setsid
, 0)
2084 MIPS_SYS(sys_sigaction
, 3)
2085 MIPS_SYS(sys_sgetmask
, 0)
2086 MIPS_SYS(sys_ssetmask
, 1)
2087 MIPS_SYS(sys_setreuid
, 2) /* 4070 */
2088 MIPS_SYS(sys_setregid
, 2)
2089 MIPS_SYS(sys_sigsuspend
, 0)
2090 MIPS_SYS(sys_sigpending
, 1)
2091 MIPS_SYS(sys_sethostname
, 2)
2092 MIPS_SYS(sys_setrlimit
, 2) /* 4075 */
2093 MIPS_SYS(sys_getrlimit
, 2)
2094 MIPS_SYS(sys_getrusage
, 2)
2095 MIPS_SYS(sys_gettimeofday
, 2)
2096 MIPS_SYS(sys_settimeofday
, 2)
2097 MIPS_SYS(sys_getgroups
, 2) /* 4080 */
2098 MIPS_SYS(sys_setgroups
, 2)
2099 MIPS_SYS(sys_ni_syscall
, 0) /* old_select */
2100 MIPS_SYS(sys_symlink
, 2)
2101 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_lstat */
2102 MIPS_SYS(sys_readlink
, 3) /* 4085 */
2103 MIPS_SYS(sys_uselib
, 1)
2104 MIPS_SYS(sys_swapon
, 2)
2105 MIPS_SYS(sys_reboot
, 3)
2106 MIPS_SYS(old_readdir
, 3)
2107 MIPS_SYS(old_mmap
, 6) /* 4090 */
2108 MIPS_SYS(sys_munmap
, 2)
2109 MIPS_SYS(sys_truncate
, 2)
2110 MIPS_SYS(sys_ftruncate
, 2)
2111 MIPS_SYS(sys_fchmod
, 2)
2112 MIPS_SYS(sys_fchown
, 3) /* 4095 */
2113 MIPS_SYS(sys_getpriority
, 2)
2114 MIPS_SYS(sys_setpriority
, 3)
2115 MIPS_SYS(sys_ni_syscall
, 0)
2116 MIPS_SYS(sys_statfs
, 2)
2117 MIPS_SYS(sys_fstatfs
, 2) /* 4100 */
2118 MIPS_SYS(sys_ni_syscall
, 0) /* was ioperm(2) */
2119 MIPS_SYS(sys_socketcall
, 2)
2120 MIPS_SYS(sys_syslog
, 3)
2121 MIPS_SYS(sys_setitimer
, 3)
2122 MIPS_SYS(sys_getitimer
, 2) /* 4105 */
2123 MIPS_SYS(sys_newstat
, 2)
2124 MIPS_SYS(sys_newlstat
, 2)
2125 MIPS_SYS(sys_newfstat
, 2)
2126 MIPS_SYS(sys_uname
, 1)
2127 MIPS_SYS(sys_ni_syscall
, 0) /* 4110 was iopl(2) */
2128 MIPS_SYS(sys_vhangup
, 0)
2129 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_idle() */
2130 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_vm86 */
2131 MIPS_SYS(sys_wait4
, 4)
2132 MIPS_SYS(sys_swapoff
, 1) /* 4115 */
2133 MIPS_SYS(sys_sysinfo
, 1)
2134 MIPS_SYS(sys_ipc
, 6)
2135 MIPS_SYS(sys_fsync
, 1)
2136 MIPS_SYS(sys_sigreturn
, 0)
2137 MIPS_SYS(sys_clone
, 6) /* 4120 */
2138 MIPS_SYS(sys_setdomainname
, 2)
2139 MIPS_SYS(sys_newuname
, 1)
2140 MIPS_SYS(sys_ni_syscall
, 0) /* sys_modify_ldt */
2141 MIPS_SYS(sys_adjtimex
, 1)
2142 MIPS_SYS(sys_mprotect
, 3) /* 4125 */
2143 MIPS_SYS(sys_sigprocmask
, 3)
2144 MIPS_SYS(sys_ni_syscall
, 0) /* was create_module */
2145 MIPS_SYS(sys_init_module
, 5)
2146 MIPS_SYS(sys_delete_module
, 1)
2147 MIPS_SYS(sys_ni_syscall
, 0) /* 4130 was get_kernel_syms */
2148 MIPS_SYS(sys_quotactl
, 0)
2149 MIPS_SYS(sys_getpgid
, 1)
2150 MIPS_SYS(sys_fchdir
, 1)
2151 MIPS_SYS(sys_bdflush
, 2)
2152 MIPS_SYS(sys_sysfs
, 3) /* 4135 */
2153 MIPS_SYS(sys_personality
, 1)
2154 MIPS_SYS(sys_ni_syscall
, 0) /* for afs_syscall */
2155 MIPS_SYS(sys_setfsuid
, 1)
2156 MIPS_SYS(sys_setfsgid
, 1)
2157 MIPS_SYS(sys_llseek
, 5) /* 4140 */
2158 MIPS_SYS(sys_getdents
, 3)
2159 MIPS_SYS(sys_select
, 5)
2160 MIPS_SYS(sys_flock
, 2)
2161 MIPS_SYS(sys_msync
, 3)
2162 MIPS_SYS(sys_readv
, 3) /* 4145 */
2163 MIPS_SYS(sys_writev
, 3)
2164 MIPS_SYS(sys_cacheflush
, 3)
2165 MIPS_SYS(sys_cachectl
, 3)
2166 MIPS_SYS(sys_sysmips
, 4)
2167 MIPS_SYS(sys_ni_syscall
, 0) /* 4150 */
2168 MIPS_SYS(sys_getsid
, 1)
2169 MIPS_SYS(sys_fdatasync
, 0)
2170 MIPS_SYS(sys_sysctl
, 1)
2171 MIPS_SYS(sys_mlock
, 2)
2172 MIPS_SYS(sys_munlock
, 2) /* 4155 */
2173 MIPS_SYS(sys_mlockall
, 1)
2174 MIPS_SYS(sys_munlockall
, 0)
2175 MIPS_SYS(sys_sched_setparam
, 2)
2176 MIPS_SYS(sys_sched_getparam
, 2)
2177 MIPS_SYS(sys_sched_setscheduler
, 3) /* 4160 */
2178 MIPS_SYS(sys_sched_getscheduler
, 1)
2179 MIPS_SYS(sys_sched_yield
, 0)
2180 MIPS_SYS(sys_sched_get_priority_max
, 1)
2181 MIPS_SYS(sys_sched_get_priority_min
, 1)
2182 MIPS_SYS(sys_sched_rr_get_interval
, 2) /* 4165 */
2183 MIPS_SYS(sys_nanosleep
, 2)
2184 MIPS_SYS(sys_mremap
, 5)
2185 MIPS_SYS(sys_accept
, 3)
2186 MIPS_SYS(sys_bind
, 3)
2187 MIPS_SYS(sys_connect
, 3) /* 4170 */
2188 MIPS_SYS(sys_getpeername
, 3)
2189 MIPS_SYS(sys_getsockname
, 3)
2190 MIPS_SYS(sys_getsockopt
, 5)
2191 MIPS_SYS(sys_listen
, 2)
2192 MIPS_SYS(sys_recv
, 4) /* 4175 */
2193 MIPS_SYS(sys_recvfrom
, 6)
2194 MIPS_SYS(sys_recvmsg
, 3)
2195 MIPS_SYS(sys_send
, 4)
2196 MIPS_SYS(sys_sendmsg
, 3)
2197 MIPS_SYS(sys_sendto
, 6) /* 4180 */
2198 MIPS_SYS(sys_setsockopt
, 5)
2199 MIPS_SYS(sys_shutdown
, 2)
2200 MIPS_SYS(sys_socket
, 3)
2201 MIPS_SYS(sys_socketpair
, 4)
2202 MIPS_SYS(sys_setresuid
, 3) /* 4185 */
2203 MIPS_SYS(sys_getresuid
, 3)
2204 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_query_module */
2205 MIPS_SYS(sys_poll
, 3)
2206 MIPS_SYS(sys_nfsservctl
, 3)
2207 MIPS_SYS(sys_setresgid
, 3) /* 4190 */
2208 MIPS_SYS(sys_getresgid
, 3)
2209 MIPS_SYS(sys_prctl
, 5)
2210 MIPS_SYS(sys_rt_sigreturn
, 0)
2211 MIPS_SYS(sys_rt_sigaction
, 4)
2212 MIPS_SYS(sys_rt_sigprocmask
, 4) /* 4195 */
2213 MIPS_SYS(sys_rt_sigpending
, 2)
2214 MIPS_SYS(sys_rt_sigtimedwait
, 4)
2215 MIPS_SYS(sys_rt_sigqueueinfo
, 3)
2216 MIPS_SYS(sys_rt_sigsuspend
, 0)
2217 MIPS_SYS(sys_pread64
, 6) /* 4200 */
2218 MIPS_SYS(sys_pwrite64
, 6)
2219 MIPS_SYS(sys_chown
, 3)
2220 MIPS_SYS(sys_getcwd
, 2)
2221 MIPS_SYS(sys_capget
, 2)
2222 MIPS_SYS(sys_capset
, 2) /* 4205 */
2223 MIPS_SYS(sys_sigaltstack
, 2)
2224 MIPS_SYS(sys_sendfile
, 4)
2225 MIPS_SYS(sys_ni_syscall
, 0)
2226 MIPS_SYS(sys_ni_syscall
, 0)
2227 MIPS_SYS(sys_mmap2
, 6) /* 4210 */
2228 MIPS_SYS(sys_truncate64
, 4)
2229 MIPS_SYS(sys_ftruncate64
, 4)
2230 MIPS_SYS(sys_stat64
, 2)
2231 MIPS_SYS(sys_lstat64
, 2)
2232 MIPS_SYS(sys_fstat64
, 2) /* 4215 */
2233 MIPS_SYS(sys_pivot_root
, 2)
2234 MIPS_SYS(sys_mincore
, 3)
2235 MIPS_SYS(sys_madvise
, 3)
2236 MIPS_SYS(sys_getdents64
, 3)
2237 MIPS_SYS(sys_fcntl64
, 3) /* 4220 */
2238 MIPS_SYS(sys_ni_syscall
, 0)
2239 MIPS_SYS(sys_gettid
, 0)
2240 MIPS_SYS(sys_readahead
, 5)
2241 MIPS_SYS(sys_setxattr
, 5)
2242 MIPS_SYS(sys_lsetxattr
, 5) /* 4225 */
2243 MIPS_SYS(sys_fsetxattr
, 5)
2244 MIPS_SYS(sys_getxattr
, 4)
2245 MIPS_SYS(sys_lgetxattr
, 4)
2246 MIPS_SYS(sys_fgetxattr
, 4)
2247 MIPS_SYS(sys_listxattr
, 3) /* 4230 */
2248 MIPS_SYS(sys_llistxattr
, 3)
2249 MIPS_SYS(sys_flistxattr
, 3)
2250 MIPS_SYS(sys_removexattr
, 2)
2251 MIPS_SYS(sys_lremovexattr
, 2)
2252 MIPS_SYS(sys_fremovexattr
, 2) /* 4235 */
2253 MIPS_SYS(sys_tkill
, 2)
2254 MIPS_SYS(sys_sendfile64
, 5)
2255 MIPS_SYS(sys_futex
, 6)
2256 MIPS_SYS(sys_sched_setaffinity
, 3)
2257 MIPS_SYS(sys_sched_getaffinity
, 3) /* 4240 */
2258 MIPS_SYS(sys_io_setup
, 2)
2259 MIPS_SYS(sys_io_destroy
, 1)
2260 MIPS_SYS(sys_io_getevents
, 5)
2261 MIPS_SYS(sys_io_submit
, 3)
2262 MIPS_SYS(sys_io_cancel
, 3) /* 4245 */
2263 MIPS_SYS(sys_exit_group
, 1)
2264 MIPS_SYS(sys_lookup_dcookie
, 3)
2265 MIPS_SYS(sys_epoll_create
, 1)
2266 MIPS_SYS(sys_epoll_ctl
, 4)
2267 MIPS_SYS(sys_epoll_wait
, 3) /* 4250 */
2268 MIPS_SYS(sys_remap_file_pages
, 5)
2269 MIPS_SYS(sys_set_tid_address
, 1)
2270 MIPS_SYS(sys_restart_syscall
, 0)
2271 MIPS_SYS(sys_fadvise64_64
, 7)
2272 MIPS_SYS(sys_statfs64
, 3) /* 4255 */
2273 MIPS_SYS(sys_fstatfs64
, 2)
2274 MIPS_SYS(sys_timer_create
, 3)
2275 MIPS_SYS(sys_timer_settime
, 4)
2276 MIPS_SYS(sys_timer_gettime
, 2)
2277 MIPS_SYS(sys_timer_getoverrun
, 1) /* 4260 */
2278 MIPS_SYS(sys_timer_delete
, 1)
2279 MIPS_SYS(sys_clock_settime
, 2)
2280 MIPS_SYS(sys_clock_gettime
, 2)
2281 MIPS_SYS(sys_clock_getres
, 2)
2282 MIPS_SYS(sys_clock_nanosleep
, 4) /* 4265 */
2283 MIPS_SYS(sys_tgkill
, 3)
2284 MIPS_SYS(sys_utimes
, 2)
2285 MIPS_SYS(sys_mbind
, 4)
2286 MIPS_SYS(sys_ni_syscall
, 0) /* sys_get_mempolicy */
2287 MIPS_SYS(sys_ni_syscall
, 0) /* 4270 sys_set_mempolicy */
2288 MIPS_SYS(sys_mq_open
, 4)
2289 MIPS_SYS(sys_mq_unlink
, 1)
2290 MIPS_SYS(sys_mq_timedsend
, 5)
2291 MIPS_SYS(sys_mq_timedreceive
, 5)
2292 MIPS_SYS(sys_mq_notify
, 2) /* 4275 */
2293 MIPS_SYS(sys_mq_getsetattr
, 3)
2294 MIPS_SYS(sys_ni_syscall
, 0) /* sys_vserver */
2295 MIPS_SYS(sys_waitid
, 4)
2296 MIPS_SYS(sys_ni_syscall
, 0) /* available, was setaltroot */
2297 MIPS_SYS(sys_add_key
, 5)
2298 MIPS_SYS(sys_request_key
, 4)
2299 MIPS_SYS(sys_keyctl
, 5)
2300 MIPS_SYS(sys_set_thread_area
, 1)
2301 MIPS_SYS(sys_inotify_init
, 0)
2302 MIPS_SYS(sys_inotify_add_watch
, 3) /* 4285 */
2303 MIPS_SYS(sys_inotify_rm_watch
, 2)
2304 MIPS_SYS(sys_migrate_pages
, 4)
2305 MIPS_SYS(sys_openat
, 4)
2306 MIPS_SYS(sys_mkdirat
, 3)
2307 MIPS_SYS(sys_mknodat
, 4) /* 4290 */
2308 MIPS_SYS(sys_fchownat
, 5)
2309 MIPS_SYS(sys_futimesat
, 3)
2310 MIPS_SYS(sys_fstatat64
, 4)
2311 MIPS_SYS(sys_unlinkat
, 3)
2312 MIPS_SYS(sys_renameat
, 4) /* 4295 */
2313 MIPS_SYS(sys_linkat
, 5)
2314 MIPS_SYS(sys_symlinkat
, 3)
2315 MIPS_SYS(sys_readlinkat
, 4)
2316 MIPS_SYS(sys_fchmodat
, 3)
2317 MIPS_SYS(sys_faccessat
, 3) /* 4300 */
2318 MIPS_SYS(sys_pselect6
, 6)
2319 MIPS_SYS(sys_ppoll
, 5)
2320 MIPS_SYS(sys_unshare
, 1)
2321 MIPS_SYS(sys_splice
, 6)
2322 MIPS_SYS(sys_sync_file_range
, 7) /* 4305 */
2323 MIPS_SYS(sys_tee
, 4)
2324 MIPS_SYS(sys_vmsplice
, 4)
2325 MIPS_SYS(sys_move_pages
, 6)
2326 MIPS_SYS(sys_set_robust_list
, 2)
2327 MIPS_SYS(sys_get_robust_list
, 3) /* 4310 */
2328 MIPS_SYS(sys_kexec_load
, 4)
2329 MIPS_SYS(sys_getcpu
, 3)
2330 MIPS_SYS(sys_epoll_pwait
, 6)
2331 MIPS_SYS(sys_ioprio_set
, 3)
2332 MIPS_SYS(sys_ioprio_get
, 2)
2333 MIPS_SYS(sys_utimensat
, 4)
2334 MIPS_SYS(sys_signalfd
, 3)
2335 MIPS_SYS(sys_ni_syscall
, 0) /* was timerfd */
2336 MIPS_SYS(sys_eventfd
, 1)
2337 MIPS_SYS(sys_fallocate
, 6) /* 4320 */
2338 MIPS_SYS(sys_timerfd_create
, 2)
2339 MIPS_SYS(sys_timerfd_gettime
, 2)
2340 MIPS_SYS(sys_timerfd_settime
, 4)
2341 MIPS_SYS(sys_signalfd4
, 4)
2342 MIPS_SYS(sys_eventfd2
, 2) /* 4325 */
2343 MIPS_SYS(sys_epoll_create1
, 1)
2344 MIPS_SYS(sys_dup3
, 3)
2345 MIPS_SYS(sys_pipe2
, 2)
2346 MIPS_SYS(sys_inotify_init1
, 1)
2347 MIPS_SYS(sys_preadv
, 6) /* 4330 */
2348 MIPS_SYS(sys_pwritev
, 6)
2349 MIPS_SYS(sys_rt_tgsigqueueinfo
, 4)
2350 MIPS_SYS(sys_perf_event_open
, 5)
2351 MIPS_SYS(sys_accept4
, 4)
2352 MIPS_SYS(sys_recvmmsg
, 5) /* 4335 */
2353 MIPS_SYS(sys_fanotify_init
, 2)
2354 MIPS_SYS(sys_fanotify_mark
, 6)
2355 MIPS_SYS(sys_prlimit64
, 4)
2356 MIPS_SYS(sys_name_to_handle_at
, 5)
2357 MIPS_SYS(sys_open_by_handle_at
, 3) /* 4340 */
2358 MIPS_SYS(sys_clock_adjtime
, 2)
2359 MIPS_SYS(sys_syncfs
, 1)
2364 static int do_store_exclusive(CPUMIPSState
*env
)
2367 target_ulong page_addr
;
2375 page_addr
= addr
& TARGET_PAGE_MASK
;
2378 flags
= page_get_flags(page_addr
);
2379 if ((flags
& PAGE_READ
) == 0) {
2382 reg
= env
->llreg
& 0x1f;
2383 d
= (env
->llreg
& 0x20) != 0;
2385 segv
= get_user_s64(val
, addr
);
2387 segv
= get_user_s32(val
, addr
);
2390 if (val
!= env
->llval
) {
2391 env
->active_tc
.gpr
[reg
] = 0;
2394 segv
= put_user_u64(env
->llnewval
, addr
);
2396 segv
= put_user_u32(env
->llnewval
, addr
);
2399 env
->active_tc
.gpr
[reg
] = 1;
2406 env
->active_tc
.PC
+= 4;
2419 static int do_break(CPUMIPSState
*env
, target_siginfo_t
*info
,
2427 info
->si_signo
= TARGET_SIGFPE
;
2429 info
->si_code
= (code
== BRK_OVERFLOW
) ? FPE_INTOVF
: FPE_INTDIV
;
2430 queue_signal(env
, info
->si_signo
, &*info
);
2434 info
->si_signo
= TARGET_SIGTRAP
;
2436 queue_signal(env
, info
->si_signo
, &*info
);
2444 void cpu_loop(CPUMIPSState
*env
)
2446 CPUState
*cs
= CPU(mips_env_get_cpu(env
));
2447 target_siginfo_t info
;
2450 # ifdef TARGET_ABI_MIPSO32
2451 unsigned int syscall_num
;
2456 trapnr
= cpu_mips_exec(cs
);
2460 env
->active_tc
.PC
+= 4;
2461 # ifdef TARGET_ABI_MIPSO32
2462 syscall_num
= env
->active_tc
.gpr
[2] - 4000;
2463 if (syscall_num
>= sizeof(mips_syscall_args
)) {
2464 ret
= -TARGET_ENOSYS
;
2468 abi_ulong arg5
= 0, arg6
= 0, arg7
= 0, arg8
= 0;
2470 nb_args
= mips_syscall_args
[syscall_num
];
2471 sp_reg
= env
->active_tc
.gpr
[29];
2473 /* these arguments are taken from the stack */
2475 if ((ret
= get_user_ual(arg8
, sp_reg
+ 28)) != 0) {
2479 if ((ret
= get_user_ual(arg7
, sp_reg
+ 24)) != 0) {
2483 if ((ret
= get_user_ual(arg6
, sp_reg
+ 20)) != 0) {
2487 if ((ret
= get_user_ual(arg5
, sp_reg
+ 16)) != 0) {
2493 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2494 env
->active_tc
.gpr
[4],
2495 env
->active_tc
.gpr
[5],
2496 env
->active_tc
.gpr
[6],
2497 env
->active_tc
.gpr
[7],
2498 arg5
, arg6
, arg7
, arg8
);
2502 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2503 env
->active_tc
.gpr
[4], env
->active_tc
.gpr
[5],
2504 env
->active_tc
.gpr
[6], env
->active_tc
.gpr
[7],
2505 env
->active_tc
.gpr
[8], env
->active_tc
.gpr
[9],
2506 env
->active_tc
.gpr
[10], env
->active_tc
.gpr
[11]);
2508 if (ret
== -TARGET_QEMU_ESIGRETURN
) {
2509 /* Returning from a successful sigreturn syscall.
2510 Avoid clobbering register state. */
2513 if ((abi_ulong
)ret
>= (abi_ulong
)-1133) {
2514 env
->active_tc
.gpr
[7] = 1; /* error flag */
2517 env
->active_tc
.gpr
[7] = 0; /* error flag */
2519 env
->active_tc
.gpr
[2] = ret
;
2525 info
.si_signo
= TARGET_SIGSEGV
;
2527 /* XXX: check env->error_code */
2528 info
.si_code
= TARGET_SEGV_MAPERR
;
2529 info
._sifields
._sigfault
._addr
= env
->CP0_BadVAddr
;
2530 queue_signal(env
, info
.si_signo
, &info
);
2534 info
.si_signo
= TARGET_SIGILL
;
2537 queue_signal(env
, info
.si_signo
, &info
);
2539 case EXCP_INTERRUPT
:
2540 /* just indicate that signals should be handled asap */
2546 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2549 info
.si_signo
= sig
;
2551 info
.si_code
= TARGET_TRAP_BRKPT
;
2552 queue_signal(env
, info
.si_signo
, &info
);
2557 if (do_store_exclusive(env
)) {
2558 info
.si_signo
= TARGET_SIGSEGV
;
2560 info
.si_code
= TARGET_SEGV_MAPERR
;
2561 info
._sifields
._sigfault
._addr
= env
->active_tc
.PC
;
2562 queue_signal(env
, info
.si_signo
, &info
);
2566 info
.si_signo
= TARGET_SIGILL
;
2568 info
.si_code
= TARGET_ILL_ILLOPC
;
2569 queue_signal(env
, info
.si_signo
, &info
);
2571 /* The code below was inspired by the MIPS Linux kernel trap
2572 * handling code in arch/mips/kernel/traps.c.
2576 abi_ulong trap_instr
;
2579 if (env
->hflags
& MIPS_HFLAG_M16
) {
2580 if (env
->insn_flags
& ASE_MICROMIPS
) {
2581 /* microMIPS mode */
2582 ret
= get_user_u16(trap_instr
, env
->active_tc
.PC
);
2587 if ((trap_instr
>> 10) == 0x11) {
2588 /* 16-bit instruction */
2589 code
= trap_instr
& 0xf;
2591 /* 32-bit instruction */
2594 ret
= get_user_u16(instr_lo
,
2595 env
->active_tc
.PC
+ 2);
2599 trap_instr
= (trap_instr
<< 16) | instr_lo
;
2600 code
= ((trap_instr
>> 6) & ((1 << 20) - 1));
2601 /* Unfortunately, microMIPS also suffers from
2602 the old assembler bug... */
2603 if (code
>= (1 << 10)) {
2609 ret
= get_user_u16(trap_instr
, env
->active_tc
.PC
);
2613 code
= (trap_instr
>> 6) & 0x3f;
2616 ret
= get_user_u32(trap_instr
, env
->active_tc
.PC
);
2621 /* As described in the original Linux kernel code, the
2622 * below checks on 'code' are to work around an old
2625 code
= ((trap_instr
>> 6) & ((1 << 20) - 1));
2626 if (code
>= (1 << 10)) {
2631 if (do_break(env
, &info
, code
) != 0) {
2638 abi_ulong trap_instr
;
2639 unsigned int code
= 0;
2641 if (env
->hflags
& MIPS_HFLAG_M16
) {
2642 /* microMIPS mode */
2645 ret
= get_user_u16(instr
[0], env
->active_tc
.PC
) ||
2646 get_user_u16(instr
[1], env
->active_tc
.PC
+ 2);
2648 trap_instr
= (instr
[0] << 16) | instr
[1];
2650 ret
= get_user_u32(trap_instr
, env
->active_tc
.PC
);
2657 /* The immediate versions don't provide a code. */
2658 if (!(trap_instr
& 0xFC000000)) {
2659 if (env
->hflags
& MIPS_HFLAG_M16
) {
2660 /* microMIPS mode */
2661 code
= ((trap_instr
>> 12) & ((1 << 4) - 1));
2663 code
= ((trap_instr
>> 6) & ((1 << 10) - 1));
2667 if (do_break(env
, &info
, code
) != 0) {
2674 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
2677 process_pending_signals(env
);
2682 #ifdef TARGET_OPENRISC
2684 void cpu_loop(CPUOpenRISCState
*env
)
2686 CPUState
*cs
= CPU(openrisc_env_get_cpu(env
));
2691 trapnr
= cpu_openrisc_exec(cs
);
2697 qemu_log_mask(CPU_LOG_INT
, "\nReset request, exit, pc is %#x\n", env
->pc
);
2701 qemu_log_mask(CPU_LOG_INT
, "\nBus error, exit, pc is %#x\n", env
->pc
);
2702 gdbsig
= TARGET_SIGBUS
;
2706 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2707 gdbsig
= TARGET_SIGSEGV
;
2710 qemu_log_mask(CPU_LOG_INT
, "\nTick time interrupt pc is %#x\n", env
->pc
);
2713 qemu_log_mask(CPU_LOG_INT
, "\nAlignment pc is %#x\n", env
->pc
);
2714 gdbsig
= TARGET_SIGBUS
;
2717 qemu_log_mask(CPU_LOG_INT
, "\nIllegal instructionpc is %#x\n", env
->pc
);
2718 gdbsig
= TARGET_SIGILL
;
2721 qemu_log_mask(CPU_LOG_INT
, "\nExternal interruptpc is %#x\n", env
->pc
);
2725 qemu_log_mask(CPU_LOG_INT
, "\nTLB miss\n");
2728 qemu_log_mask(CPU_LOG_INT
, "\nRange\n");
2729 gdbsig
= TARGET_SIGSEGV
;
2732 env
->pc
+= 4; /* 0xc00; */
2733 env
->gpr
[11] = do_syscall(env
,
2734 env
->gpr
[11], /* return value */
2735 env
->gpr
[3], /* r3 - r7 are params */
2743 qemu_log_mask(CPU_LOG_INT
, "\nFloating point error\n");
2746 qemu_log_mask(CPU_LOG_INT
, "\nTrap\n");
2747 gdbsig
= TARGET_SIGTRAP
;
2750 qemu_log_mask(CPU_LOG_INT
, "\nNR\n");
2753 EXCP_DUMP(env
, "\nqemu: unhandled CPU exception %#x - aborting\n",
2755 gdbsig
= TARGET_SIGILL
;
2759 gdb_handlesig(cs
, gdbsig
);
2760 if (gdbsig
!= TARGET_SIGTRAP
) {
2765 process_pending_signals(env
);
2769 #endif /* TARGET_OPENRISC */
2772 void cpu_loop(CPUSH4State
*env
)
2774 CPUState
*cs
= CPU(sh_env_get_cpu(env
));
2776 target_siginfo_t info
;
2780 trapnr
= cpu_sh4_exec(cs
);
2786 ret
= do_syscall(env
,
2795 env
->gregs
[0] = ret
;
2797 case EXCP_INTERRUPT
:
2798 /* just indicate that signals should be handled asap */
2804 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2807 info
.si_signo
= sig
;
2809 info
.si_code
= TARGET_TRAP_BRKPT
;
2810 queue_signal(env
, info
.si_signo
, &info
);
2816 info
.si_signo
= TARGET_SIGSEGV
;
2818 info
.si_code
= TARGET_SEGV_MAPERR
;
2819 info
._sifields
._sigfault
._addr
= env
->tea
;
2820 queue_signal(env
, info
.si_signo
, &info
);
2824 printf ("Unhandled trap: 0x%x\n", trapnr
);
2825 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2828 process_pending_signals (env
);
2834 void cpu_loop(CPUCRISState
*env
)
2836 CPUState
*cs
= CPU(cris_env_get_cpu(env
));
2838 target_siginfo_t info
;
2842 trapnr
= cpu_cris_exec(cs
);
2847 info
.si_signo
= TARGET_SIGSEGV
;
2849 /* XXX: check env->error_code */
2850 info
.si_code
= TARGET_SEGV_MAPERR
;
2851 info
._sifields
._sigfault
._addr
= env
->pregs
[PR_EDA
];
2852 queue_signal(env
, info
.si_signo
, &info
);
2855 case EXCP_INTERRUPT
:
2856 /* just indicate that signals should be handled asap */
2859 ret
= do_syscall(env
,
2868 env
->regs
[10] = ret
;
2874 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2877 info
.si_signo
= sig
;
2879 info
.si_code
= TARGET_TRAP_BRKPT
;
2880 queue_signal(env
, info
.si_signo
, &info
);
2885 printf ("Unhandled trap: 0x%x\n", trapnr
);
2886 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2889 process_pending_signals (env
);
2894 #ifdef TARGET_MICROBLAZE
2895 void cpu_loop(CPUMBState
*env
)
2897 CPUState
*cs
= CPU(mb_env_get_cpu(env
));
2899 target_siginfo_t info
;
2903 trapnr
= cpu_mb_exec(cs
);
2908 info
.si_signo
= TARGET_SIGSEGV
;
2910 /* XXX: check env->error_code */
2911 info
.si_code
= TARGET_SEGV_MAPERR
;
2912 info
._sifields
._sigfault
._addr
= 0;
2913 queue_signal(env
, info
.si_signo
, &info
);
2916 case EXCP_INTERRUPT
:
2917 /* just indicate that signals should be handled asap */
2920 /* Return address is 4 bytes after the call. */
2922 env
->sregs
[SR_PC
] = env
->regs
[14];
2923 ret
= do_syscall(env
,
2935 env
->regs
[17] = env
->sregs
[SR_PC
] + 4;
2936 if (env
->iflags
& D_FLAG
) {
2937 env
->sregs
[SR_ESR
] |= 1 << 12;
2938 env
->sregs
[SR_PC
] -= 4;
2939 /* FIXME: if branch was immed, replay the imm as well. */
2942 env
->iflags
&= ~(IMM_FLAG
| D_FLAG
);
2944 switch (env
->sregs
[SR_ESR
] & 31) {
2945 case ESR_EC_DIVZERO
:
2946 info
.si_signo
= TARGET_SIGFPE
;
2948 info
.si_code
= TARGET_FPE_FLTDIV
;
2949 info
._sifields
._sigfault
._addr
= 0;
2950 queue_signal(env
, info
.si_signo
, &info
);
2953 info
.si_signo
= TARGET_SIGFPE
;
2955 if (env
->sregs
[SR_FSR
] & FSR_IO
) {
2956 info
.si_code
= TARGET_FPE_FLTINV
;
2958 if (env
->sregs
[SR_FSR
] & FSR_DZ
) {
2959 info
.si_code
= TARGET_FPE_FLTDIV
;
2961 info
._sifields
._sigfault
._addr
= 0;
2962 queue_signal(env
, info
.si_signo
, &info
);
2965 printf ("Unhandled hw-exception: 0x%x\n",
2966 env
->sregs
[SR_ESR
] & ESR_EC_MASK
);
2967 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2976 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2979 info
.si_signo
= sig
;
2981 info
.si_code
= TARGET_TRAP_BRKPT
;
2982 queue_signal(env
, info
.si_signo
, &info
);
2987 printf ("Unhandled trap: 0x%x\n", trapnr
);
2988 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2991 process_pending_signals (env
);
2998 void cpu_loop(CPUM68KState
*env
)
3000 CPUState
*cs
= CPU(m68k_env_get_cpu(env
));
3003 target_siginfo_t info
;
3004 TaskState
*ts
= cs
->opaque
;
3008 trapnr
= cpu_m68k_exec(cs
);
3013 if (ts
->sim_syscalls
) {
3015 get_user_u16(nr
, env
->pc
+ 2);
3017 do_m68k_simcall(env
, nr
);
3023 case EXCP_HALT_INSN
:
3024 /* Semihosing syscall. */
3026 do_m68k_semihosting(env
, env
->dregs
[0]);
3030 case EXCP_UNSUPPORTED
:
3032 info
.si_signo
= TARGET_SIGILL
;
3034 info
.si_code
= TARGET_ILL_ILLOPN
;
3035 info
._sifields
._sigfault
._addr
= env
->pc
;
3036 queue_signal(env
, info
.si_signo
, &info
);
3040 ts
->sim_syscalls
= 0;
3043 env
->dregs
[0] = do_syscall(env
,
3054 case EXCP_INTERRUPT
:
3055 /* just indicate that signals should be handled asap */
3059 info
.si_signo
= TARGET_SIGSEGV
;
3061 /* XXX: check env->error_code */
3062 info
.si_code
= TARGET_SEGV_MAPERR
;
3063 info
._sifields
._sigfault
._addr
= env
->mmu
.ar
;
3064 queue_signal(env
, info
.si_signo
, &info
);
3071 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3074 info
.si_signo
= sig
;
3076 info
.si_code
= TARGET_TRAP_BRKPT
;
3077 queue_signal(env
, info
.si_signo
, &info
);
3082 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
3085 process_pending_signals(env
);
3088 #endif /* TARGET_M68K */
3091 static void do_store_exclusive(CPUAlphaState
*env
, int reg
, int quad
)
3093 target_ulong addr
, val
, tmp
;
3094 target_siginfo_t info
;
3097 addr
= env
->lock_addr
;
3098 tmp
= env
->lock_st_addr
;
3099 env
->lock_addr
= -1;
3100 env
->lock_st_addr
= 0;
3106 if (quad
? get_user_s64(val
, addr
) : get_user_s32(val
, addr
)) {
3110 if (val
== env
->lock_value
) {
3112 if (quad
? put_user_u64(tmp
, addr
) : put_user_u32(tmp
, addr
)) {
3129 info
.si_signo
= TARGET_SIGSEGV
;
3131 info
.si_code
= TARGET_SEGV_MAPERR
;
3132 info
._sifields
._sigfault
._addr
= addr
;
3133 queue_signal(env
, TARGET_SIGSEGV
, &info
);
3136 void cpu_loop(CPUAlphaState
*env
)
3138 CPUState
*cs
= CPU(alpha_env_get_cpu(env
));
3140 target_siginfo_t info
;
3145 trapnr
= cpu_alpha_exec(cs
);
3148 /* All of the traps imply a transition through PALcode, which
3149 implies an REI instruction has been executed. Which means
3150 that the intr_flag should be cleared. */
3155 fprintf(stderr
, "Reset requested. Exit\n");
3159 fprintf(stderr
, "Machine check exception. Exit\n");
3162 case EXCP_SMP_INTERRUPT
:
3163 case EXCP_CLK_INTERRUPT
:
3164 case EXCP_DEV_INTERRUPT
:
3165 fprintf(stderr
, "External interrupt. Exit\n");
3169 env
->lock_addr
= -1;
3170 info
.si_signo
= TARGET_SIGSEGV
;
3172 info
.si_code
= (page_get_flags(env
->trap_arg0
) & PAGE_VALID
3173 ? TARGET_SEGV_ACCERR
: TARGET_SEGV_MAPERR
);
3174 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
3175 queue_signal(env
, info
.si_signo
, &info
);
3178 env
->lock_addr
= -1;
3179 info
.si_signo
= TARGET_SIGBUS
;
3181 info
.si_code
= TARGET_BUS_ADRALN
;
3182 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
3183 queue_signal(env
, info
.si_signo
, &info
);
3187 env
->lock_addr
= -1;
3188 info
.si_signo
= TARGET_SIGILL
;
3190 info
.si_code
= TARGET_ILL_ILLOPC
;
3191 info
._sifields
._sigfault
._addr
= env
->pc
;
3192 queue_signal(env
, info
.si_signo
, &info
);
3195 env
->lock_addr
= -1;
3196 info
.si_signo
= TARGET_SIGFPE
;
3198 info
.si_code
= TARGET_FPE_FLTINV
;
3199 info
._sifields
._sigfault
._addr
= env
->pc
;
3200 queue_signal(env
, info
.si_signo
, &info
);
3203 /* No-op. Linux simply re-enables the FPU. */
3206 env
->lock_addr
= -1;
3207 switch (env
->error_code
) {
3210 info
.si_signo
= TARGET_SIGTRAP
;
3212 info
.si_code
= TARGET_TRAP_BRKPT
;
3213 info
._sifields
._sigfault
._addr
= env
->pc
;
3214 queue_signal(env
, info
.si_signo
, &info
);
3218 info
.si_signo
= TARGET_SIGTRAP
;
3221 info
._sifields
._sigfault
._addr
= env
->pc
;
3222 queue_signal(env
, info
.si_signo
, &info
);
3226 trapnr
= env
->ir
[IR_V0
];
3227 sysret
= do_syscall(env
, trapnr
,
3228 env
->ir
[IR_A0
], env
->ir
[IR_A1
],
3229 env
->ir
[IR_A2
], env
->ir
[IR_A3
],
3230 env
->ir
[IR_A4
], env
->ir
[IR_A5
],
3232 if (trapnr
== TARGET_NR_sigreturn
3233 || trapnr
== TARGET_NR_rt_sigreturn
) {
3236 /* Syscall writes 0 to V0 to bypass error check, similar
3237 to how this is handled internal to Linux kernel.
3238 (Ab)use trapnr temporarily as boolean indicating error. */
3239 trapnr
= (env
->ir
[IR_V0
] != 0 && sysret
< 0);
3240 env
->ir
[IR_V0
] = (trapnr
? -sysret
: sysret
);
3241 env
->ir
[IR_A3
] = trapnr
;
3245 /* ??? We can probably elide the code using page_unprotect
3246 that is checking for self-modifying code. Instead we
3247 could simply call tb_flush here. Until we work out the
3248 changes required to turn off the extra write protection,
3249 this can be a no-op. */
3253 /* Handled in the translator for usermode. */
3257 /* Handled in the translator for usermode. */
3261 info
.si_signo
= TARGET_SIGFPE
;
3262 switch (env
->ir
[IR_A0
]) {
3263 case TARGET_GEN_INTOVF
:
3264 info
.si_code
= TARGET_FPE_INTOVF
;
3266 case TARGET_GEN_INTDIV
:
3267 info
.si_code
= TARGET_FPE_INTDIV
;
3269 case TARGET_GEN_FLTOVF
:
3270 info
.si_code
= TARGET_FPE_FLTOVF
;
3272 case TARGET_GEN_FLTUND
:
3273 info
.si_code
= TARGET_FPE_FLTUND
;
3275 case TARGET_GEN_FLTINV
:
3276 info
.si_code
= TARGET_FPE_FLTINV
;
3278 case TARGET_GEN_FLTINE
:
3279 info
.si_code
= TARGET_FPE_FLTRES
;
3281 case TARGET_GEN_ROPRAND
:
3285 info
.si_signo
= TARGET_SIGTRAP
;
3290 info
._sifields
._sigfault
._addr
= env
->pc
;
3291 queue_signal(env
, info
.si_signo
, &info
);
3298 info
.si_signo
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3299 if (info
.si_signo
) {
3300 env
->lock_addr
= -1;
3302 info
.si_code
= TARGET_TRAP_BRKPT
;
3303 queue_signal(env
, info
.si_signo
, &info
);
3308 do_store_exclusive(env
, env
->error_code
, trapnr
- EXCP_STL_C
);
3310 case EXCP_INTERRUPT
:
3311 /* Just indicate that signals should be handled asap. */
3314 printf ("Unhandled trap: 0x%x\n", trapnr
);
3315 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3318 process_pending_signals (env
);
3321 #endif /* TARGET_ALPHA */
3324 void cpu_loop(CPUS390XState
*env
)
3326 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
3328 target_siginfo_t info
;
3333 trapnr
= cpu_s390x_exec(cs
);
3336 case EXCP_INTERRUPT
:
3337 /* Just indicate that signals should be handled asap. */
3341 n
= env
->int_svc_code
;
3343 /* syscalls > 255 */
3346 env
->psw
.addr
+= env
->int_svc_ilen
;
3347 env
->regs
[2] = do_syscall(env
, n
, env
->regs
[2], env
->regs
[3],
3348 env
->regs
[4], env
->regs
[5],
3349 env
->regs
[6], env
->regs
[7], 0, 0);
3353 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3355 n
= TARGET_TRAP_BRKPT
;
3360 n
= env
->int_pgm_code
;
3363 case PGM_PRIVILEGED
:
3364 sig
= TARGET_SIGILL
;
3365 n
= TARGET_ILL_ILLOPC
;
3367 case PGM_PROTECTION
:
3368 case PGM_ADDRESSING
:
3369 sig
= TARGET_SIGSEGV
;
3370 /* XXX: check env->error_code */
3371 n
= TARGET_SEGV_MAPERR
;
3372 addr
= env
->__excp_addr
;
3375 case PGM_SPECIFICATION
:
3376 case PGM_SPECIAL_OP
:
3379 sig
= TARGET_SIGILL
;
3380 n
= TARGET_ILL_ILLOPN
;
3383 case PGM_FIXPT_OVERFLOW
:
3384 sig
= TARGET_SIGFPE
;
3385 n
= TARGET_FPE_INTOVF
;
3387 case PGM_FIXPT_DIVIDE
:
3388 sig
= TARGET_SIGFPE
;
3389 n
= TARGET_FPE_INTDIV
;
3393 n
= (env
->fpc
>> 8) & 0xff;
3395 /* compare-and-trap */
3398 /* An IEEE exception, simulated or otherwise. */
3400 n
= TARGET_FPE_FLTINV
;
3401 } else if (n
& 0x40) {
3402 n
= TARGET_FPE_FLTDIV
;
3403 } else if (n
& 0x20) {
3404 n
= TARGET_FPE_FLTOVF
;
3405 } else if (n
& 0x10) {
3406 n
= TARGET_FPE_FLTUND
;
3407 } else if (n
& 0x08) {
3408 n
= TARGET_FPE_FLTRES
;
3410 /* ??? Quantum exception; BFP, DFP error. */
3413 sig
= TARGET_SIGFPE
;
3418 fprintf(stderr
, "Unhandled program exception: %#x\n", n
);
3419 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3425 addr
= env
->psw
.addr
;
3427 info
.si_signo
= sig
;
3430 info
._sifields
._sigfault
._addr
= addr
;
3431 queue_signal(env
, info
.si_signo
, &info
);
3435 fprintf(stderr
, "Unhandled trap: 0x%x\n", trapnr
);
3436 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3439 process_pending_signals (env
);
3443 #endif /* TARGET_S390X */
3445 #ifdef TARGET_TILEGX
3447 static void gen_sigill_reg(CPUTLGState
*env
)
3449 target_siginfo_t info
;
3451 info
.si_signo
= TARGET_SIGILL
;
3453 info
.si_code
= TARGET_ILL_PRVREG
;
3454 info
._sifields
._sigfault
._addr
= env
->pc
;
3455 queue_signal(env
, info
.si_signo
, &info
);
3458 static void do_signal(CPUTLGState
*env
, int signo
, int sigcode
)
3460 target_siginfo_t info
;
3462 info
.si_signo
= signo
;
3464 info
._sifields
._sigfault
._addr
= env
->pc
;
3466 if (signo
== TARGET_SIGSEGV
) {
3467 /* The passed in sigcode is a dummy; check for a page mapping
3468 and pass either MAPERR or ACCERR. */
3469 target_ulong addr
= env
->excaddr
;
3470 info
._sifields
._sigfault
._addr
= addr
;
3471 if (page_check_range(addr
, 1, PAGE_VALID
) < 0) {
3472 sigcode
= TARGET_SEGV_MAPERR
;
3474 sigcode
= TARGET_SEGV_ACCERR
;
3477 info
.si_code
= sigcode
;
3479 queue_signal(env
, info
.si_signo
, &info
);
3482 static void gen_sigsegv_maperr(CPUTLGState
*env
, target_ulong addr
)
3484 env
->excaddr
= addr
;
3485 do_signal(env
, TARGET_SIGSEGV
, 0);
3488 static void set_regval(CPUTLGState
*env
, uint8_t reg
, uint64_t val
)
3490 if (unlikely(reg
>= TILEGX_R_COUNT
)) {
3501 gen_sigill_reg(env
);
3504 g_assert_not_reached();
3507 env
->regs
[reg
] = val
;
3511 * Compare the 8-byte contents of the CmpValue SPR with the 8-byte value in
3512 * memory at the address held in the first source register. If the values are
3513 * not equal, then no memory operation is performed. If the values are equal,
3514 * the 8-byte quantity from the second source register is written into memory
3515 * at the address held in the first source register. In either case, the result
3516 * of the instruction is the value read from memory. The compare and write to
3517 * memory are atomic and thus can be used for synchronization purposes. This
3518 * instruction only operates for addresses aligned to a 8-byte boundary.
3519 * Unaligned memory access causes an Unaligned Data Reference interrupt.
3521 * Functional Description (64-bit)
3522 * uint64_t memVal = memoryReadDoubleWord (rf[SrcA]);
3523 * rf[Dest] = memVal;
3524 * if (memVal == SPR[CmpValueSPR])
3525 * memoryWriteDoubleWord (rf[SrcA], rf[SrcB]);
3527 * Functional Description (32-bit)
3528 * uint64_t memVal = signExtend32 (memoryReadWord (rf[SrcA]));
3529 * rf[Dest] = memVal;
3530 * if (memVal == signExtend32 (SPR[CmpValueSPR]))
3531 * memoryWriteWord (rf[SrcA], rf[SrcB]);
3534 * This function also processes exch and exch4 which need not process SPR.
3536 static void do_exch(CPUTLGState
*env
, bool quad
, bool cmp
)
3539 target_long val
, sprval
;
3543 addr
= env
->atomic_srca
;
3544 if (quad
? get_user_s64(val
, addr
) : get_user_s32(val
, addr
)) {
3545 goto sigsegv_maperr
;
3550 sprval
= env
->spregs
[TILEGX_SPR_CMPEXCH
];
3552 sprval
= sextract64(env
->spregs
[TILEGX_SPR_CMPEXCH
], 0, 32);
3556 if (!cmp
|| val
== sprval
) {
3557 target_long valb
= env
->atomic_srcb
;
3558 if (quad
? put_user_u64(valb
, addr
) : put_user_u32(valb
, addr
)) {
3559 goto sigsegv_maperr
;
3563 set_regval(env
, env
->atomic_dstr
, val
);
3569 gen_sigsegv_maperr(env
, addr
);
3572 static void do_fetch(CPUTLGState
*env
, int trapnr
, bool quad
)
3576 target_long val
, valb
;
3580 addr
= env
->atomic_srca
;
3581 valb
= env
->atomic_srcb
;
3582 if (quad
? get_user_s64(val
, addr
) : get_user_s32(val
, addr
)) {
3583 goto sigsegv_maperr
;
3587 case TILEGX_EXCP_OPCODE_FETCHADD
:
3588 case TILEGX_EXCP_OPCODE_FETCHADD4
:
3591 case TILEGX_EXCP_OPCODE_FETCHADDGEZ
:
3597 case TILEGX_EXCP_OPCODE_FETCHADDGEZ4
:
3599 if ((int32_t)valb
< 0) {
3603 case TILEGX_EXCP_OPCODE_FETCHAND
:
3604 case TILEGX_EXCP_OPCODE_FETCHAND4
:
3607 case TILEGX_EXCP_OPCODE_FETCHOR
:
3608 case TILEGX_EXCP_OPCODE_FETCHOR4
:
3612 g_assert_not_reached();
3616 if (quad
? put_user_u64(valb
, addr
) : put_user_u32(valb
, addr
)) {
3617 goto sigsegv_maperr
;
3621 set_regval(env
, env
->atomic_dstr
, val
);
3627 gen_sigsegv_maperr(env
, addr
);
3630 void cpu_loop(CPUTLGState
*env
)
3632 CPUState
*cs
= CPU(tilegx_env_get_cpu(env
));
3637 trapnr
= cpu_tilegx_exec(cs
);
3640 case TILEGX_EXCP_SYSCALL
:
3641 env
->regs
[TILEGX_R_RE
] = do_syscall(env
, env
->regs
[TILEGX_R_NR
],
3642 env
->regs
[0], env
->regs
[1],
3643 env
->regs
[2], env
->regs
[3],
3644 env
->regs
[4], env
->regs
[5],
3645 env
->regs
[6], env
->regs
[7]);
3646 env
->regs
[TILEGX_R_ERR
] = TILEGX_IS_ERRNO(env
->regs
[TILEGX_R_RE
])
3647 ? - env
->regs
[TILEGX_R_RE
]
3650 case TILEGX_EXCP_OPCODE_EXCH
:
3651 do_exch(env
, true, false);
3653 case TILEGX_EXCP_OPCODE_EXCH4
:
3654 do_exch(env
, false, false);
3656 case TILEGX_EXCP_OPCODE_CMPEXCH
:
3657 do_exch(env
, true, true);
3659 case TILEGX_EXCP_OPCODE_CMPEXCH4
:
3660 do_exch(env
, false, true);
3662 case TILEGX_EXCP_OPCODE_FETCHADD
:
3663 case TILEGX_EXCP_OPCODE_FETCHADDGEZ
:
3664 case TILEGX_EXCP_OPCODE_FETCHAND
:
3665 case TILEGX_EXCP_OPCODE_FETCHOR
:
3666 do_fetch(env
, trapnr
, true);
3668 case TILEGX_EXCP_OPCODE_FETCHADD4
:
3669 case TILEGX_EXCP_OPCODE_FETCHADDGEZ4
:
3670 case TILEGX_EXCP_OPCODE_FETCHAND4
:
3671 case TILEGX_EXCP_OPCODE_FETCHOR4
:
3672 do_fetch(env
, trapnr
, false);
3674 case TILEGX_EXCP_SIGNAL
:
3675 do_signal(env
, env
->signo
, env
->sigcode
);
3677 case TILEGX_EXCP_REG_IDN_ACCESS
:
3678 case TILEGX_EXCP_REG_UDN_ACCESS
:
3679 gen_sigill_reg(env
);
3682 fprintf(stderr
, "trapnr is %d[0x%x].\n", trapnr
, trapnr
);
3683 g_assert_not_reached();
3685 process_pending_signals(env
);
3691 THREAD CPUState
*thread_cpu
;
3693 void task_settid(TaskState
*ts
)
3695 if (ts
->ts_tid
== 0) {
3696 ts
->ts_tid
= (pid_t
)syscall(SYS_gettid
);
3700 void stop_all_tasks(void)
3703 * We trust that when using NPTL, start_exclusive()
3704 * handles thread stopping correctly.
3709 /* Assumes contents are already zeroed. */
3710 void init_task_state(TaskState
*ts
)
3715 ts
->first_free
= ts
->sigqueue_table
;
3716 for (i
= 0; i
< MAX_SIGQUEUE_SIZE
- 1; i
++) {
3717 ts
->sigqueue_table
[i
].next
= &ts
->sigqueue_table
[i
+ 1];
3719 ts
->sigqueue_table
[i
].next
= NULL
;
3722 CPUArchState
*cpu_copy(CPUArchState
*env
)
3724 CPUState
*cpu
= ENV_GET_CPU(env
);
3725 CPUState
*new_cpu
= cpu_init(cpu_model
);
3726 CPUArchState
*new_env
= new_cpu
->env_ptr
;
3730 /* Reset non arch specific state */
3733 memcpy(new_env
, env
, sizeof(CPUArchState
));
3735 /* Clone all break/watchpoints.
3736 Note: Once we support ptrace with hw-debug register access, make sure
3737 BP_CPU break/watchpoints are handled correctly on clone. */
3738 QTAILQ_INIT(&new_cpu
->breakpoints
);
3739 QTAILQ_INIT(&new_cpu
->watchpoints
);
3740 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
3741 cpu_breakpoint_insert(new_cpu
, bp
->pc
, bp
->flags
, NULL
);
3743 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
3744 cpu_watchpoint_insert(new_cpu
, wp
->vaddr
, wp
->len
, wp
->flags
, NULL
);
3750 static void handle_arg_help(const char *arg
)
3752 usage(EXIT_SUCCESS
);
3755 static void handle_arg_log(const char *arg
)
3759 mask
= qemu_str_to_log_mask(arg
);
3761 qemu_print_log_usage(stdout
);
3764 qemu_log_needs_buffers();
3768 static void handle_arg_log_filename(const char *arg
)
3770 qemu_set_log_filename(arg
);
3773 static void handle_arg_set_env(const char *arg
)
3775 char *r
, *p
, *token
;
3776 r
= p
= strdup(arg
);
3777 while ((token
= strsep(&p
, ",")) != NULL
) {
3778 if (envlist_setenv(envlist
, token
) != 0) {
3779 usage(EXIT_FAILURE
);
3785 static void handle_arg_unset_env(const char *arg
)
3787 char *r
, *p
, *token
;
3788 r
= p
= strdup(arg
);
3789 while ((token
= strsep(&p
, ",")) != NULL
) {
3790 if (envlist_unsetenv(envlist
, token
) != 0) {
3791 usage(EXIT_FAILURE
);
3797 static void handle_arg_argv0(const char *arg
)
3799 argv0
= strdup(arg
);
3802 static void handle_arg_stack_size(const char *arg
)
3805 guest_stack_size
= strtoul(arg
, &p
, 0);
3806 if (guest_stack_size
== 0) {
3807 usage(EXIT_FAILURE
);
3811 guest_stack_size
*= 1024 * 1024;
3812 } else if (*p
== 'k' || *p
== 'K') {
3813 guest_stack_size
*= 1024;
3817 static void handle_arg_ld_prefix(const char *arg
)
3819 interp_prefix
= strdup(arg
);
3822 static void handle_arg_pagesize(const char *arg
)
3824 qemu_host_page_size
= atoi(arg
);
3825 if (qemu_host_page_size
== 0 ||
3826 (qemu_host_page_size
& (qemu_host_page_size
- 1)) != 0) {
3827 fprintf(stderr
, "page size must be a power of two\n");
3832 static void handle_arg_randseed(const char *arg
)
3834 unsigned long long seed
;
3836 if (parse_uint_full(arg
, &seed
, 0) != 0 || seed
> UINT_MAX
) {
3837 fprintf(stderr
, "Invalid seed number: %s\n", arg
);
3843 static void handle_arg_gdb(const char *arg
)
3845 gdbstub_port
= atoi(arg
);
3848 static void handle_arg_uname(const char *arg
)
3850 qemu_uname_release
= strdup(arg
);
3853 static void handle_arg_cpu(const char *arg
)
3855 cpu_model
= strdup(arg
);
3856 if (cpu_model
== NULL
|| is_help_option(cpu_model
)) {
3857 /* XXX: implement xxx_cpu_list for targets that still miss it */
3858 #if defined(cpu_list)
3859 cpu_list(stdout
, &fprintf
);
3865 static void handle_arg_guest_base(const char *arg
)
3867 guest_base
= strtol(arg
, NULL
, 0);
3868 have_guest_base
= 1;
3871 static void handle_arg_reserved_va(const char *arg
)
3875 reserved_va
= strtoul(arg
, &p
, 0);
3889 unsigned long unshifted
= reserved_va
;
3891 reserved_va
<<= shift
;
3892 if (((reserved_va
>> shift
) != unshifted
)
3893 #if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
3894 || (reserved_va
> (1ul << TARGET_VIRT_ADDR_SPACE_BITS
))
3897 fprintf(stderr
, "Reserved virtual address too big\n");
3902 fprintf(stderr
, "Unrecognised -R size suffix '%s'\n", p
);
3907 static void handle_arg_singlestep(const char *arg
)
3912 static void handle_arg_strace(const char *arg
)
3917 static void handle_arg_version(const char *arg
)
3919 printf("qemu-" TARGET_NAME
" version " QEMU_VERSION QEMU_PKGVERSION
3920 ", Copyright (c) 2003-2008 Fabrice Bellard\n");
3924 struct qemu_argument
{
3928 void (*handle_opt
)(const char *arg
);
3929 const char *example
;
3933 static const struct qemu_argument arg_table
[] = {
3934 {"h", "", false, handle_arg_help
,
3935 "", "print this help"},
3936 {"help", "", false, handle_arg_help
,
3938 {"g", "QEMU_GDB", true, handle_arg_gdb
,
3939 "port", "wait gdb connection to 'port'"},
3940 {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix
,
3941 "path", "set the elf interpreter prefix to 'path'"},
3942 {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size
,
3943 "size", "set the stack size to 'size' bytes"},
3944 {"cpu", "QEMU_CPU", true, handle_arg_cpu
,
3945 "model", "select CPU (-cpu help for list)"},
3946 {"E", "QEMU_SET_ENV", true, handle_arg_set_env
,
3947 "var=value", "sets targets environment variable (see below)"},
3948 {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env
,
3949 "var", "unsets targets environment variable (see below)"},
3950 {"0", "QEMU_ARGV0", true, handle_arg_argv0
,
3951 "argv0", "forces target process argv[0] to be 'argv0'"},
3952 {"r", "QEMU_UNAME", true, handle_arg_uname
,
3953 "uname", "set qemu uname release string to 'uname'"},
3954 {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base
,
3955 "address", "set guest_base address to 'address'"},
3956 {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va
,
3957 "size", "reserve 'size' bytes for guest virtual address space"},
3958 {"d", "QEMU_LOG", true, handle_arg_log
,
3959 "item[,...]", "enable logging of specified items "
3960 "(use '-d help' for a list of items)"},
3961 {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename
,
3962 "logfile", "write logs to 'logfile' (default stderr)"},
3963 {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize
,
3964 "pagesize", "set the host page size to 'pagesize'"},
3965 {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep
,
3966 "", "run in singlestep mode"},
3967 {"strace", "QEMU_STRACE", false, handle_arg_strace
,
3968 "", "log system calls"},
3969 {"seed", "QEMU_RAND_SEED", true, handle_arg_randseed
,
3970 "", "Seed for pseudo-random number generator"},
3971 {"version", "QEMU_VERSION", false, handle_arg_version
,
3972 "", "display version information and exit"},
3973 {NULL
, NULL
, false, NULL
, NULL
, NULL
}
3976 static void usage(int exitcode
)
3978 const struct qemu_argument
*arginfo
;
3982 printf("usage: qemu-" TARGET_NAME
" [options] program [arguments...]\n"
3983 "Linux CPU emulator (compiled for " TARGET_NAME
" emulation)\n"
3985 "Options and associated environment variables:\n"
3988 /* Calculate column widths. We must always have at least enough space
3989 * for the column header.
3991 maxarglen
= strlen("Argument");
3992 maxenvlen
= strlen("Env-variable");
3994 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
3995 int arglen
= strlen(arginfo
->argv
);
3996 if (arginfo
->has_arg
) {
3997 arglen
+= strlen(arginfo
->example
) + 1;
3999 if (strlen(arginfo
->env
) > maxenvlen
) {
4000 maxenvlen
= strlen(arginfo
->env
);
4002 if (arglen
> maxarglen
) {
4007 printf("%-*s %-*s Description\n", maxarglen
+1, "Argument",
4008 maxenvlen
, "Env-variable");
4010 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
4011 if (arginfo
->has_arg
) {
4012 printf("-%s %-*s %-*s %s\n", arginfo
->argv
,
4013 (int)(maxarglen
- strlen(arginfo
->argv
) - 1),
4014 arginfo
->example
, maxenvlen
, arginfo
->env
, arginfo
->help
);
4016 printf("-%-*s %-*s %s\n", maxarglen
, arginfo
->argv
,
4017 maxenvlen
, arginfo
->env
,
4024 "QEMU_LD_PREFIX = %s\n"
4025 "QEMU_STACK_SIZE = %ld byte\n",
4030 "You can use -E and -U options or the QEMU_SET_ENV and\n"
4031 "QEMU_UNSET_ENV environment variables to set and unset\n"
4032 "environment variables for the target process.\n"
4033 "It is possible to provide several variables by separating them\n"
4034 "by commas in getsubopt(3) style. Additionally it is possible to\n"
4035 "provide the -E and -U options multiple times.\n"
4036 "The following lines are equivalent:\n"
4037 " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
4038 " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
4039 " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
4040 "Note that if you provide several changes to a single variable\n"
4041 "the last change will stay in effect.\n");
4046 static int parse_args(int argc
, char **argv
)
4050 const struct qemu_argument
*arginfo
;
4052 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
4053 if (arginfo
->env
== NULL
) {
4057 r
= getenv(arginfo
->env
);
4059 arginfo
->handle_opt(r
);
4065 if (optind
>= argc
) {
4074 if (!strcmp(r
, "-")) {
4077 /* Treat --foo the same as -foo. */
4082 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
4083 if (!strcmp(r
, arginfo
->argv
)) {
4084 if (arginfo
->has_arg
) {
4085 if (optind
>= argc
) {
4086 (void) fprintf(stderr
,
4087 "qemu: missing argument for option '%s'\n", r
);
4090 arginfo
->handle_opt(argv
[optind
]);
4093 arginfo
->handle_opt(NULL
);
4099 /* no option matched the current argv */
4100 if (arginfo
->handle_opt
== NULL
) {
4101 (void) fprintf(stderr
, "qemu: unknown option '%s'\n", r
);
4106 if (optind
>= argc
) {
4107 (void) fprintf(stderr
, "qemu: no user program specified\n");
4111 filename
= argv
[optind
];
4112 exec_path
= argv
[optind
];
4117 int main(int argc
, char **argv
, char **envp
)
4119 struct target_pt_regs regs1
, *regs
= ®s1
;
4120 struct image_info info1
, *info
= &info1
;
4121 struct linux_binprm bprm
;
4126 char **target_environ
, **wrk
;
4133 module_call_init(MODULE_INIT_QOM
);
4135 if ((envlist
= envlist_create()) == NULL
) {
4136 (void) fprintf(stderr
, "Unable to allocate envlist\n");
4140 /* add current environment into the list */
4141 for (wrk
= environ
; *wrk
!= NULL
; wrk
++) {
4142 (void) envlist_setenv(envlist
, *wrk
);
4145 /* Read the stack limit from the kernel. If it's "unlimited",
4146 then we can do little else besides use the default. */
4149 if (getrlimit(RLIMIT_STACK
, &lim
) == 0
4150 && lim
.rlim_cur
!= RLIM_INFINITY
4151 && lim
.rlim_cur
== (target_long
)lim
.rlim_cur
) {
4152 guest_stack_size
= lim
.rlim_cur
;
4157 #if defined(cpudef_setup)
4158 cpudef_setup(); /* parse cpu definitions in target config file (TBD) */
4163 optind
= parse_args(argc
, argv
);
4166 memset(regs
, 0, sizeof(struct target_pt_regs
));
4168 /* Zero out image_info */
4169 memset(info
, 0, sizeof(struct image_info
));
4171 memset(&bprm
, 0, sizeof (bprm
));
4173 /* Scan interp_prefix dir for replacement files. */
4174 init_paths(interp_prefix
);
4176 init_qemu_uname_release();
4178 if (cpu_model
== NULL
) {
4179 #if defined(TARGET_I386)
4180 #ifdef TARGET_X86_64
4181 cpu_model
= "qemu64";
4183 cpu_model
= "qemu32";
4185 #elif defined(TARGET_ARM)
4187 #elif defined(TARGET_UNICORE32)
4189 #elif defined(TARGET_M68K)
4191 #elif defined(TARGET_SPARC)
4192 #ifdef TARGET_SPARC64
4193 cpu_model
= "TI UltraSparc II";
4195 cpu_model
= "Fujitsu MB86904";
4197 #elif defined(TARGET_MIPS)
4198 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
4203 #elif defined TARGET_OPENRISC
4204 cpu_model
= "or1200";
4205 #elif defined(TARGET_PPC)
4206 # ifdef TARGET_PPC64
4207 cpu_model
= "POWER8";
4211 #elif defined TARGET_SH4
4212 cpu_model
= TYPE_SH7785_CPU
;
4218 /* NOTE: we need to init the CPU at this stage to get
4219 qemu_host_page_size */
4220 cpu
= cpu_init(cpu_model
);
4222 fprintf(stderr
, "Unable to find CPU definition\n");
4230 if (getenv("QEMU_STRACE")) {
4234 if (getenv("QEMU_RAND_SEED")) {
4235 handle_arg_randseed(getenv("QEMU_RAND_SEED"));
4238 target_environ
= envlist_to_environ(envlist
, NULL
);
4239 envlist_free(envlist
);
4242 * Now that page sizes are configured in cpu_init() we can do
4243 * proper page alignment for guest_base.
4245 guest_base
= HOST_PAGE_ALIGN(guest_base
);
4247 if (reserved_va
|| have_guest_base
) {
4248 guest_base
= init_guest_space(guest_base
, reserved_va
, 0,
4250 if (guest_base
== (unsigned long)-1) {
4251 fprintf(stderr
, "Unable to reserve 0x%lx bytes of virtual address "
4252 "space for use as guest address space (check your virtual "
4253 "memory ulimit setting or reserve less using -R option)\n",
4259 mmap_next_start
= reserved_va
;
4264 * Read in mmap_min_addr kernel parameter. This value is used
4265 * When loading the ELF image to determine whether guest_base
4266 * is needed. It is also used in mmap_find_vma.
4271 if ((fp
= fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL
) {
4273 if (fscanf(fp
, "%lu", &tmp
) == 1) {
4274 mmap_min_addr
= tmp
;
4275 qemu_log_mask(CPU_LOG_PAGE
, "host mmap_min_addr=0x%lx\n", mmap_min_addr
);
4282 * Prepare copy of argv vector for target.
4284 target_argc
= argc
- optind
;
4285 target_argv
= calloc(target_argc
+ 1, sizeof (char *));
4286 if (target_argv
== NULL
) {
4287 (void) fprintf(stderr
, "Unable to allocate memory for target_argv\n");
4292 * If argv0 is specified (using '-0' switch) we replace
4293 * argv[0] pointer with the given one.
4296 if (argv0
!= NULL
) {
4297 target_argv
[i
++] = strdup(argv0
);
4299 for (; i
< target_argc
; i
++) {
4300 target_argv
[i
] = strdup(argv
[optind
+ i
]);
4302 target_argv
[target_argc
] = NULL
;
4304 ts
= g_new0(TaskState
, 1);
4305 init_task_state(ts
);
4306 /* build Task State */
4312 execfd
= qemu_getauxval(AT_EXECFD
);
4314 execfd
= open(filename
, O_RDONLY
);
4316 printf("Error while loading %s: %s\n", filename
, strerror(errno
));
4317 _exit(EXIT_FAILURE
);
4321 ret
= loader_exec(execfd
, filename
, target_argv
, target_environ
, regs
,
4324 printf("Error while loading %s: %s\n", filename
, strerror(-ret
));
4325 _exit(EXIT_FAILURE
);
4328 for (wrk
= target_environ
; *wrk
; wrk
++) {
4332 free(target_environ
);
4334 if (qemu_loglevel_mask(CPU_LOG_PAGE
)) {
4335 qemu_log("guest_base 0x%lx\n", guest_base
);
4338 qemu_log("start_brk 0x" TARGET_ABI_FMT_lx
"\n", info
->start_brk
);
4339 qemu_log("end_code 0x" TARGET_ABI_FMT_lx
"\n", info
->end_code
);
4340 qemu_log("start_code 0x" TARGET_ABI_FMT_lx
"\n",
4342 qemu_log("start_data 0x" TARGET_ABI_FMT_lx
"\n",
4344 qemu_log("end_data 0x" TARGET_ABI_FMT_lx
"\n", info
->end_data
);
4345 qemu_log("start_stack 0x" TARGET_ABI_FMT_lx
"\n",
4347 qemu_log("brk 0x" TARGET_ABI_FMT_lx
"\n", info
->brk
);
4348 qemu_log("entry 0x" TARGET_ABI_FMT_lx
"\n", info
->entry
);
4351 target_set_brk(info
->brk
);
4355 /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
4356 generating the prologue until now so that the prologue can take
4357 the real value of GUEST_BASE into account. */
4358 tcg_prologue_init(&tcg_ctx
);
4360 #if defined(TARGET_I386)
4361 env
->cr
[0] = CR0_PG_MASK
| CR0_WP_MASK
| CR0_PE_MASK
;
4362 env
->hflags
|= HF_PE_MASK
| HF_CPL_MASK
;
4363 if (env
->features
[FEAT_1_EDX
] & CPUID_SSE
) {
4364 env
->cr
[4] |= CR4_OSFXSR_MASK
;
4365 env
->hflags
|= HF_OSFXSR_MASK
;
4367 #ifndef TARGET_ABI32
4368 /* enable 64 bit mode if possible */
4369 if (!(env
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_LM
)) {
4370 fprintf(stderr
, "The selected x86 CPU does not support 64 bit mode\n");
4373 env
->cr
[4] |= CR4_PAE_MASK
;
4374 env
->efer
|= MSR_EFER_LMA
| MSR_EFER_LME
;
4375 env
->hflags
|= HF_LMA_MASK
;
4378 /* flags setup : we activate the IRQs by default as in user mode */
4379 env
->eflags
|= IF_MASK
;
4381 /* linux register setup */
4382 #ifndef TARGET_ABI32
4383 env
->regs
[R_EAX
] = regs
->rax
;
4384 env
->regs
[R_EBX
] = regs
->rbx
;
4385 env
->regs
[R_ECX
] = regs
->rcx
;
4386 env
->regs
[R_EDX
] = regs
->rdx
;
4387 env
->regs
[R_ESI
] = regs
->rsi
;
4388 env
->regs
[R_EDI
] = regs
->rdi
;
4389 env
->regs
[R_EBP
] = regs
->rbp
;
4390 env
->regs
[R_ESP
] = regs
->rsp
;
4391 env
->eip
= regs
->rip
;
4393 env
->regs
[R_EAX
] = regs
->eax
;
4394 env
->regs
[R_EBX
] = regs
->ebx
;
4395 env
->regs
[R_ECX
] = regs
->ecx
;
4396 env
->regs
[R_EDX
] = regs
->edx
;
4397 env
->regs
[R_ESI
] = regs
->esi
;
4398 env
->regs
[R_EDI
] = regs
->edi
;
4399 env
->regs
[R_EBP
] = regs
->ebp
;
4400 env
->regs
[R_ESP
] = regs
->esp
;
4401 env
->eip
= regs
->eip
;
4404 /* linux interrupt setup */
4405 #ifndef TARGET_ABI32
4406 env
->idt
.limit
= 511;
4408 env
->idt
.limit
= 255;
4410 env
->idt
.base
= target_mmap(0, sizeof(uint64_t) * (env
->idt
.limit
+ 1),
4411 PROT_READ
|PROT_WRITE
,
4412 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4413 idt_table
= g2h(env
->idt
.base
);
4436 /* linux segment setup */
4438 uint64_t *gdt_table
;
4439 env
->gdt
.base
= target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES
,
4440 PROT_READ
|PROT_WRITE
,
4441 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4442 env
->gdt
.limit
= sizeof(uint64_t) * TARGET_GDT_ENTRIES
- 1;
4443 gdt_table
= g2h(env
->gdt
.base
);
4445 write_dt(&gdt_table
[__USER_CS
>> 3], 0, 0xfffff,
4446 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4447 (3 << DESC_DPL_SHIFT
) | (0xa << DESC_TYPE_SHIFT
));
4449 /* 64 bit code segment */
4450 write_dt(&gdt_table
[__USER_CS
>> 3], 0, 0xfffff,
4451 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4453 (3 << DESC_DPL_SHIFT
) | (0xa << DESC_TYPE_SHIFT
));
4455 write_dt(&gdt_table
[__USER_DS
>> 3], 0, 0xfffff,
4456 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4457 (3 << DESC_DPL_SHIFT
) | (0x2 << DESC_TYPE_SHIFT
));
4459 cpu_x86_load_seg(env
, R_CS
, __USER_CS
);
4460 cpu_x86_load_seg(env
, R_SS
, __USER_DS
);
4462 cpu_x86_load_seg(env
, R_DS
, __USER_DS
);
4463 cpu_x86_load_seg(env
, R_ES
, __USER_DS
);
4464 cpu_x86_load_seg(env
, R_FS
, __USER_DS
);
4465 cpu_x86_load_seg(env
, R_GS
, __USER_DS
);
4466 /* This hack makes Wine work... */
4467 env
->segs
[R_FS
].selector
= 0;
4469 cpu_x86_load_seg(env
, R_DS
, 0);
4470 cpu_x86_load_seg(env
, R_ES
, 0);
4471 cpu_x86_load_seg(env
, R_FS
, 0);
4472 cpu_x86_load_seg(env
, R_GS
, 0);
4474 #elif defined(TARGET_AARCH64)
4478 if (!(arm_feature(env
, ARM_FEATURE_AARCH64
))) {
4480 "The selected ARM CPU does not support 64 bit mode\n");
4484 for (i
= 0; i
< 31; i
++) {
4485 env
->xregs
[i
] = regs
->regs
[i
];
4488 env
->xregs
[31] = regs
->sp
;
4490 #elif defined(TARGET_ARM)
4493 cpsr_write(env
, regs
->uregs
[16], CPSR_USER
| CPSR_EXEC
,
4495 for(i
= 0; i
< 16; i
++) {
4496 env
->regs
[i
] = regs
->uregs
[i
];
4498 #ifdef TARGET_WORDS_BIGENDIAN
4500 if (EF_ARM_EABI_VERSION(info
->elf_flags
) >= EF_ARM_EABI_VER4
4501 && (info
->elf_flags
& EF_ARM_BE8
)) {
4502 env
->uncached_cpsr
|= CPSR_E
;
4503 env
->cp15
.sctlr_el
[1] |= SCTLR_E0E
;
4505 env
->cp15
.sctlr_el
[1] |= SCTLR_B
;
4509 #elif defined(TARGET_UNICORE32)
4512 cpu_asr_write(env
, regs
->uregs
[32], 0xffffffff);
4513 for (i
= 0; i
< 32; i
++) {
4514 env
->regs
[i
] = regs
->uregs
[i
];
4517 #elif defined(TARGET_SPARC)
4521 env
->npc
= regs
->npc
;
4523 for(i
= 0; i
< 8; i
++)
4524 env
->gregs
[i
] = regs
->u_regs
[i
];
4525 for(i
= 0; i
< 8; i
++)
4526 env
->regwptr
[i
] = regs
->u_regs
[i
+ 8];
4528 #elif defined(TARGET_PPC)
4532 #if defined(TARGET_PPC64)
4533 #if defined(TARGET_ABI32)
4534 env
->msr
&= ~((target_ulong
)1 << MSR_SF
);
4536 env
->msr
|= (target_ulong
)1 << MSR_SF
;
4539 env
->nip
= regs
->nip
;
4540 for(i
= 0; i
< 32; i
++) {
4541 env
->gpr
[i
] = regs
->gpr
[i
];
4544 #elif defined(TARGET_M68K)
4547 env
->dregs
[0] = regs
->d0
;
4548 env
->dregs
[1] = regs
->d1
;
4549 env
->dregs
[2] = regs
->d2
;
4550 env
->dregs
[3] = regs
->d3
;
4551 env
->dregs
[4] = regs
->d4
;
4552 env
->dregs
[5] = regs
->d5
;
4553 env
->dregs
[6] = regs
->d6
;
4554 env
->dregs
[7] = regs
->d7
;
4555 env
->aregs
[0] = regs
->a0
;
4556 env
->aregs
[1] = regs
->a1
;
4557 env
->aregs
[2] = regs
->a2
;
4558 env
->aregs
[3] = regs
->a3
;
4559 env
->aregs
[4] = regs
->a4
;
4560 env
->aregs
[5] = regs
->a5
;
4561 env
->aregs
[6] = regs
->a6
;
4562 env
->aregs
[7] = regs
->usp
;
4564 ts
->sim_syscalls
= 1;
4566 #elif defined(TARGET_MICROBLAZE)
4568 env
->regs
[0] = regs
->r0
;
4569 env
->regs
[1] = regs
->r1
;
4570 env
->regs
[2] = regs
->r2
;
4571 env
->regs
[3] = regs
->r3
;
4572 env
->regs
[4] = regs
->r4
;
4573 env
->regs
[5] = regs
->r5
;
4574 env
->regs
[6] = regs
->r6
;
4575 env
->regs
[7] = regs
->r7
;
4576 env
->regs
[8] = regs
->r8
;
4577 env
->regs
[9] = regs
->r9
;
4578 env
->regs
[10] = regs
->r10
;
4579 env
->regs
[11] = regs
->r11
;
4580 env
->regs
[12] = regs
->r12
;
4581 env
->regs
[13] = regs
->r13
;
4582 env
->regs
[14] = regs
->r14
;
4583 env
->regs
[15] = regs
->r15
;
4584 env
->regs
[16] = regs
->r16
;
4585 env
->regs
[17] = regs
->r17
;
4586 env
->regs
[18] = regs
->r18
;
4587 env
->regs
[19] = regs
->r19
;
4588 env
->regs
[20] = regs
->r20
;
4589 env
->regs
[21] = regs
->r21
;
4590 env
->regs
[22] = regs
->r22
;
4591 env
->regs
[23] = regs
->r23
;
4592 env
->regs
[24] = regs
->r24
;
4593 env
->regs
[25] = regs
->r25
;
4594 env
->regs
[26] = regs
->r26
;
4595 env
->regs
[27] = regs
->r27
;
4596 env
->regs
[28] = regs
->r28
;
4597 env
->regs
[29] = regs
->r29
;
4598 env
->regs
[30] = regs
->r30
;
4599 env
->regs
[31] = regs
->r31
;
4600 env
->sregs
[SR_PC
] = regs
->pc
;
4602 #elif defined(TARGET_MIPS)
4606 for(i
= 0; i
< 32; i
++) {
4607 env
->active_tc
.gpr
[i
] = regs
->regs
[i
];
4609 env
->active_tc
.PC
= regs
->cp0_epc
& ~(target_ulong
)1;
4610 if (regs
->cp0_epc
& 1) {
4611 env
->hflags
|= MIPS_HFLAG_M16
;
4614 #elif defined(TARGET_OPENRISC)
4618 for (i
= 0; i
< 32; i
++) {
4619 env
->gpr
[i
] = regs
->gpr
[i
];
4625 #elif defined(TARGET_SH4)
4629 for(i
= 0; i
< 16; i
++) {
4630 env
->gregs
[i
] = regs
->regs
[i
];
4634 #elif defined(TARGET_ALPHA)
4638 for(i
= 0; i
< 28; i
++) {
4639 env
->ir
[i
] = ((abi_ulong
*)regs
)[i
];
4641 env
->ir
[IR_SP
] = regs
->usp
;
4644 #elif defined(TARGET_CRIS)
4646 env
->regs
[0] = regs
->r0
;
4647 env
->regs
[1] = regs
->r1
;
4648 env
->regs
[2] = regs
->r2
;
4649 env
->regs
[3] = regs
->r3
;
4650 env
->regs
[4] = regs
->r4
;
4651 env
->regs
[5] = regs
->r5
;
4652 env
->regs
[6] = regs
->r6
;
4653 env
->regs
[7] = regs
->r7
;
4654 env
->regs
[8] = regs
->r8
;
4655 env
->regs
[9] = regs
->r9
;
4656 env
->regs
[10] = regs
->r10
;
4657 env
->regs
[11] = regs
->r11
;
4658 env
->regs
[12] = regs
->r12
;
4659 env
->regs
[13] = regs
->r13
;
4660 env
->regs
[14] = info
->start_stack
;
4661 env
->regs
[15] = regs
->acr
;
4662 env
->pc
= regs
->erp
;
4664 #elif defined(TARGET_S390X)
4667 for (i
= 0; i
< 16; i
++) {
4668 env
->regs
[i
] = regs
->gprs
[i
];
4670 env
->psw
.mask
= regs
->psw
.mask
;
4671 env
->psw
.addr
= regs
->psw
.addr
;
4673 #elif defined(TARGET_TILEGX)
4676 for (i
= 0; i
< TILEGX_R_COUNT
; i
++) {
4677 env
->regs
[i
] = regs
->regs
[i
];
4679 for (i
= 0; i
< TILEGX_SPR_COUNT
; i
++) {
4685 #error unsupported target CPU
4688 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4689 ts
->stack_base
= info
->start_stack
;
4690 ts
->heap_base
= info
->brk
;
4691 /* This will be filled in on the first SYS_HEAPINFO call. */
4696 if (gdbserver_start(gdbstub_port
) < 0) {
4697 fprintf(stderr
, "qemu: could not open gdbserver on port %d\n",
4701 gdb_handlesig(cpu
, 0);