 *  Copyright (c) 2003-2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/syscall.h>
#include <sys/resource.h>

#include "qemu-common.h"
#include "qemu/cache-utils.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
static const char *cpu_model;
unsigned long mmap_min_addr;
#if defined(CONFIG_USE_GUEST_BASE)
unsigned long guest_base;

#if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
/*
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
 *
 * This way we will never overlap with our own libraries or binaries or stack
 * or anything else that QEMU maps.
 */
# ifdef TARGET_MIPS
/* MIPS only supports 31 bits of virtual address space for user space */
unsigned long reserved_va = 0x77000000;
# else
unsigned long reserved_va = 0xf7000000;
# endif
#else
unsigned long reserved_va;
#endif
#endif /* CONFIG_USE_GUEST_BASE */
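/*
 * Illustrative sketch only (not part of the original file): reserved_va
 * above is the size of the contiguous chunk of host virtual memory we want
 * for the guest.  One common way to obtain such a chunk is to make a single
 * inaccessible anonymous mapping up front and carve guest pages out of it
 * later.  reserve_guest_space() is a hypothetical helper shown purely for
 * illustration; it is not the mapping code QEMU uses here.
 */
#if 0 /* example only */
#include <sys/mman.h>

static void *reserve_guest_space(unsigned long size)
{
    /* PROT_NONE keeps the range unusable until real guest mappings are
       created inside it; MAP_NORESERVE avoids committing swap for it. */
    void *p = mmap(NULL, size, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);

    return p == MAP_FAILED ? NULL : p;
}
#endif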
static void usage(void);

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release = CONFIG_UNAME_RELEASE;

/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
   we allocate a bigger stack.  Need a better solution, for example
   by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;
void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)
{
    return -1;
}
#endif
/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work. */
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
static int pending_cpus;
/* Make sure everything is in a consistent state for calling fork().  */
void fork_start(void)
{
    pthread_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    pthread_mutex_lock(&exclusive_lock);
    mmap_fork_start();
}

void fork_end(int child)
{
    mmap_fork_end(child);
    if (child) {
        CPUState *cpu, *next_cpu;
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads.  */
        CPU_FOREACH_SAFE(cpu, next_cpu) {
            if (cpu != thread_cpu) {
                QTAILQ_REMOVE(&cpus, cpu, node);
            }
        }
        pending_cpus = 0;
        pthread_mutex_init(&exclusive_lock, NULL);
        pthread_mutex_init(&cpu_list_mutex, NULL);
        pthread_cond_init(&exclusive_cond, NULL);
        pthread_cond_init(&exclusive_resume, NULL);
        pthread_mutex_init(&tcg_ctx.tb_ctx.tb_lock, NULL);
        gdbserver_fork((CPUArchState *)thread_cpu->env_ptr);
    } else {
        pthread_mutex_unlock(&exclusive_lock);
        pthread_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
    }
}
/* Wait for pending exclusive operations to complete.  The exclusive lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
    }
}

/* Start an exclusive operation.
   Must only be called from outside cpu_arm_exec.  */
static inline void start_exclusive(void)
{
    CPUState *other_cpu;

    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();

    pending_cpus = 1;
    /* Make all other cpus stop executing.  */
    CPU_FOREACH(other_cpu) {
        if (other_cpu->running) {
            pending_cpus++;
            cpu_exit(other_cpu);
        }
    }
    if (pending_cpus > 1) {
        pthread_cond_wait(&exclusive_cond, &exclusive_lock);
    }
}

/* Finish an exclusive operation.  */
static inline void end_exclusive(void)
{
    pending_cpus = 0;
    pthread_cond_broadcast(&exclusive_resume);
    pthread_mutex_unlock(&exclusive_lock);
}

/* Wait for exclusive ops to finish, and begin cpu execution.  */
static inline void cpu_exec_start(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();
    cpu->running = true;
    pthread_mutex_unlock(&exclusive_lock);
}

/* Mark cpu as not executing, and release pending exclusive ops.  */
static inline void cpu_exec_end(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    cpu->running = false;
    if (pending_cpus > 1) {
        pending_cpus--;
        if (pending_cpus == 1) {
            pthread_cond_signal(&exclusive_cond);
        }
    }
    exclusive_idle();
    pthread_mutex_unlock(&exclusive_lock);
}

void cpu_list_lock(void)
{
    pthread_mutex_lock(&cpu_list_mutex);
}

void cpu_list_unlock(void)
{
    pthread_mutex_unlock(&cpu_list_mutex);
}
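/*
 * Illustrative sketch only (not part of the original file): a target helper
 * that emulates a guest atomic read-modify-write with plain host accesses
 * would bracket the access with the exclusive-section primitives defined
 * above.  emulate_guest_cas32() is a hypothetical name used purely for
 * illustration.
 */
#if 0 /* example only */
static uint32_t emulate_guest_cas32(uint32_t *host_addr,
                                    uint32_t expected, uint32_t newval)
{
    uint32_t old;

    start_exclusive();      /* no other cpu is executing guest code now */
    old = *host_addr;
    if (old == expected) {
        *host_addr = newval;
    }
    end_exclusive();        /* wake cpus blocked on exclusive_resume */

    return old;
}
#endif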
/***********************************************************/
/* CPUX86 core interface */

void cpu_smm_update(CPUX86State *env)
{
}

uint64_t cpu_get_tsc(CPUX86State *env)
{
    return cpu_get_real_ticks();
}

static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
                     int flags)
{
    unsigned int e1, e2;
    uint32_t *p;
    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
    e2 |= flags;
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}

static uint64_t *idt_table;
#ifdef TARGET_X86_64
static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
    p[2] = tswap32(addr >> 32);
    p[3] = 0;
}
/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
}
#else
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}
/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate(idt_table + n, 0, dpl, 0, 0);
}
#endif
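/*
 * Illustrative sketch only (not part of the original file): how the helpers
 * above would typically be used to populate idt_table, giving the exception
 * vectors a DPL of 0 and leaving only the syscall gate (int $0x80) callable
 * from user code.  The vector numbers and the function name
 * init_idt_example() are assumptions for illustration.
 */
#if 0 /* example only */
static void init_idt_example(void)
{
    int i;

    for (i = 0; i < 20; i++) {
        set_idt(i, 0);          /* hardware exceptions: kernel only */
    }
    set_idt(0x80, 3);           /* int $0x80 syscall gate: user callable */
}
#endif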
279 void cpu_loop(CPUX86State
*env
)
281 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
284 target_siginfo_t info
;
287 trapnr
= cpu_x86_exec(env
);
290 /* linux syscall from int $0x80 */
291 env
->regs
[R_EAX
] = do_syscall(env
,
303 /* linux syscall from syscall instruction */
304 env
->regs
[R_EAX
] = do_syscall(env
,
313 env
->eip
= env
->exception_next_eip
;
318 info
.si_signo
= SIGBUS
;
320 info
.si_code
= TARGET_SI_KERNEL
;
321 info
._sifields
._sigfault
._addr
= 0;
322 queue_signal(env
, info
.si_signo
, &info
);
325 /* XXX: potential problem if ABI32 */
326 #ifndef TARGET_X86_64
327 if (env
->eflags
& VM_MASK
) {
328 handle_vm86_fault(env
);
332 info
.si_signo
= SIGSEGV
;
334 info
.si_code
= TARGET_SI_KERNEL
;
335 info
._sifields
._sigfault
._addr
= 0;
336 queue_signal(env
, info
.si_signo
, &info
);
340 info
.si_signo
= SIGSEGV
;
342 if (!(env
->error_code
& 1))
343 info
.si_code
= TARGET_SEGV_MAPERR
;
345 info
.si_code
= TARGET_SEGV_ACCERR
;
346 info
._sifields
._sigfault
._addr
= env
->cr
[2];
347 queue_signal(env
, info
.si_signo
, &info
);
350 #ifndef TARGET_X86_64
351 if (env
->eflags
& VM_MASK
) {
352 handle_vm86_trap(env
, trapnr
);
356 /* division by zero */
357 info
.si_signo
= SIGFPE
;
359 info
.si_code
= TARGET_FPE_INTDIV
;
360 info
._sifields
._sigfault
._addr
= env
->eip
;
361 queue_signal(env
, info
.si_signo
, &info
);
366 #ifndef TARGET_X86_64
367 if (env
->eflags
& VM_MASK
) {
368 handle_vm86_trap(env
, trapnr
);
372 info
.si_signo
= SIGTRAP
;
374 if (trapnr
== EXCP01_DB
) {
375 info
.si_code
= TARGET_TRAP_BRKPT
;
376 info
._sifields
._sigfault
._addr
= env
->eip
;
378 info
.si_code
= TARGET_SI_KERNEL
;
379 info
._sifields
._sigfault
._addr
= 0;
381 queue_signal(env
, info
.si_signo
, &info
);
386 #ifndef TARGET_X86_64
387 if (env
->eflags
& VM_MASK
) {
388 handle_vm86_trap(env
, trapnr
);
392 info
.si_signo
= SIGSEGV
;
394 info
.si_code
= TARGET_SI_KERNEL
;
395 info
._sifields
._sigfault
._addr
= 0;
396 queue_signal(env
, info
.si_signo
, &info
);
400 info
.si_signo
= SIGILL
;
402 info
.si_code
= TARGET_ILL_ILLOPN
;
403 info
._sifields
._sigfault
._addr
= env
->eip
;
404 queue_signal(env
, info
.si_signo
, &info
);
407 /* just indicate that signals should be handled asap */
413 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
418 info
.si_code
= TARGET_TRAP_BRKPT
;
419 queue_signal(env
, info
.si_signo
, &info
);
424 pc
= env
->segs
[R_CS
].base
+ env
->eip
;
425 fprintf(stderr
, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
429 process_pending_signals(env
);
#define get_user_code_u32(x, gaddr, doswap)             \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && (doswap)) {                         \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_code_u16(x, gaddr, doswap)             \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && (doswap)) {                         \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })
/* Commpage handling -- there is no commpage for AArch64 */

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 *
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
    uint64_t oldval, newval, val;
    uint32_t addr, cpsr;
    target_siginfo_t info;

    /* Based on the 32 bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations. However things like ldrex/strex are much harder so
       there's not much point trying.  */
482 cpsr
= cpsr_read(env
);
485 if (get_user_u64(oldval
, env
->regs
[0])) {
486 env
->exception
.vaddress
= env
->regs
[0];
490 if (get_user_u64(newval
, env
->regs
[1])) {
491 env
->exception
.vaddress
= env
->regs
[1];
495 if (get_user_u64(val
, addr
)) {
496 env
->exception
.vaddress
= addr
;
503 if (put_user_u64(val
, addr
)) {
504 env
->exception
.vaddress
= addr
;
514 cpsr_write(env
, cpsr
, CPSR_C
);
520 /* We get the PC of the entry address - which is as good as anything,
521 on a real kernel what you get depends on which mode it uses. */
522 info
.si_signo
= SIGSEGV
;
524 /* XXX: check env->error_code */
525 info
.si_code
= TARGET_SEGV_MAPERR
;
526 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
527 queue_signal(env
, info
.si_signo
, &info
);
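/*
 * Illustrative sketch only (not part of the original file): the
 * compare-and-swap the helper above emulates, written out as plain C to
 * document the r0/r1/r2 calling convention described in the comment block
 * above.  On real hardware the commpage routine performs this atomically;
 * kernel_cmpxchg64_semantics() is a hypothetical name.
 */
#if 0 /* example only */
static int kernel_cmpxchg64_semantics(uint64_t *oldval, uint64_t *newval,
                                      uint64_t *ptr)
{
    if (*ptr == *oldval) {
        *ptr = *newval;
        return 0;               /* r0 = 0 and C set: exchange happened */
    }
    return 1;                   /* r0 != 0 and C clear: no exchange */
}
#endif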
532 /* Handle a jump to the kernel code page. */
534 do_kernel_trap(CPUARMState
*env
)
540 switch (env
->regs
[15]) {
541 case 0xffff0fa0: /* __kernel_memory_barrier */
542 /* ??? No-op. Will need to do better for SMP. */
544 case 0xffff0fc0: /* __kernel_cmpxchg */
545 /* XXX: This only works between threads, not between processes.
546 It's probably possible to implement this with native host
547 operations. However things like ldrex/strex are much harder so
548 there's not much point trying. */
550 cpsr
= cpsr_read(env
);
552 /* FIXME: This should SEGV if the access fails. */
553 if (get_user_u32(val
, addr
))
555 if (val
== env
->regs
[0]) {
557 /* FIXME: Check for segfaults. */
558 put_user_u32(val
, addr
);
565 cpsr_write(env
, cpsr
, CPSR_C
);
568 case 0xffff0fe0: /* __kernel_get_tls */
569 env
->regs
[0] = env
->cp15
.tpidrro_el0
;
571 case 0xffff0f60: /* __kernel_cmpxchg64 */
572 arm_kernel_cmpxchg64_helper(env
);
578 /* Jump back to the caller. */
579 addr
= env
->regs
[14];
584 env
->regs
[15] = addr
;
589 /* Store exclusive handling for AArch32 */
590 static int do_strex(CPUARMState
*env
)
598 if (env
->exclusive_addr
!= env
->exclusive_test
) {
601 /* We know we're always AArch32 so the address is in uint32_t range
602 * unless it was the -1 exclusive-monitor-lost value (which won't
603 * match exclusive_test above).
605 assert(extract64(env
->exclusive_addr
, 32, 32) == 0);
606 addr
= env
->exclusive_addr
;
607 size
= env
->exclusive_info
& 0xf;
610 segv
= get_user_u8(val
, addr
);
613 segv
= get_user_u16(val
, addr
);
617 segv
= get_user_u32(val
, addr
);
623 env
->exception
.vaddress
= addr
;
628 segv
= get_user_u32(valhi
, addr
+ 4);
630 env
->exception
.vaddress
= addr
+ 4;
633 val
= deposit64(val
, 32, 32, valhi
);
635 if (val
!= env
->exclusive_val
) {
639 val
= env
->regs
[(env
->exclusive_info
>> 8) & 0xf];
642 segv
= put_user_u8(val
, addr
);
645 segv
= put_user_u16(val
, addr
);
649 segv
= put_user_u32(val
, addr
);
653 env
->exception
.vaddress
= addr
;
657 val
= env
->regs
[(env
->exclusive_info
>> 12) & 0xf];
658 segv
= put_user_u32(val
, addr
+ 4);
660 env
->exception
.vaddress
= addr
+ 4;
667 env
->regs
[(env
->exclusive_info
>> 4) & 0xf] = rc
;
673 void cpu_loop(CPUARMState
*env
)
675 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
677 unsigned int n
, insn
;
678 target_siginfo_t info
;
683 trapnr
= cpu_arm_exec(env
);
688 TaskState
*ts
= cs
->opaque
;
692 /* we handle the FPU emulation here, as Linux */
693 /* we get the opcode */
694 /* FIXME - what to do if get_user() fails? */
695 get_user_code_u32(opcode
, env
->regs
[15], env
->bswap_code
);
697 rc
= EmulateAll(opcode
, &ts
->fpa
, env
);
698 if (rc
== 0) { /* illegal instruction */
699 info
.si_signo
= SIGILL
;
701 info
.si_code
= TARGET_ILL_ILLOPN
;
702 info
._sifields
._sigfault
._addr
= env
->regs
[15];
703 queue_signal(env
, info
.si_signo
, &info
);
704 } else if (rc
< 0) { /* FP exception */
707 /* translate softfloat flags to FPSR flags */
708 if (-rc
& float_flag_invalid
)
710 if (-rc
& float_flag_divbyzero
)
712 if (-rc
& float_flag_overflow
)
714 if (-rc
& float_flag_underflow
)
716 if (-rc
& float_flag_inexact
)
719 FPSR fpsr
= ts
->fpa
.fpsr
;
720 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
722 if (fpsr
& (arm_fpe
<< 16)) { /* exception enabled? */
723 info
.si_signo
= SIGFPE
;
726 /* ordered by priority, least first */
727 if (arm_fpe
& BIT_IXC
) info
.si_code
= TARGET_FPE_FLTRES
;
728 if (arm_fpe
& BIT_UFC
) info
.si_code
= TARGET_FPE_FLTUND
;
729 if (arm_fpe
& BIT_OFC
) info
.si_code
= TARGET_FPE_FLTOVF
;
730 if (arm_fpe
& BIT_DZC
) info
.si_code
= TARGET_FPE_FLTDIV
;
731 if (arm_fpe
& BIT_IOC
) info
.si_code
= TARGET_FPE_FLTINV
;
733 info
._sifields
._sigfault
._addr
= env
->regs
[15];
734 queue_signal(env
, info
.si_signo
, &info
);
739 /* accumulate unenabled exceptions */
740 if ((!(fpsr
& BIT_IXE
)) && (arm_fpe
& BIT_IXC
))
742 if ((!(fpsr
& BIT_UFE
)) && (arm_fpe
& BIT_UFC
))
744 if ((!(fpsr
& BIT_OFE
)) && (arm_fpe
& BIT_OFC
))
746 if ((!(fpsr
& BIT_DZE
)) && (arm_fpe
& BIT_DZC
))
748 if ((!(fpsr
& BIT_IOE
)) && (arm_fpe
& BIT_IOC
))
751 } else { /* everything OK */
762 if (trapnr
== EXCP_BKPT
) {
764 /* FIXME - what to do if get_user() fails? */
765 get_user_code_u16(insn
, env
->regs
[15], env
->bswap_code
);
769 /* FIXME - what to do if get_user() fails? */
770 get_user_code_u32(insn
, env
->regs
[15], env
->bswap_code
);
771 n
= (insn
& 0xf) | ((insn
>> 4) & 0xff0);
776 /* FIXME - what to do if get_user() fails? */
777 get_user_code_u16(insn
, env
->regs
[15] - 2,
781 /* FIXME - what to do if get_user() fails? */
782 get_user_code_u32(insn
, env
->regs
[15] - 4,
788 if (n
== ARM_NR_cacheflush
) {
790 } else if (n
== ARM_NR_semihosting
791 || n
== ARM_NR_thumb_semihosting
) {
792 env
->regs
[0] = do_arm_semihosting (env
);
793 } else if (n
== 0 || n
>= ARM_SYSCALL_BASE
|| env
->thumb
) {
795 if (env
->thumb
|| n
== 0) {
798 n
-= ARM_SYSCALL_BASE
;
801 if ( n
> ARM_NR_BASE
) {
803 case ARM_NR_cacheflush
:
807 cpu_set_tls(env
, env
->regs
[0]);
811 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
813 env
->regs
[0] = -TARGET_ENOSYS
;
817 env
->regs
[0] = do_syscall(env
,
833 /* just indicate that signals should be handled asap */
836 if (!do_strex(env
)) {
839 /* fall through for segv */
840 case EXCP_PREFETCH_ABORT
:
841 case EXCP_DATA_ABORT
:
842 addr
= env
->exception
.vaddress
;
844 info
.si_signo
= SIGSEGV
;
846 /* XXX: check env->error_code */
847 info
.si_code
= TARGET_SEGV_MAPERR
;
848 info
._sifields
._sigfault
._addr
= addr
;
849 queue_signal(env
, info
.si_signo
, &info
);
856 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
861 info
.si_code
= TARGET_TRAP_BRKPT
;
862 queue_signal(env
, info
.si_signo
, &info
);
866 case EXCP_KERNEL_TRAP
:
867 if (do_kernel_trap(env
))
872 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
874 cpu_dump_state(cs
, stderr
, fprintf
, 0);
877 process_pending_signals(env
);
884 * Handle AArch64 store-release exclusive
886 * rs = gets the status result of store exclusive
887 * rt = is the register that is stored
888 * rt2 = is the second register store (in STP)
891 static int do_strex_a64(CPUARMState
*env
)
902 /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
903 size
= extract32(env
->exclusive_info
, 0, 2);
904 is_pair
= extract32(env
->exclusive_info
, 2, 1);
905 rs
= extract32(env
->exclusive_info
, 4, 5);
906 rt
= extract32(env
->exclusive_info
, 9, 5);
907 rt2
= extract32(env
->exclusive_info
, 14, 5);
909 addr
= env
->exclusive_addr
;
911 if (addr
!= env
->exclusive_test
) {
917 segv
= get_user_u8(val
, addr
);
920 segv
= get_user_u16(val
, addr
);
923 segv
= get_user_u32(val
, addr
);
926 segv
= get_user_u64(val
, addr
);
932 env
->exception
.vaddress
= addr
;
935 if (val
!= env
->exclusive_val
) {
940 segv
= get_user_u32(val
, addr
+ 4);
942 segv
= get_user_u64(val
, addr
+ 8);
945 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
948 if (val
!= env
->exclusive_high
) {
952 /* handle the zero register */
953 val
= rt
== 31 ? 0 : env
->xregs
[rt
];
956 segv
= put_user_u8(val
, addr
);
959 segv
= put_user_u16(val
, addr
);
962 segv
= put_user_u32(val
, addr
);
965 segv
= put_user_u64(val
, addr
);
972 /* handle the zero register */
973 val
= rt2
== 31 ? 0 : env
->xregs
[rt2
];
975 segv
= put_user_u32(val
, addr
+ 4);
977 segv
= put_user_u64(val
, addr
+ 8);
980 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
987 /* rs == 31 encodes a write to the ZR, thus throwing away
988 * the status return. This is rather silly but valid.
994 /* instruction faulted, PC does not advance */
995 /* either way a strex releases any exclusive lock we have */
996 env
->exclusive_addr
= -1;
1001 /* AArch64 main loop */
1002 void cpu_loop(CPUARMState
*env
)
1004 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
1006 target_siginfo_t info
;
1011 trapnr
= cpu_arm_exec(env
);
1016 env
->xregs
[0] = do_syscall(env
,
1026 case EXCP_INTERRUPT
:
1027 /* just indicate that signals should be handled asap */
1030 info
.si_signo
= SIGILL
;
1032 info
.si_code
= TARGET_ILL_ILLOPN
;
1033 info
._sifields
._sigfault
._addr
= env
->pc
;
1034 queue_signal(env
, info
.si_signo
, &info
);
1037 if (!do_strex_a64(env
)) {
1040 /* fall through for segv */
1041 case EXCP_PREFETCH_ABORT
:
1042 case EXCP_DATA_ABORT
:
1043 addr
= env
->exception
.vaddress
;
1044 info
.si_signo
= SIGSEGV
;
1046 /* XXX: check env->error_code */
1047 info
.si_code
= TARGET_SEGV_MAPERR
;
1048 info
._sifields
._sigfault
._addr
= addr
;
1049 queue_signal(env
, info
.si_signo
, &info
);
1053 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1055 info
.si_signo
= sig
;
1057 info
.si_code
= TARGET_TRAP_BRKPT
;
1058 queue_signal(env
, info
.si_signo
, &info
);
1062 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
1064 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1067 process_pending_signals(env
);
1068 /* Exception return on AArch64 always clears the exclusive monitor,
1069 * so any return to running guest code implies this.
1070 * A strex (successful or otherwise) also clears the monitor, so
1071 * we don't need to specialcase EXCP_STREX.
1073 env
->exclusive_addr
= -1;
1076 #endif /* ndef TARGET_ABI32 */
1080 #ifdef TARGET_UNICORE32
1082 void cpu_loop(CPUUniCore32State
*env
)
1084 CPUState
*cs
= CPU(uc32_env_get_cpu(env
));
1086 unsigned int n
, insn
;
1087 target_siginfo_t info
;
1091 trapnr
= uc32_cpu_exec(env
);
1094 case UC32_EXCP_PRIV
:
1097 get_user_u32(insn
, env
->regs
[31] - 4);
1098 n
= insn
& 0xffffff;
1100 if (n
>= UC32_SYSCALL_BASE
) {
1102 n
-= UC32_SYSCALL_BASE
;
1103 if (n
== UC32_SYSCALL_NR_set_tls
) {
1104 cpu_set_tls(env
, env
->regs
[0]);
1107 env
->regs
[0] = do_syscall(env
,
1122 case UC32_EXCP_DTRAP
:
1123 case UC32_EXCP_ITRAP
:
1124 info
.si_signo
= SIGSEGV
;
1126 /* XXX: check env->error_code */
1127 info
.si_code
= TARGET_SEGV_MAPERR
;
1128 info
._sifields
._sigfault
._addr
= env
->cp0
.c4_faultaddr
;
1129 queue_signal(env
, info
.si_signo
, &info
);
1131 case EXCP_INTERRUPT
:
1132 /* just indicate that signals should be handled asap */
1138 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1140 info
.si_signo
= sig
;
1142 info
.si_code
= TARGET_TRAP_BRKPT
;
1143 queue_signal(env
, info
.si_signo
, &info
);
1150 process_pending_signals(env
);
1154 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1155 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1161 #define SPARC64_STACK_BIAS 2047
1165 /* WARNING: dealing with register windows _is_ complicated. More info
1166 can be found at http://www.sics.se/~psm/sparcstack.html */
1167 static inline int get_reg_index(CPUSPARCState
*env
, int cwp
, int index
)
1169 index
= (index
+ cwp
* 16) % (16 * env
->nwindows
);
1170 /* wrap handling : if cwp is on the last window, then we use the
1171 registers 'after' the end */
1172 if (index
< 8 && env
->cwp
== env
->nwindows
- 1)
1173 index
+= 16 * env
->nwindows
;
1177 /* save the register window 'cwp1' */
1178 static inline void save_window_offset(CPUSPARCState
*env
, int cwp1
)
1183 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1184 #ifdef TARGET_SPARC64
1186 sp_ptr
+= SPARC64_STACK_BIAS
;
1188 #if defined(DEBUG_WIN)
1189 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" save_cwp=%d\n",
1192 for(i
= 0; i
< 16; i
++) {
1193 /* FIXME - what to do if put_user() fails? */
1194 put_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1195 sp_ptr
+= sizeof(abi_ulong
);
1199 static void save_window(CPUSPARCState
*env
)
1201 #ifndef TARGET_SPARC64
1202 unsigned int new_wim
;
1203 new_wim
= ((env
->wim
>> 1) | (env
->wim
<< (env
->nwindows
- 1))) &
1204 ((1LL << env
->nwindows
) - 1);
1205 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1208 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1214 static void restore_window(CPUSPARCState
*env
)
1216 #ifndef TARGET_SPARC64
1217 unsigned int new_wim
;
1219 unsigned int i
, cwp1
;
1222 #ifndef TARGET_SPARC64
1223 new_wim
= ((env
->wim
<< 1) | (env
->wim
>> (env
->nwindows
- 1))) &
1224 ((1LL << env
->nwindows
) - 1);
1227 /* restore the invalid window */
1228 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1229 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1230 #ifdef TARGET_SPARC64
1232 sp_ptr
+= SPARC64_STACK_BIAS
;
1234 #if defined(DEBUG_WIN)
1235 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" load_cwp=%d\n",
1238 for(i
= 0; i
< 16; i
++) {
1239 /* FIXME - what to do if get_user() fails? */
1240 get_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1241 sp_ptr
+= sizeof(abi_ulong
);
1243 #ifdef TARGET_SPARC64
1245 if (env
->cleanwin
< env
->nwindows
- 1)
1253 static void flush_windows(CPUSPARCState
*env
)
1259 /* if restore would invoke restore_window(), then we can stop */
1260 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ offset
);
1261 #ifndef TARGET_SPARC64
1262 if (env
->wim
& (1 << cwp1
))
1265 if (env
->canrestore
== 0)
1270 save_window_offset(env
, cwp1
);
1273 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1274 #ifndef TARGET_SPARC64
1275 /* set wim so that restore will reload the registers */
1276 env
->wim
= 1 << cwp1
;
1278 #if defined(DEBUG_WIN)
1279 printf("flush_windows: nb=%d\n", offset
- 1);
1283 void cpu_loop (CPUSPARCState
*env
)
1285 CPUState
*cs
= CPU(sparc_env_get_cpu(env
));
1288 target_siginfo_t info
;
1291 trapnr
= cpu_sparc_exec (env
);
1293 /* Compute PSR before exposing state. */
1294 if (env
->cc_op
!= CC_OP_FLAGS
) {
1299 #ifndef TARGET_SPARC64
1306 ret
= do_syscall (env
, env
->gregs
[1],
1307 env
->regwptr
[0], env
->regwptr
[1],
1308 env
->regwptr
[2], env
->regwptr
[3],
1309 env
->regwptr
[4], env
->regwptr
[5],
1311 if ((abi_ulong
)ret
>= (abi_ulong
)(-515)) {
1312 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1313 env
->xcc
|= PSR_CARRY
;
1315 env
->psr
|= PSR_CARRY
;
1319 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1320 env
->xcc
&= ~PSR_CARRY
;
1322 env
->psr
&= ~PSR_CARRY
;
1325 env
->regwptr
[0] = ret
;
1326 /* next instruction */
1328 env
->npc
= env
->npc
+ 4;
1330 case 0x83: /* flush windows */
1335 /* next instruction */
1337 env
->npc
= env
->npc
+ 4;
1339 #ifndef TARGET_SPARC64
1340 case TT_WIN_OVF
: /* window overflow */
1343 case TT_WIN_UNF
: /* window underflow */
1344 restore_window(env
);
1349 info
.si_signo
= TARGET_SIGSEGV
;
1351 /* XXX: check env->error_code */
1352 info
.si_code
= TARGET_SEGV_MAPERR
;
1353 info
._sifields
._sigfault
._addr
= env
->mmuregs
[4];
1354 queue_signal(env
, info
.si_signo
, &info
);
1358 case TT_SPILL
: /* window overflow */
1361 case TT_FILL
: /* window underflow */
1362 restore_window(env
);
1367 info
.si_signo
= TARGET_SIGSEGV
;
1369 /* XXX: check env->error_code */
1370 info
.si_code
= TARGET_SEGV_MAPERR
;
1371 if (trapnr
== TT_DFAULT
)
1372 info
._sifields
._sigfault
._addr
= env
->dmmuregs
[4];
1374 info
._sifields
._sigfault
._addr
= cpu_tsptr(env
)->tpc
;
1375 queue_signal(env
, info
.si_signo
, &info
);
1378 #ifndef TARGET_ABI32
1381 sparc64_get_context(env
);
1385 sparc64_set_context(env
);
1389 case EXCP_INTERRUPT
:
1390 /* just indicate that signals should be handled asap */
1394 info
.si_signo
= TARGET_SIGILL
;
1396 info
.si_code
= TARGET_ILL_ILLOPC
;
1397 info
._sifields
._sigfault
._addr
= env
->pc
;
1398 queue_signal(env
, info
.si_signo
, &info
);
1405 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1408 info
.si_signo
= sig
;
1410 info
.si_code
= TARGET_TRAP_BRKPT
;
1411 queue_signal(env
, info
.si_signo
, &info
);
1416 printf ("Unhandled trap: 0x%x\n", trapnr
);
1417 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1420 process_pending_signals (env
);
1427 static inline uint64_t cpu_ppc_get_tb(CPUPPCState
*env
)
1433 uint64_t cpu_ppc_load_tbl(CPUPPCState
*env
)
1435 return cpu_ppc_get_tb(env
);
1438 uint32_t cpu_ppc_load_tbu(CPUPPCState
*env
)
1440 return cpu_ppc_get_tb(env
) >> 32;
1443 uint64_t cpu_ppc_load_atbl(CPUPPCState
*env
)
1445 return cpu_ppc_get_tb(env
);
1448 uint32_t cpu_ppc_load_atbu(CPUPPCState
*env
)
1450 return cpu_ppc_get_tb(env
) >> 32;
1453 uint32_t cpu_ppc601_load_rtcu(CPUPPCState
*env
)
1454 __attribute__ (( alias ("cpu_ppc_load_tbu") ));
1456 uint32_t cpu_ppc601_load_rtcl(CPUPPCState
*env
)
1458 return cpu_ppc_load_tbl(env
) & 0x3FFFFF80;
1461 /* XXX: to be fixed */
1462 int ppc_dcr_read (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t *valp
)
1467 int ppc_dcr_write (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t val
)
#define EXCP_DUMP(env, fmt, ...)                                        \
do {                                                                    \
    CPUState *cs = ENV_GET_CPU(env);                                    \
    fprintf(stderr, fmt , ## __VA_ARGS__);                              \
    cpu_dump_state(cs, stderr, fprintf, 0);                             \
    qemu_log(fmt, ## __VA_ARGS__);                                      \
    if (qemu_log_enabled()) {                                           \
        log_cpu_state(cs, 0);                                           \
    }                                                                   \
} while (0)
1483 static int do_store_exclusive(CPUPPCState
*env
)
1486 target_ulong page_addr
;
1487 target_ulong val
, val2
__attribute__((unused
));
1491 addr
= env
->reserve_ea
;
1492 page_addr
= addr
& TARGET_PAGE_MASK
;
1495 flags
= page_get_flags(page_addr
);
1496 if ((flags
& PAGE_READ
) == 0) {
1499 int reg
= env
->reserve_info
& 0x1f;
1500 int size
= (env
->reserve_info
>> 5) & 0xf;
1503 if (addr
== env
->reserve_addr
) {
1505 case 1: segv
= get_user_u8(val
, addr
); break;
1506 case 2: segv
= get_user_u16(val
, addr
); break;
1507 case 4: segv
= get_user_u32(val
, addr
); break;
1508 #if defined(TARGET_PPC64)
1509 case 8: segv
= get_user_u64(val
, addr
); break;
1511 segv
= get_user_u64(val
, addr
);
1513 segv
= get_user_u64(val2
, addr
+ 8);
1520 if (!segv
&& val
== env
->reserve_val
) {
1521 val
= env
->gpr
[reg
];
1523 case 1: segv
= put_user_u8(val
, addr
); break;
1524 case 2: segv
= put_user_u16(val
, addr
); break;
1525 case 4: segv
= put_user_u32(val
, addr
); break;
1526 #if defined(TARGET_PPC64)
1527 case 8: segv
= put_user_u64(val
, addr
); break;
1529 if (val2
== env
->reserve_val2
) {
1530 segv
= put_user_u64(val
, addr
);
1532 segv
= put_user_u64(val2
, addr
+ 8);
1545 env
->crf
[0] = (stored
<< 1) | xer_so
;
1546 env
->reserve_addr
= (target_ulong
)-1;
1556 void cpu_loop(CPUPPCState
*env
)
1558 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
1559 target_siginfo_t info
;
1565 trapnr
= cpu_ppc_exec(env
);
1568 case POWERPC_EXCP_NONE
:
1571 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1572 cpu_abort(cs
, "Critical interrupt while in user mode. "
1575 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1576 cpu_abort(cs
, "Machine check exception while in user mode. "
1579 case POWERPC_EXCP_DSI
: /* Data storage exception */
1580 EXCP_DUMP(env
, "Invalid data memory access: 0x" TARGET_FMT_lx
"\n",
1582 /* XXX: check this. Seems bugged */
1583 switch (env
->error_code
& 0xFF000000) {
1585 info
.si_signo
= TARGET_SIGSEGV
;
1587 info
.si_code
= TARGET_SEGV_MAPERR
;
1590 info
.si_signo
= TARGET_SIGILL
;
1592 info
.si_code
= TARGET_ILL_ILLADR
;
1595 info
.si_signo
= TARGET_SIGSEGV
;
1597 info
.si_code
= TARGET_SEGV_ACCERR
;
1600 /* Let's send a regular segfault... */
1601 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1603 info
.si_signo
= TARGET_SIGSEGV
;
1605 info
.si_code
= TARGET_SEGV_MAPERR
;
1608 info
._sifields
._sigfault
._addr
= env
->nip
;
1609 queue_signal(env
, info
.si_signo
, &info
);
1611 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1612 EXCP_DUMP(env
, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1613 "\n", env
->spr
[SPR_SRR0
]);
1614 /* XXX: check this */
1615 switch (env
->error_code
& 0xFF000000) {
1617 info
.si_signo
= TARGET_SIGSEGV
;
1619 info
.si_code
= TARGET_SEGV_MAPERR
;
1623 info
.si_signo
= TARGET_SIGSEGV
;
1625 info
.si_code
= TARGET_SEGV_ACCERR
;
1628 /* Let's send a regular segfault... */
1629 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1631 info
.si_signo
= TARGET_SIGSEGV
;
1633 info
.si_code
= TARGET_SEGV_MAPERR
;
1636 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1637 queue_signal(env
, info
.si_signo
, &info
);
1639 case POWERPC_EXCP_EXTERNAL
: /* External input */
1640 cpu_abort(cs
, "External interrupt while in user mode. "
1643 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1644 EXCP_DUMP(env
, "Unaligned memory access\n");
1645 /* XXX: check this */
1646 info
.si_signo
= TARGET_SIGBUS
;
1648 info
.si_code
= TARGET_BUS_ADRALN
;
1649 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1650 queue_signal(env
, info
.si_signo
, &info
);
1652 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1653 /* XXX: check this */
1654 switch (env
->error_code
& ~0xF) {
1655 case POWERPC_EXCP_FP
:
1656 EXCP_DUMP(env
, "Floating point program exception\n");
1657 info
.si_signo
= TARGET_SIGFPE
;
1659 switch (env
->error_code
& 0xF) {
1660 case POWERPC_EXCP_FP_OX
:
1661 info
.si_code
= TARGET_FPE_FLTOVF
;
1663 case POWERPC_EXCP_FP_UX
:
1664 info
.si_code
= TARGET_FPE_FLTUND
;
1666 case POWERPC_EXCP_FP_ZX
:
1667 case POWERPC_EXCP_FP_VXZDZ
:
1668 info
.si_code
= TARGET_FPE_FLTDIV
;
1670 case POWERPC_EXCP_FP_XX
:
1671 info
.si_code
= TARGET_FPE_FLTRES
;
1673 case POWERPC_EXCP_FP_VXSOFT
:
1674 info
.si_code
= TARGET_FPE_FLTINV
;
1676 case POWERPC_EXCP_FP_VXSNAN
:
1677 case POWERPC_EXCP_FP_VXISI
:
1678 case POWERPC_EXCP_FP_VXIDI
:
1679 case POWERPC_EXCP_FP_VXIMZ
:
1680 case POWERPC_EXCP_FP_VXVC
:
1681 case POWERPC_EXCP_FP_VXSQRT
:
1682 case POWERPC_EXCP_FP_VXCVI
:
1683 info
.si_code
= TARGET_FPE_FLTSUB
;
1686 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1691 case POWERPC_EXCP_INVAL
:
1692 EXCP_DUMP(env
, "Invalid instruction\n");
1693 info
.si_signo
= TARGET_SIGILL
;
1695 switch (env
->error_code
& 0xF) {
1696 case POWERPC_EXCP_INVAL_INVAL
:
1697 info
.si_code
= TARGET_ILL_ILLOPC
;
1699 case POWERPC_EXCP_INVAL_LSWX
:
1700 info
.si_code
= TARGET_ILL_ILLOPN
;
1702 case POWERPC_EXCP_INVAL_SPR
:
1703 info
.si_code
= TARGET_ILL_PRVREG
;
1705 case POWERPC_EXCP_INVAL_FP
:
1706 info
.si_code
= TARGET_ILL_COPROC
;
1709 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1710 env
->error_code
& 0xF);
1711 info
.si_code
= TARGET_ILL_ILLADR
;
1715 case POWERPC_EXCP_PRIV
:
1716 EXCP_DUMP(env
, "Privilege violation\n");
1717 info
.si_signo
= TARGET_SIGILL
;
1719 switch (env
->error_code
& 0xF) {
1720 case POWERPC_EXCP_PRIV_OPC
:
1721 info
.si_code
= TARGET_ILL_PRVOPC
;
1723 case POWERPC_EXCP_PRIV_REG
:
1724 info
.si_code
= TARGET_ILL_PRVREG
;
1727 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1728 env
->error_code
& 0xF);
1729 info
.si_code
= TARGET_ILL_PRVOPC
;
1733 case POWERPC_EXCP_TRAP
:
1734 cpu_abort(cs
, "Tried to call a TRAP\n");
1737 /* Should not happen ! */
1738 cpu_abort(cs
, "Unknown program exception (%02x)\n",
1742 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1743 queue_signal(env
, info
.si_signo
, &info
);
1745 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1746 EXCP_DUMP(env
, "No floating point allowed\n");
1747 info
.si_signo
= TARGET_SIGILL
;
1749 info
.si_code
= TARGET_ILL_COPROC
;
1750 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1751 queue_signal(env
, info
.si_signo
, &info
);
1753 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1754 cpu_abort(cs
, "Syscall exception while in user mode. "
1757 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1758 EXCP_DUMP(env
, "No APU instruction allowed\n");
1759 info
.si_signo
= TARGET_SIGILL
;
1761 info
.si_code
= TARGET_ILL_COPROC
;
1762 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1763 queue_signal(env
, info
.si_signo
, &info
);
1765 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1766 cpu_abort(cs
, "Decrementer interrupt while in user mode. "
1769 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1770 cpu_abort(cs
, "Fix interval timer interrupt while in user mode. "
1773 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1774 cpu_abort(cs
, "Watchdog timer interrupt while in user mode. "
1777 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1778 cpu_abort(cs
, "Data TLB exception while in user mode. "
1781 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1782 cpu_abort(cs
, "Instruction TLB exception while in user mode. "
1785 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1786 EXCP_DUMP(env
, "No SPE/floating-point instruction allowed\n");
1787 info
.si_signo
= TARGET_SIGILL
;
1789 info
.si_code
= TARGET_ILL_COPROC
;
1790 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1791 queue_signal(env
, info
.si_signo
, &info
);
1793 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1794 cpu_abort(cs
, "Embedded floating-point data IRQ not handled\n");
1796 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1797 cpu_abort(cs
, "Embedded floating-point round IRQ not handled\n");
1799 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1800 cpu_abort(cs
, "Performance monitor exception not handled\n");
1802 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1803 cpu_abort(cs
, "Doorbell interrupt while in user mode. "
1806 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1807 cpu_abort(cs
, "Doorbell critical interrupt while in user mode. "
1810 case POWERPC_EXCP_RESET
: /* System reset exception */
1811 cpu_abort(cs
, "Reset interrupt while in user mode. "
1814 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1815 cpu_abort(cs
, "Data segment exception while in user mode. "
1818 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1819 cpu_abort(cs
, "Instruction segment exception "
1820 "while in user mode. Aborting\n");
1822 /* PowerPC 64 with hypervisor mode support */
1823 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1824 cpu_abort(cs
, "Hypervisor decrementer interrupt "
1825 "while in user mode. Aborting\n");
1827 case POWERPC_EXCP_TRACE
: /* Trace exception */
1829 * we use this exception to emulate step-by-step execution mode.
1832 /* PowerPC 64 with hypervisor mode support */
1833 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1834 cpu_abort(cs
, "Hypervisor data storage exception "
1835 "while in user mode. Aborting\n");
1837 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1838 cpu_abort(cs
, "Hypervisor instruction storage exception "
1839 "while in user mode. Aborting\n");
1841 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1842 cpu_abort(cs
, "Hypervisor data segment exception "
1843 "while in user mode. Aborting\n");
1845 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1846 cpu_abort(cs
, "Hypervisor instruction segment exception "
1847 "while in user mode. Aborting\n");
1849 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1850 EXCP_DUMP(env
, "No Altivec instructions allowed\n");
1851 info
.si_signo
= TARGET_SIGILL
;
1853 info
.si_code
= TARGET_ILL_COPROC
;
1854 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1855 queue_signal(env
, info
.si_signo
, &info
);
1857 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1858 cpu_abort(cs
, "Programmable interval timer interrupt "
1859 "while in user mode. Aborting\n");
1861 case POWERPC_EXCP_IO
: /* IO error exception */
1862 cpu_abort(cs
, "IO error exception while in user mode. "
1865 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1866 cpu_abort(cs
, "Run mode exception while in user mode. "
1869 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1870 cpu_abort(cs
, "Emulation trap exception not handled\n");
1872 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1873 cpu_abort(cs
, "Instruction fetch TLB exception "
1874 "while in user-mode. Aborting");
1876 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1877 cpu_abort(cs
, "Data load TLB exception while in user-mode. "
1880 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
1881 cpu_abort(cs
, "Data store TLB exception while in user-mode. "
1884 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
1885 cpu_abort(cs
, "Floating-point assist exception not handled\n");
1887 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1888 cpu_abort(cs
, "Instruction address breakpoint exception "
1891 case POWERPC_EXCP_SMI
: /* System management interrupt */
1892 cpu_abort(cs
, "System management interrupt while in user mode. "
1895 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1896 cpu_abort(cs
, "Thermal interrupt interrupt while in user mode. "
1899 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
1900 cpu_abort(cs
, "Performance monitor exception not handled\n");
1902 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1903 cpu_abort(cs
, "Vector assist exception not handled\n");
1905 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
1906 cpu_abort(cs
, "Soft patch exception not handled\n");
1908 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1909 cpu_abort(cs
, "Maintenance exception while in user mode. "
1912 case POWERPC_EXCP_STOP
: /* stop translation */
1913 /* We did invalidate the instruction cache. Go on */
1915 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
1916 /* We just stopped because of a branch. Go on */
1918 case POWERPC_EXCP_SYSCALL_USER
:
1919 /* system call in user-mode emulation */
1921 * PPC ABI uses overflow flag in cr0 to signal an error
1924 env
->crf
[0] &= ~0x1;
1925 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
1926 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
1928 if (ret
== (target_ulong
)(-TARGET_QEMU_ESIGRETURN
)) {
1929 /* Returning from a successful sigreturn syscall.
1930 Avoid corrupting register state. */
1933 if (ret
> (target_ulong
)(-515)) {
1939 case POWERPC_EXCP_STCX
:
1940 if (do_store_exclusive(env
)) {
1941 info
.si_signo
= TARGET_SIGSEGV
;
1943 info
.si_code
= TARGET_SEGV_MAPERR
;
1944 info
._sifields
._sigfault
._addr
= env
->nip
;
1945 queue_signal(env
, info
.si_signo
, &info
);
1952 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1954 info
.si_signo
= sig
;
1956 info
.si_code
= TARGET_TRAP_BRKPT
;
1957 queue_signal(env
, info
.si_signo
, &info
);
1961 case EXCP_INTERRUPT
:
1962 /* just indicate that signals should be handled asap */
1965 cpu_abort(cs
, "Unknown exception 0x%d. Aborting\n", trapnr
);
1968 process_pending_signals(env
);
1975 # ifdef TARGET_ABI_MIPSO32
1976 # define MIPS_SYS(name, args) args,
1977 static const uint8_t mips_syscall_args
[] = {
1978 MIPS_SYS(sys_syscall
, 8) /* 4000 */
1979 MIPS_SYS(sys_exit
, 1)
1980 MIPS_SYS(sys_fork
, 0)
1981 MIPS_SYS(sys_read
, 3)
1982 MIPS_SYS(sys_write
, 3)
1983 MIPS_SYS(sys_open
, 3) /* 4005 */
1984 MIPS_SYS(sys_close
, 1)
1985 MIPS_SYS(sys_waitpid
, 3)
1986 MIPS_SYS(sys_creat
, 2)
1987 MIPS_SYS(sys_link
, 2)
1988 MIPS_SYS(sys_unlink
, 1) /* 4010 */
1989 MIPS_SYS(sys_execve
, 0)
1990 MIPS_SYS(sys_chdir
, 1)
1991 MIPS_SYS(sys_time
, 1)
1992 MIPS_SYS(sys_mknod
, 3)
1993 MIPS_SYS(sys_chmod
, 2) /* 4015 */
1994 MIPS_SYS(sys_lchown
, 3)
1995 MIPS_SYS(sys_ni_syscall
, 0)
1996 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_stat */
1997 MIPS_SYS(sys_lseek
, 3)
1998 MIPS_SYS(sys_getpid
, 0) /* 4020 */
1999 MIPS_SYS(sys_mount
, 5)
2000 MIPS_SYS(sys_umount
, 1)
2001 MIPS_SYS(sys_setuid
, 1)
2002 MIPS_SYS(sys_getuid
, 0)
2003 MIPS_SYS(sys_stime
, 1) /* 4025 */
2004 MIPS_SYS(sys_ptrace
, 4)
2005 MIPS_SYS(sys_alarm
, 1)
2006 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_fstat */
2007 MIPS_SYS(sys_pause
, 0)
2008 MIPS_SYS(sys_utime
, 2) /* 4030 */
2009 MIPS_SYS(sys_ni_syscall
, 0)
2010 MIPS_SYS(sys_ni_syscall
, 0)
2011 MIPS_SYS(sys_access
, 2)
2012 MIPS_SYS(sys_nice
, 1)
2013 MIPS_SYS(sys_ni_syscall
, 0) /* 4035 */
2014 MIPS_SYS(sys_sync
, 0)
2015 MIPS_SYS(sys_kill
, 2)
2016 MIPS_SYS(sys_rename
, 2)
2017 MIPS_SYS(sys_mkdir
, 2)
2018 MIPS_SYS(sys_rmdir
, 1) /* 4040 */
2019 MIPS_SYS(sys_dup
, 1)
2020 MIPS_SYS(sys_pipe
, 0)
2021 MIPS_SYS(sys_times
, 1)
2022 MIPS_SYS(sys_ni_syscall
, 0)
2023 MIPS_SYS(sys_brk
, 1) /* 4045 */
2024 MIPS_SYS(sys_setgid
, 1)
2025 MIPS_SYS(sys_getgid
, 0)
2026 MIPS_SYS(sys_ni_syscall
, 0) /* was signal(2) */
2027 MIPS_SYS(sys_geteuid
, 0)
2028 MIPS_SYS(sys_getegid
, 0) /* 4050 */
2029 MIPS_SYS(sys_acct
, 0)
2030 MIPS_SYS(sys_umount2
, 2)
2031 MIPS_SYS(sys_ni_syscall
, 0)
2032 MIPS_SYS(sys_ioctl
, 3)
2033 MIPS_SYS(sys_fcntl
, 3) /* 4055 */
2034 MIPS_SYS(sys_ni_syscall
, 2)
2035 MIPS_SYS(sys_setpgid
, 2)
2036 MIPS_SYS(sys_ni_syscall
, 0)
2037 MIPS_SYS(sys_olduname
, 1)
2038 MIPS_SYS(sys_umask
, 1) /* 4060 */
2039 MIPS_SYS(sys_chroot
, 1)
2040 MIPS_SYS(sys_ustat
, 2)
2041 MIPS_SYS(sys_dup2
, 2)
2042 MIPS_SYS(sys_getppid
, 0)
2043 MIPS_SYS(sys_getpgrp
, 0) /* 4065 */
2044 MIPS_SYS(sys_setsid
, 0)
2045 MIPS_SYS(sys_sigaction
, 3)
2046 MIPS_SYS(sys_sgetmask
, 0)
2047 MIPS_SYS(sys_ssetmask
, 1)
2048 MIPS_SYS(sys_setreuid
, 2) /* 4070 */
2049 MIPS_SYS(sys_setregid
, 2)
2050 MIPS_SYS(sys_sigsuspend
, 0)
2051 MIPS_SYS(sys_sigpending
, 1)
2052 MIPS_SYS(sys_sethostname
, 2)
2053 MIPS_SYS(sys_setrlimit
, 2) /* 4075 */
2054 MIPS_SYS(sys_getrlimit
, 2)
2055 MIPS_SYS(sys_getrusage
, 2)
2056 MIPS_SYS(sys_gettimeofday
, 2)
2057 MIPS_SYS(sys_settimeofday
, 2)
2058 MIPS_SYS(sys_getgroups
, 2) /* 4080 */
2059 MIPS_SYS(sys_setgroups
, 2)
2060 MIPS_SYS(sys_ni_syscall
, 0) /* old_select */
2061 MIPS_SYS(sys_symlink
, 2)
2062 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_lstat */
2063 MIPS_SYS(sys_readlink
, 3) /* 4085 */
2064 MIPS_SYS(sys_uselib
, 1)
2065 MIPS_SYS(sys_swapon
, 2)
2066 MIPS_SYS(sys_reboot
, 3)
2067 MIPS_SYS(old_readdir
, 3)
2068 MIPS_SYS(old_mmap
, 6) /* 4090 */
2069 MIPS_SYS(sys_munmap
, 2)
2070 MIPS_SYS(sys_truncate
, 2)
2071 MIPS_SYS(sys_ftruncate
, 2)
2072 MIPS_SYS(sys_fchmod
, 2)
2073 MIPS_SYS(sys_fchown
, 3) /* 4095 */
2074 MIPS_SYS(sys_getpriority
, 2)
2075 MIPS_SYS(sys_setpriority
, 3)
2076 MIPS_SYS(sys_ni_syscall
, 0)
2077 MIPS_SYS(sys_statfs
, 2)
2078 MIPS_SYS(sys_fstatfs
, 2) /* 4100 */
2079 MIPS_SYS(sys_ni_syscall
, 0) /* was ioperm(2) */
2080 MIPS_SYS(sys_socketcall
, 2)
2081 MIPS_SYS(sys_syslog
, 3)
2082 MIPS_SYS(sys_setitimer
, 3)
2083 MIPS_SYS(sys_getitimer
, 2) /* 4105 */
2084 MIPS_SYS(sys_newstat
, 2)
2085 MIPS_SYS(sys_newlstat
, 2)
2086 MIPS_SYS(sys_newfstat
, 2)
2087 MIPS_SYS(sys_uname
, 1)
2088 MIPS_SYS(sys_ni_syscall
, 0) /* 4110 was iopl(2) */
2089 MIPS_SYS(sys_vhangup
, 0)
2090 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_idle() */
2091 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_vm86 */
2092 MIPS_SYS(sys_wait4
, 4)
2093 MIPS_SYS(sys_swapoff
, 1) /* 4115 */
2094 MIPS_SYS(sys_sysinfo
, 1)
2095 MIPS_SYS(sys_ipc
, 6)
2096 MIPS_SYS(sys_fsync
, 1)
2097 MIPS_SYS(sys_sigreturn
, 0)
2098 MIPS_SYS(sys_clone
, 6) /* 4120 */
2099 MIPS_SYS(sys_setdomainname
, 2)
2100 MIPS_SYS(sys_newuname
, 1)
2101 MIPS_SYS(sys_ni_syscall
, 0) /* sys_modify_ldt */
2102 MIPS_SYS(sys_adjtimex
, 1)
2103 MIPS_SYS(sys_mprotect
, 3) /* 4125 */
2104 MIPS_SYS(sys_sigprocmask
, 3)
2105 MIPS_SYS(sys_ni_syscall
, 0) /* was create_module */
2106 MIPS_SYS(sys_init_module
, 5)
2107 MIPS_SYS(sys_delete_module
, 1)
2108 MIPS_SYS(sys_ni_syscall
, 0) /* 4130 was get_kernel_syms */
2109 MIPS_SYS(sys_quotactl
, 0)
2110 MIPS_SYS(sys_getpgid
, 1)
2111 MIPS_SYS(sys_fchdir
, 1)
2112 MIPS_SYS(sys_bdflush
, 2)
2113 MIPS_SYS(sys_sysfs
, 3) /* 4135 */
2114 MIPS_SYS(sys_personality
, 1)
2115 MIPS_SYS(sys_ni_syscall
, 0) /* for afs_syscall */
2116 MIPS_SYS(sys_setfsuid
, 1)
2117 MIPS_SYS(sys_setfsgid
, 1)
2118 MIPS_SYS(sys_llseek
, 5) /* 4140 */
2119 MIPS_SYS(sys_getdents
, 3)
2120 MIPS_SYS(sys_select
, 5)
2121 MIPS_SYS(sys_flock
, 2)
2122 MIPS_SYS(sys_msync
, 3)
2123 MIPS_SYS(sys_readv
, 3) /* 4145 */
2124 MIPS_SYS(sys_writev
, 3)
2125 MIPS_SYS(sys_cacheflush
, 3)
2126 MIPS_SYS(sys_cachectl
, 3)
2127 MIPS_SYS(sys_sysmips
, 4)
2128 MIPS_SYS(sys_ni_syscall
, 0) /* 4150 */
2129 MIPS_SYS(sys_getsid
, 1)
2130 MIPS_SYS(sys_fdatasync
, 0)
2131 MIPS_SYS(sys_sysctl
, 1)
2132 MIPS_SYS(sys_mlock
, 2)
2133 MIPS_SYS(sys_munlock
, 2) /* 4155 */
2134 MIPS_SYS(sys_mlockall
, 1)
2135 MIPS_SYS(sys_munlockall
, 0)
2136 MIPS_SYS(sys_sched_setparam
, 2)
2137 MIPS_SYS(sys_sched_getparam
, 2)
2138 MIPS_SYS(sys_sched_setscheduler
, 3) /* 4160 */
2139 MIPS_SYS(sys_sched_getscheduler
, 1)
2140 MIPS_SYS(sys_sched_yield
, 0)
2141 MIPS_SYS(sys_sched_get_priority_max
, 1)
2142 MIPS_SYS(sys_sched_get_priority_min
, 1)
2143 MIPS_SYS(sys_sched_rr_get_interval
, 2) /* 4165 */
2144 MIPS_SYS(sys_nanosleep
, 2)
2145 MIPS_SYS(sys_mremap
, 5)
2146 MIPS_SYS(sys_accept
, 3)
2147 MIPS_SYS(sys_bind
, 3)
2148 MIPS_SYS(sys_connect
, 3) /* 4170 */
2149 MIPS_SYS(sys_getpeername
, 3)
2150 MIPS_SYS(sys_getsockname
, 3)
2151 MIPS_SYS(sys_getsockopt
, 5)
2152 MIPS_SYS(sys_listen
, 2)
2153 MIPS_SYS(sys_recv
, 4) /* 4175 */
2154 MIPS_SYS(sys_recvfrom
, 6)
2155 MIPS_SYS(sys_recvmsg
, 3)
2156 MIPS_SYS(sys_send
, 4)
2157 MIPS_SYS(sys_sendmsg
, 3)
2158 MIPS_SYS(sys_sendto
, 6) /* 4180 */
2159 MIPS_SYS(sys_setsockopt
, 5)
2160 MIPS_SYS(sys_shutdown
, 2)
2161 MIPS_SYS(sys_socket
, 3)
2162 MIPS_SYS(sys_socketpair
, 4)
2163 MIPS_SYS(sys_setresuid
, 3) /* 4185 */
2164 MIPS_SYS(sys_getresuid
, 3)
2165 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_query_module */
2166 MIPS_SYS(sys_poll
, 3)
2167 MIPS_SYS(sys_nfsservctl
, 3)
2168 MIPS_SYS(sys_setresgid
, 3) /* 4190 */
2169 MIPS_SYS(sys_getresgid
, 3)
2170 MIPS_SYS(sys_prctl
, 5)
2171 MIPS_SYS(sys_rt_sigreturn
, 0)
2172 MIPS_SYS(sys_rt_sigaction
, 4)
2173 MIPS_SYS(sys_rt_sigprocmask
, 4) /* 4195 */
2174 MIPS_SYS(sys_rt_sigpending
, 2)
2175 MIPS_SYS(sys_rt_sigtimedwait
, 4)
2176 MIPS_SYS(sys_rt_sigqueueinfo
, 3)
2177 MIPS_SYS(sys_rt_sigsuspend
, 0)
2178 MIPS_SYS(sys_pread64
, 6) /* 4200 */
2179 MIPS_SYS(sys_pwrite64
, 6)
2180 MIPS_SYS(sys_chown
, 3)
2181 MIPS_SYS(sys_getcwd
, 2)
2182 MIPS_SYS(sys_capget
, 2)
2183 MIPS_SYS(sys_capset
, 2) /* 4205 */
2184 MIPS_SYS(sys_sigaltstack
, 2)
2185 MIPS_SYS(sys_sendfile
, 4)
2186 MIPS_SYS(sys_ni_syscall
, 0)
2187 MIPS_SYS(sys_ni_syscall
, 0)
2188 MIPS_SYS(sys_mmap2
, 6) /* 4210 */
2189 MIPS_SYS(sys_truncate64
, 4)
2190 MIPS_SYS(sys_ftruncate64
, 4)
2191 MIPS_SYS(sys_stat64
, 2)
2192 MIPS_SYS(sys_lstat64
, 2)
2193 MIPS_SYS(sys_fstat64
, 2) /* 4215 */
2194 MIPS_SYS(sys_pivot_root
, 2)
2195 MIPS_SYS(sys_mincore
, 3)
2196 MIPS_SYS(sys_madvise
, 3)
2197 MIPS_SYS(sys_getdents64
, 3)
2198 MIPS_SYS(sys_fcntl64
, 3) /* 4220 */
2199 MIPS_SYS(sys_ni_syscall
, 0)
2200 MIPS_SYS(sys_gettid
, 0)
2201 MIPS_SYS(sys_readahead
, 5)
2202 MIPS_SYS(sys_setxattr
, 5)
2203 MIPS_SYS(sys_lsetxattr
, 5) /* 4225 */
2204 MIPS_SYS(sys_fsetxattr
, 5)
2205 MIPS_SYS(sys_getxattr
, 4)
2206 MIPS_SYS(sys_lgetxattr
, 4)
2207 MIPS_SYS(sys_fgetxattr
, 4)
2208 MIPS_SYS(sys_listxattr
, 3) /* 4230 */
2209 MIPS_SYS(sys_llistxattr
, 3)
2210 MIPS_SYS(sys_flistxattr
, 3)
2211 MIPS_SYS(sys_removexattr
, 2)
2212 MIPS_SYS(sys_lremovexattr
, 2)
2213 MIPS_SYS(sys_fremovexattr
, 2) /* 4235 */
2214 MIPS_SYS(sys_tkill
, 2)
2215 MIPS_SYS(sys_sendfile64
, 5)
2216 MIPS_SYS(sys_futex
, 6)
2217 MIPS_SYS(sys_sched_setaffinity
, 3)
2218 MIPS_SYS(sys_sched_getaffinity
, 3) /* 4240 */
2219 MIPS_SYS(sys_io_setup
, 2)
2220 MIPS_SYS(sys_io_destroy
, 1)
2221 MIPS_SYS(sys_io_getevents
, 5)
2222 MIPS_SYS(sys_io_submit
, 3)
2223 MIPS_SYS(sys_io_cancel
, 3) /* 4245 */
2224 MIPS_SYS(sys_exit_group
, 1)
2225 MIPS_SYS(sys_lookup_dcookie
, 3)
2226 MIPS_SYS(sys_epoll_create
, 1)
2227 MIPS_SYS(sys_epoll_ctl
, 4)
2228 MIPS_SYS(sys_epoll_wait
, 3) /* 4250 */
2229 MIPS_SYS(sys_remap_file_pages
, 5)
2230 MIPS_SYS(sys_set_tid_address
, 1)
2231 MIPS_SYS(sys_restart_syscall
, 0)
2232 MIPS_SYS(sys_fadvise64_64
, 7)
2233 MIPS_SYS(sys_statfs64
, 3) /* 4255 */
2234 MIPS_SYS(sys_fstatfs64
, 2)
2235 MIPS_SYS(sys_timer_create
, 3)
2236 MIPS_SYS(sys_timer_settime
, 4)
2237 MIPS_SYS(sys_timer_gettime
, 2)
2238 MIPS_SYS(sys_timer_getoverrun
, 1) /* 4260 */
2239 MIPS_SYS(sys_timer_delete
, 1)
2240 MIPS_SYS(sys_clock_settime
, 2)
2241 MIPS_SYS(sys_clock_gettime
, 2)
2242 MIPS_SYS(sys_clock_getres
, 2)
2243 MIPS_SYS(sys_clock_nanosleep
, 4) /* 4265 */
2244 MIPS_SYS(sys_tgkill
, 3)
2245 MIPS_SYS(sys_utimes
, 2)
2246 MIPS_SYS(sys_mbind
, 4)
2247 MIPS_SYS(sys_ni_syscall
, 0) /* sys_get_mempolicy */
2248 MIPS_SYS(sys_ni_syscall
, 0) /* 4270 sys_set_mempolicy */
2249 MIPS_SYS(sys_mq_open
, 4)
2250 MIPS_SYS(sys_mq_unlink
, 1)
2251 MIPS_SYS(sys_mq_timedsend
, 5)
2252 MIPS_SYS(sys_mq_timedreceive
, 5)
2253 MIPS_SYS(sys_mq_notify
, 2) /* 4275 */
2254 MIPS_SYS(sys_mq_getsetattr
, 3)
2255 MIPS_SYS(sys_ni_syscall
, 0) /* sys_vserver */
2256 MIPS_SYS(sys_waitid
, 4)
2257 MIPS_SYS(sys_ni_syscall
, 0) /* available, was setaltroot */
2258 MIPS_SYS(sys_add_key
, 5)
2259 MIPS_SYS(sys_request_key
, 4)
2260 MIPS_SYS(sys_keyctl
, 5)
2261 MIPS_SYS(sys_set_thread_area
, 1)
2262 MIPS_SYS(sys_inotify_init
, 0)
2263 MIPS_SYS(sys_inotify_add_watch
, 3) /* 4285 */
2264 MIPS_SYS(sys_inotify_rm_watch
, 2)
2265 MIPS_SYS(sys_migrate_pages
, 4)
2266 MIPS_SYS(sys_openat
, 4)
2267 MIPS_SYS(sys_mkdirat
, 3)
2268 MIPS_SYS(sys_mknodat
, 4) /* 4290 */
2269 MIPS_SYS(sys_fchownat
, 5)
2270 MIPS_SYS(sys_futimesat
, 3)
2271 MIPS_SYS(sys_fstatat64
, 4)
2272 MIPS_SYS(sys_unlinkat
, 3)
2273 MIPS_SYS(sys_renameat
, 4) /* 4295 */
2274 MIPS_SYS(sys_linkat
, 5)
2275 MIPS_SYS(sys_symlinkat
, 3)
2276 MIPS_SYS(sys_readlinkat
, 4)
2277 MIPS_SYS(sys_fchmodat
, 3)
2278 MIPS_SYS(sys_faccessat
, 3) /* 4300 */
2279 MIPS_SYS(sys_pselect6
, 6)
2280 MIPS_SYS(sys_ppoll
, 5)
2281 MIPS_SYS(sys_unshare
, 1)
2282 MIPS_SYS(sys_splice
, 6)
2283 MIPS_SYS(sys_sync_file_range
, 7) /* 4305 */
2284 MIPS_SYS(sys_tee
, 4)
2285 MIPS_SYS(sys_vmsplice
, 4)
2286 MIPS_SYS(sys_move_pages
, 6)
2287 MIPS_SYS(sys_set_robust_list
, 2)
2288 MIPS_SYS(sys_get_robust_list
, 3) /* 4310 */
2289 MIPS_SYS(sys_kexec_load
, 4)
2290 MIPS_SYS(sys_getcpu
, 3)
2291 MIPS_SYS(sys_epoll_pwait
, 6)
2292 MIPS_SYS(sys_ioprio_set
, 3)
2293 MIPS_SYS(sys_ioprio_get
, 2)
2294 MIPS_SYS(sys_utimensat
, 4)
2295 MIPS_SYS(sys_signalfd
, 3)
2296 MIPS_SYS(sys_ni_syscall
, 0) /* was timerfd */
2297 MIPS_SYS(sys_eventfd
, 1)
2298 MIPS_SYS(sys_fallocate
, 6) /* 4320 */
2299 MIPS_SYS(sys_timerfd_create
, 2)
2300 MIPS_SYS(sys_timerfd_gettime
, 2)
2301 MIPS_SYS(sys_timerfd_settime
, 4)
2302 MIPS_SYS(sys_signalfd4
, 4)
2303 MIPS_SYS(sys_eventfd2
, 2) /* 4325 */
2304 MIPS_SYS(sys_epoll_create1
, 1)
2305 MIPS_SYS(sys_dup3
, 3)
2306 MIPS_SYS(sys_pipe2
, 2)
2307 MIPS_SYS(sys_inotify_init1
, 1)
2308 MIPS_SYS(sys_preadv
, 6) /* 4330 */
2309 MIPS_SYS(sys_pwritev
, 6)
2310 MIPS_SYS(sys_rt_tgsigqueueinfo
, 4)
2311 MIPS_SYS(sys_perf_event_open
, 5)
2312 MIPS_SYS(sys_accept4
, 4)
2313 MIPS_SYS(sys_recvmmsg
, 5) /* 4335 */
2314 MIPS_SYS(sys_fanotify_init
, 2)
2315 MIPS_SYS(sys_fanotify_mark
, 6)
2316 MIPS_SYS(sys_prlimit64
, 4)
2317 MIPS_SYS(sys_name_to_handle_at
, 5)
2318 MIPS_SYS(sys_open_by_handle_at
, 3) /* 4340 */
2319 MIPS_SYS(sys_clock_adjtime
, 2)
2320 MIPS_SYS(sys_syncfs
, 1)
2325 static int do_store_exclusive(CPUMIPSState
*env
)
2328 target_ulong page_addr
;
2336 page_addr
= addr
& TARGET_PAGE_MASK
;
2339 flags
= page_get_flags(page_addr
);
2340 if ((flags
& PAGE_READ
) == 0) {
2343 reg
= env
->llreg
& 0x1f;
2344 d
= (env
->llreg
& 0x20) != 0;
2346 segv
= get_user_s64(val
, addr
);
2348 segv
= get_user_s32(val
, addr
);
2351 if (val
!= env
->llval
) {
2352 env
->active_tc
.gpr
[reg
] = 0;
2355 segv
= put_user_u64(env
->llnewval
, addr
);
2357 segv
= put_user_u32(env
->llnewval
, addr
);
2360 env
->active_tc
.gpr
[reg
] = 1;
2367 env
->active_tc
.PC
+= 4;
2380 static int do_break(CPUMIPSState
*env
, target_siginfo_t
*info
,
2388 info
->si_signo
= TARGET_SIGFPE
;
2390 info
->si_code
= (code
== BRK_OVERFLOW
) ? FPE_INTOVF
: FPE_INTDIV
;
2391 queue_signal(env
, info
->si_signo
, &*info
);
2395 info
->si_signo
= TARGET_SIGTRAP
;
2397 queue_signal(env
, info
->si_signo
, &*info
);
2405 void cpu_loop(CPUMIPSState
*env
)
2407 CPUState
*cs
= CPU(mips_env_get_cpu(env
));
2408 target_siginfo_t info
;
2411 # ifdef TARGET_ABI_MIPSO32
2412 unsigned int syscall_num
;
2417 trapnr
= cpu_mips_exec(env
);
2421 env
->active_tc
.PC
+= 4;
2422 # ifdef TARGET_ABI_MIPSO32
2423 syscall_num
= env
->active_tc
.gpr
[2] - 4000;
2424 if (syscall_num
>= sizeof(mips_syscall_args
)) {
2425 ret
= -TARGET_ENOSYS
;
2429 abi_ulong arg5
= 0, arg6
= 0, arg7
= 0, arg8
= 0;
2431 nb_args
= mips_syscall_args
[syscall_num
];
2432 sp_reg
= env
->active_tc
.gpr
[29];
2434 /* these arguments are taken from the stack */
2436 if ((ret
= get_user_ual(arg8
, sp_reg
+ 28)) != 0) {
2440 if ((ret
= get_user_ual(arg7
, sp_reg
+ 24)) != 0) {
2444 if ((ret
= get_user_ual(arg6
, sp_reg
+ 20)) != 0) {
2448 if ((ret
= get_user_ual(arg5
, sp_reg
+ 16)) != 0) {
2454 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2455 env
->active_tc
.gpr
[4],
2456 env
->active_tc
.gpr
[5],
2457 env
->active_tc
.gpr
[6],
2458 env
->active_tc
.gpr
[7],
2459 arg5
, arg6
, arg7
, arg8
);
2463 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2464 env
->active_tc
.gpr
[4], env
->active_tc
.gpr
[5],
2465 env
->active_tc
.gpr
[6], env
->active_tc
.gpr
[7],
2466 env
->active_tc
.gpr
[8], env
->active_tc
.gpr
[9],
2467 env
->active_tc
.gpr
[10], env
->active_tc
.gpr
[11]);
2469 if (ret
== -TARGET_QEMU_ESIGRETURN
) {
2470 /* Returning from a successful sigreturn syscall.
2471 Avoid clobbering register state. */
2474 if ((abi_ulong
)ret
>= (abi_ulong
)-1133) {
2475 env
->active_tc
.gpr
[7] = 1; /* error flag */
2478 env
->active_tc
.gpr
[7] = 0; /* error flag */
2480 env
->active_tc
.gpr
[2] = ret
;
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->CP0_BadVAddr;
            queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGILL;
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                    info.si_signo = sig;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
            if (do_store_exclusive(env)) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->active_tc.PC;
                queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            queue_signal(env, info.si_signo, &info);
            /* The code below was inspired by the MIPS Linux kernel trap
             * handling code in arch/mips/kernel/traps.c.
             */
                abi_ulong trap_instr;
                if (env->hflags & MIPS_HFLAG_M16) {
                    if (env->insn_flags & ASE_MICROMIPS) {
                        /* microMIPS mode */
                        ret = get_user_u16(trap_instr, env->active_tc.PC);
                        if ((trap_instr >> 10) == 0x11) {
                            /* 16-bit instruction */
                            code = trap_instr & 0xf;
                            /* 32-bit instruction */
                            ret = get_user_u16(instr_lo,
                                               env->active_tc.PC + 2);
                            trap_instr = (trap_instr << 16) | instr_lo;
                            code = ((trap_instr >> 6) & ((1 << 20) - 1));
                            /* Unfortunately, microMIPS also suffers from
                               the old assembler bug...  */
                            if (code >= (1 << 10)) {
                        ret = get_user_u16(trap_instr, env->active_tc.PC);
                        code = (trap_instr >> 6) & 0x3f;
                    ret = get_user_ual(trap_instr, env->active_tc.PC);
                /* As described in the original Linux kernel code, the
                 * below checks on 'code' are to work around an old
                 * assembler bug.
                 */
                code = ((trap_instr >> 6) & ((1 << 20) - 1));
                if (code >= (1 << 10)) {
            if (do_break(env, &info, code) != 0) {
                abi_ulong trap_instr;
                unsigned int code = 0;
                if (env->hflags & MIPS_HFLAG_M16) {
                    /* microMIPS mode */
                    ret = get_user_u16(instr[0], env->active_tc.PC) ||
                          get_user_u16(instr[1], env->active_tc.PC + 2);
                    trap_instr = (instr[0] << 16) | instr[1];
                    ret = get_user_ual(trap_instr, env->active_tc.PC);
                /* The immediate versions don't provide a code.  */
                if (!(trap_instr & 0xFC000000)) {
                    if (env->hflags & MIPS_HFLAG_M16) {
                        /* microMIPS mode */
                        code = ((trap_instr >> 12) & ((1 << 4) - 1));
                        code = ((trap_instr >> 6) & ((1 << 10) - 1));
            if (do_break(env, &info, code) != 0) {
            fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
#ifdef TARGET_OPENRISC

void cpu_loop(CPUOpenRISCState *env)
    CPUState *cs = CPU(openrisc_env_get_cpu(env));
        trapnr = cpu_exec(env);
            qemu_log("\nReset request, exit, pc is %#x\n", env->pc);
            qemu_log("\nBus error, exit, pc is %#x\n", env->pc);
            cpu_dump_state(cs, stderr, fprintf, 0);
            gdbsig = TARGET_SIGSEGV;
            qemu_log("\nTick time interrupt pc is %#x\n", env->pc);
            qemu_log("\nAlignment pc is %#x\n", env->pc);
2678 qemu_log("\nIllegal instructionpc is %#x\n", env
->pc
);
2682 qemu_log("\nExternal interruptpc is %#x\n", env
->pc
);
2686 qemu_log("\nTLB miss\n");
2689 qemu_log("\nRange\n");
2693 env
->pc
+= 4; /* 0xc00; */
2694 env
->gpr
[11] = do_syscall(env
,
2695 env
->gpr
[11], /* return value */
2696 env
->gpr
[3], /* r3 - r7 are params */
2704 qemu_log("\nFloating point error\n");
2707 qemu_log("\nTrap\n");
2714 qemu_log("\nqemu: unhandled CPU exception %#x - aborting\n",
2716 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2717 gdbsig
= TARGET_SIGILL
;
2721 gdb_handlesig(cs
, gdbsig
);
2722 if (gdbsig
!= TARGET_SIGTRAP
) {
2727 process_pending_signals(env
);
2731 #endif /* TARGET_OPENRISC */
void cpu_loop(CPUSH4State *env)
    CPUState *cs = CPU(sh_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_sh4_exec(env);
            ret = do_syscall(env,
            env->gregs[0] = ret;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                    info.si_signo = sig;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
            info.si_signo = SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->tea;
            queue_signal(env, info.si_signo, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
void cpu_loop(CPUCRISState *env)
    CPUState *cs = CPU(cris_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_cris_exec(env);
            info.si_signo = SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->pregs[PR_EDA];
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            ret = do_syscall(env,
            env->regs[10] = ret;
                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                    info.si_signo = sig;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
#ifdef TARGET_MICROBLAZE

void cpu_loop(CPUMBState *env)
    CPUState *cs = CPU(mb_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_mb_exec(env);
            info.si_signo = SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            /* Return address is 4 bytes after the call.  */
            env->sregs[SR_PC] = env->regs[14];
            ret = do_syscall(env,
            env->regs[17] = env->sregs[SR_PC] + 4;
            if (env->iflags & D_FLAG) {
                env->sregs[SR_ESR] |= 1 << 12;
                env->sregs[SR_PC] -= 4;
                /* FIXME: if branch was immed, replay the imm as well.  */
            env->iflags &= ~(IMM_FLAG | D_FLAG);
            switch (env->sregs[SR_ESR] & 31) {
                case ESR_EC_DIVZERO:
                    info.si_signo = SIGFPE;
                    info.si_code = TARGET_FPE_FLTDIV;
                    info._sifields._sigfault._addr = 0;
                    queue_signal(env, info.si_signo, &info);
                    info.si_signo = SIGFPE;
                    if (env->sregs[SR_FSR] & FSR_IO) {
                        info.si_code = TARGET_FPE_FLTINV;
                    if (env->sregs[SR_FSR] & FSR_DZ) {
                        info.si_code = TARGET_FPE_FLTDIV;
                    info._sifields._sigfault._addr = 0;
                    queue_signal(env, info.si_signo, &info);
                    printf("Unhandled hw-exception: 0x%x\n",
                           env->sregs[SR_ESR] & ESR_EC_MASK);
                    cpu_dump_state(cs, stderr, fprintf, 0);
                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                    info.si_signo = sig;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
void cpu_loop(CPUM68KState *env)
    CPUState *cs = CPU(m68k_env_get_cpu(env));
    target_siginfo_t info;
    TaskState *ts = cs->opaque;
        trapnr = cpu_m68k_exec(env);
            if (ts->sim_syscalls) {
                nr = lduw(env->pc + 2);
                do_m68k_simcall(env, nr);
        case EXCP_HALT_INSN:
            /* Semihosting syscall.  */
            do_m68k_semihosting(env, env->dregs[0]);
        case EXCP_UNSUPPORTED:
            info.si_signo = SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            ts->sim_syscalls = 0;
            env->dregs[0] = do_syscall(env,
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            info.si_signo = SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->mmu.ar;
            queue_signal(env, info.si_signo, &info);
                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                    info.si_signo = sig;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
            fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);

#endif /* TARGET_M68K */
static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)
    target_ulong addr, val, tmp;
    target_siginfo_t info;
    addr = env->lock_addr;
    tmp = env->lock_st_addr;
    env->lock_addr = -1;
    env->lock_st_addr = 0;
        if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
        if (val == env->lock_value) {
            if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {
    info.si_signo = TARGET_SIGSEGV;
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, TARGET_SIGSEGV, &info);
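/*
 * Background note: this mirrors the MIPS helper above for Alpha's LDx_L/STx_C
 * pair.  env->lock_addr and env->lock_value record the last load-locked
 * access, while env->lock_st_addr remembers the address the store-conditional
 * targeted.  The store is replayed with plain loads and stores while other
 * vCPUs are stopped and succeeds only if the locked word still holds
 * lock_value; a faulting access on either side falls through to the SIGSEGV
 * path at the end.
 */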
void cpu_loop(CPUAlphaState *env)
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_alpha_exec(env);
        /* All of the traps imply a transition through PALcode, which
           implies an REI instruction has been executed.  Which means
           that the intr_flag should be cleared.  */
            fprintf(stderr, "Reset requested. Exit\n");
            fprintf(stderr, "Machine check exception. Exit\n");
        case EXCP_SMP_INTERRUPT:
        case EXCP_CLK_INTERRUPT:
        case EXCP_DEV_INTERRUPT:
            fprintf(stderr, "External interrupt. Exit\n");
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
                            ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_FLTINV;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            /* No-op.  Linux simply re-enables the FPU.  */
            env->lock_addr = -1;
            switch (env->error_code) {
                info.si_signo = TARGET_SIGTRAP;
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
                info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
                trapnr = env->ir[IR_V0];
                sysret = do_syscall(env, trapnr,
                                    env->ir[IR_A0], env->ir[IR_A1],
                                    env->ir[IR_A2], env->ir[IR_A3],
                                    env->ir[IR_A4], env->ir[IR_A5],
                if (trapnr == TARGET_NR_sigreturn
                    || trapnr == TARGET_NR_rt_sigreturn) {
                /* Syscall writes 0 to V0 to bypass error check, similar
                   to how this is handled inside the Linux kernel.
                   (Ab)use trapnr temporarily as boolean indicating error.  */
                trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
                env->ir[IR_V0] = (trapnr ? -sysret : sysret);
                env->ir[IR_A3] = trapnr;
                /* ??? We can probably elide the code using page_unprotect
                   that is checking for self-modifying code.  Instead we
                   could simply call tb_flush here.  Until we work out the
                   changes required to turn off the extra write protection,
                   this can be a no-op.  */
                /* Handled in the translator for usermode.  */
                /* Handled in the translator for usermode.  */
                info.si_signo = TARGET_SIGFPE;
                switch (env->ir[IR_A0]) {
                case TARGET_GEN_INTOVF:
                    info.si_code = TARGET_FPE_INTOVF;
                case TARGET_GEN_INTDIV:
                    info.si_code = TARGET_FPE_INTDIV;
                case TARGET_GEN_FLTOVF:
                    info.si_code = TARGET_FPE_FLTOVF;
                case TARGET_GEN_FLTUND:
                    info.si_code = TARGET_FPE_FLTUND;
                case TARGET_GEN_FLTINV:
                    info.si_code = TARGET_FPE_FLTINV;
                case TARGET_GEN_FLTINE:
                    info.si_code = TARGET_FPE_FLTRES;
                case TARGET_GEN_ROPRAND:
                    info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
            info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
            if (info.si_signo) {
                env->lock_addr = -1;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);

#endif /* TARGET_ALPHA */
void cpu_loop(CPUS390XState *env)
    CPUState *cs = CPU(s390_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_s390x_exec(env);
        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            n = env->int_svc_code;
                /* syscalls > 255 */
            env->psw.addr += env->int_svc_ilen;
            env->regs[2] = do_syscall(env, n, env->regs[2], env->regs[3],
                                      env->regs[4], env->regs[5],
                                      env->regs[6], env->regs[7], 0, 0);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            n = TARGET_TRAP_BRKPT;
            n = env->int_pgm_code;
            case PGM_PRIVILEGED:
                n = TARGET_ILL_ILLOPC;
            case PGM_PROTECTION:
            case PGM_ADDRESSING:
                /* XXX: check env->error_code */
                n = TARGET_SEGV_MAPERR;
                addr = env->__excp_addr;
            case PGM_SPECIFICATION:
            case PGM_SPECIAL_OP:
                n = TARGET_ILL_ILLOPN;
            case PGM_FIXPT_OVERFLOW:
                n = TARGET_FPE_INTOVF;
            case PGM_FIXPT_DIVIDE:
                n = TARGET_FPE_INTDIV;
                n = (env->fpc >> 8) & 0xff;
                    /* compare-and-trap */
                    /* An IEEE exception, simulated or otherwise.  */
                        n = TARGET_FPE_FLTINV;
                    } else if (n & 0x40) {
                        n = TARGET_FPE_FLTDIV;
                    } else if (n & 0x20) {
                        n = TARGET_FPE_FLTOVF;
                    } else if (n & 0x10) {
                        n = TARGET_FPE_FLTUND;
                    } else if (n & 0x08) {
                        n = TARGET_FPE_FLTRES;
                        /* ??? Quantum exception; BFP, DFP error.  */
                fprintf(stderr, "Unhandled program exception: %#x\n", n);
                cpu_dump_state(cs, stderr, fprintf, 0);
            addr = env->psw.addr;
            info.si_signo = sig;
            info._sifields._sigfault._addr = addr;
            queue_signal(env, info.si_signo, &info);
            fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);

#endif /* TARGET_S390X */
THREAD CPUState *thread_cpu;

void task_settid(TaskState *ts)
    if (ts->ts_tid == 0) {
        ts->ts_tid = (pid_t)syscall(SYS_gettid);

void stop_all_tasks(void)
    /*
     * We trust that when using NPTL, start_exclusive()
     * handles thread stopping correctly.
     */

/* Assumes contents are already zeroed.  */
void init_task_state(TaskState *ts)
    ts->first_free = ts->sigqueue_table;
    for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
        ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1];
    ts->sigqueue_table[i].next = NULL;
CPUArchState *cpu_copy(CPUArchState *env)
    CPUState *cpu = ENV_GET_CPU(env);
    CPUArchState *new_env = cpu_init(cpu_model);
    CPUState *new_cpu = ENV_GET_CPU(new_env);
#if defined(TARGET_HAS_ICE)
    /* Reset non arch specific state */
    memcpy(new_env, env, sizeof(CPUArchState));
    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone.  */
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        cpu_watchpoint_insert(new_cpu, wp->vaddr, (~wp->len_mask) + 1,
static void handle_arg_help(const char *arg)

static void handle_arg_log(const char *arg)
    mask = qemu_str_to_log_mask(arg);
        qemu_print_log_usage(stdout);

static void handle_arg_log_filename(const char *arg)
    qemu_set_log_filename(arg);

static void handle_arg_set_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_setenv(envlist, token) != 0) {

static void handle_arg_unset_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_unsetenv(envlist, token) != 0) {

static void handle_arg_argv0(const char *arg)
    argv0 = strdup(arg);

static void handle_arg_stack_size(const char *arg)
    guest_stack_size = strtoul(arg, &p, 0);
    if (guest_stack_size == 0) {
        guest_stack_size *= 1024 * 1024;
    } else if (*p == 'k' || *p == 'K') {
        guest_stack_size *= 1024;
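/*
 * Usage note (illustrative, not part of the original source): the suffix
 * handling above lets the stack size be given with a unit, e.g.
 *     qemu-arm -s 16M ./a.out      # ./a.out is a placeholder guest binary
 * where an 'M'/'m' suffix multiplies by 1024*1024, 'k'/'K' by 1024, and a
 * bare number is taken as bytes.
 */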
static void handle_arg_ld_prefix(const char *arg)
    interp_prefix = strdup(arg);

static void handle_arg_pagesize(const char *arg)
    qemu_host_page_size = atoi(arg);
    if (qemu_host_page_size == 0 ||
        (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
        fprintf(stderr, "page size must be a power of two\n");

static void handle_arg_gdb(const char *arg)
    gdbstub_port = atoi(arg);

static void handle_arg_uname(const char *arg)
    qemu_uname_release = strdup(arg);

static void handle_arg_cpu(const char *arg)
    cpu_model = strdup(arg);
    if (cpu_model == NULL || is_help_option(cpu_model)) {
        /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
        cpu_list(stdout, &fprintf);

#if defined(CONFIG_USE_GUEST_BASE)
static void handle_arg_guest_base(const char *arg)
    guest_base = strtol(arg, NULL, 0);
    have_guest_base = 1;

static void handle_arg_reserved_va(const char *arg)
    reserved_va = strtoul(arg, &p, 0);
        unsigned long unshifted = reserved_va;
        reserved_va <<= shift;
        if (((reserved_va >> shift) != unshifted)
#if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
            || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
            fprintf(stderr, "Reserved virtual address too big\n");
        fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
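/*
 * Usage note (illustrative, not part of the original source): -R takes a
 * size with an optional suffix that is converted into the shift applied
 * above, so a reservation for a 32-bit guest on a 64-bit host can be given
 * either as a raw value or with a unit suffix:
 *     qemu-arm -R 0xf7000000 ./a.out
 *     qemu-arm -R 2G ./a.out
 * (./a.out is a placeholder guest binary.)  The overflow and
 * TARGET_VIRT_ADDR_SPACE_BITS checks above reject values that cannot fit.
 */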
static void handle_arg_singlestep(const char *arg)

static void handle_arg_strace(const char *arg)

static void handle_arg_version(const char *arg)
    printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION
           ", Copyright (c) 2003-2008 Fabrice Bellard\n");
struct qemu_argument {
    void (*handle_opt)(const char *arg);
    const char *example;

static const struct qemu_argument arg_table[] = {
    {"h", "", false, handle_arg_help,
     "", "print this help"},
3637 {"g", "QEMU_GDB", true, handle_arg_gdb
,
3638 "port", "wait gdb connection to 'port'"},
3639 {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix
,
3640 "path", "set the elf interpreter prefix to 'path'"},
3641 {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size
,
3642 "size", "set the stack size to 'size' bytes"},
3643 {"cpu", "QEMU_CPU", true, handle_arg_cpu
,
3644 "model", "select CPU (-cpu help for list)"},
3645 {"E", "QEMU_SET_ENV", true, handle_arg_set_env
,
3646 "var=value", "sets targets environment variable (see below)"},
3647 {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env
,
3648 "var", "unsets targets environment variable (see below)"},
3649 {"0", "QEMU_ARGV0", true, handle_arg_argv0
,
3650 "argv0", "forces target process argv[0] to be 'argv0'"},
3651 {"r", "QEMU_UNAME", true, handle_arg_uname
,
3652 "uname", "set qemu uname release string to 'uname'"},
3653 #if defined(CONFIG_USE_GUEST_BASE)
3654 {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base
,
3655 "address", "set guest_base address to 'address'"},
3656 {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va
,
3657 "size", "reserve 'size' bytes for guest virtual address space"},
3659 {"d", "QEMU_LOG", true, handle_arg_log
,
3660 "item[,...]", "enable logging of specified items "
3661 "(use '-d help' for a list of items)"},
3662 {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename
,
3663 "logfile", "write logs to 'logfile' (default stderr)"},
3664 {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize
,
3665 "pagesize", "set the host page size to 'pagesize'"},
3666 {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep
,
3667 "", "run in singlestep mode"},
3668 {"strace", "QEMU_STRACE", false, handle_arg_strace
,
3669 "", "log system calls"},
3670 {"version", "QEMU_VERSION", false, handle_arg_version
,
3671 "", "display version information and exit"},
3672 {NULL
, NULL
, false, NULL
, NULL
, NULL
}
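/*
 * Usage note (illustrative, not part of the original source): a typical
 * invocation combining several of the options in this table might look like
 *     qemu-arm -L /usr/arm-linux-gnueabi -E LD_LIBRARY_PATH=/opt/lib \
 *              -strace ./a.out arg1 arg2
 * where the sysroot path, environment variable and guest binary ./a.out are
 * placeholders.  The matching environment-variable forms (QEMU_LD_PREFIX,
 * QEMU_SET_ENV, QEMU_STRACE) behave the same way, since parse_args() below
 * reads them before scanning the command line.
 */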
static void usage(void)
    const struct qemu_argument *arginfo;

    printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n"
           "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n"
           "Options and associated environment variables:\n"

    /* Calculate column widths. We must always have at least enough space
     * for the column header.
     */
    maxarglen = strlen("Argument");
    maxenvlen = strlen("Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        int arglen = strlen(arginfo->argv);
        if (arginfo->has_arg) {
            arglen += strlen(arginfo->example) + 1;
        if (strlen(arginfo->env) > maxenvlen) {
            maxenvlen = strlen(arginfo->env);
        if (arglen > maxarglen) {

    printf("%-*s %-*s Description\n", maxarglen + 1, "Argument",
           maxenvlen, "Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->has_arg) {
            printf("-%s %-*s %-*s %s\n", arginfo->argv,
                   (int)(maxarglen - strlen(arginfo->argv) - 1),
                   arginfo->example, maxenvlen, arginfo->env, arginfo->help);
            printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv,
                   maxenvlen, arginfo->env,

           "QEMU_LD_PREFIX  = %s\n"
           "QEMU_STACK_SIZE = %ld byte\n",
           "You can use -E and -U options or the QEMU_SET_ENV and\n"
           "QEMU_UNSET_ENV environment variables to set and unset\n"
           "environment variables for the target process.\n"
           "It is possible to provide several variables by separating them\n"
           "by commas in getsubopt(3) style. Additionally it is possible to\n"
           "provide the -E and -U options multiple times.\n"
           "The following lines are equivalent:\n"
3736 " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
3737 " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
3738 " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
3739 "Note that if you provide several changes to a single variable\n"
3740 "the last change will stay in effect.\n");
static int parse_args(int argc, char **argv)
    const struct qemu_argument *arginfo;

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->env == NULL) {
        r = getenv(arginfo->env);
            arginfo->handle_opt(r);

        if (optind >= argc) {
        if (!strcmp(r, "-")) {
        for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
            if (!strcmp(r, arginfo->argv)) {
                if (arginfo->has_arg) {
                    if (optind >= argc) {
                    arginfo->handle_opt(argv[optind]);
                    arginfo->handle_opt(NULL);

        /* no option matched the current argv */
        if (arginfo->handle_opt == NULL) {

    if (optind >= argc) {

    filename = argv[optind];
    exec_path = argv[optind];
int main(int argc, char **argv, char **envp)
    struct target_pt_regs regs1, *regs = &regs1;
    struct image_info info1, *info = &info1;
    struct linux_binprm bprm;
    char **target_environ, **wrk;

    module_call_init(MODULE_INIT_QOM);

    qemu_init_auxval(envp);
    qemu_cache_utils_init();

    if ((envlist = envlist_create()) == NULL) {
        (void) fprintf(stderr, "Unable to allocate envlist\n");

    /* add current environment into the list */
    for (wrk = environ; *wrk != NULL; wrk++) {
        (void) envlist_setenv(envlist, *wrk);

    /* Read the stack limit from the kernel.  If it's "unlimited",
       then we can do little else besides use the default.  */
        if (getrlimit(RLIMIT_STACK, &lim) == 0
            && lim.rlim_cur != RLIM_INFINITY
            && lim.rlim_cur == (target_long)lim.rlim_cur) {
            guest_stack_size = lim.rlim_cur;

#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file (TBD) */

    optind = parse_args(argc, argv);

    memset(regs, 0, sizeof(struct target_pt_regs));

    /* Zero out image_info */
    memset(info, 0, sizeof(struct image_info));

    memset(&bprm, 0, sizeof(bprm));

    /* Scan interp_prefix dir for replacement files. */
    init_paths(interp_prefix);

    init_qemu_uname_release();

    if (cpu_model == NULL) {
#if defined(TARGET_I386)
#ifdef TARGET_X86_64
        cpu_model = "qemu64";
        cpu_model = "qemu32";
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_M68K)
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
        cpu_model = "TI UltraSparc II";
        cpu_model = "Fujitsu MB86904";
#elif defined(TARGET_MIPS)
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
#elif defined TARGET_OPENRISC
        cpu_model = "or1200";
#elif defined(TARGET_PPC)
        cpu_model = "970fx";

    cpu_exec_init_all();
    /* NOTE: we need to init the CPU at this stage to get
       qemu_host_page_size */
    env = cpu_init(cpu_model);
        fprintf(stderr, "Unable to find CPU definition\n");
    cpu = ENV_GET_CPU(env);

    if (getenv("QEMU_STRACE")) {

    target_environ = envlist_to_environ(envlist, NULL);
    envlist_free(envlist);

#if defined(CONFIG_USE_GUEST_BASE)
    /*
     * Now that page sizes are configured in cpu_init() we can do
     * proper page alignment for guest_base.
     */
    guest_base = HOST_PAGE_ALIGN(guest_base);

    if (reserved_va || have_guest_base) {
        guest_base = init_guest_space(guest_base, reserved_va, 0,
        if (guest_base == (unsigned long)-1) {
            fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
                    "space for use as guest address space (check your virtual "
                    "memory ulimit setting or reserve less using -R option)\n",
            mmap_next_start = reserved_va;
#endif /* CONFIG_USE_GUEST_BASE */
    /*
     * Read in mmap_min_addr kernel parameter.  This value is used
     * when loading the ELF image to determine whether guest_base
     * is needed.  It is also used in mmap_find_vma.
     */
        if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
            if (fscanf(fp, "%lu", &tmp) == 1) {
                mmap_min_addr = tmp;
                qemu_log("host mmap_min_addr=0x%lx\n", mmap_min_addr);
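    /*
     * Background note: mmap_min_addr is the host's lower bound for fixed
     * mappings (the vm.mmap_min_addr sysctl).  Reading it here lets the ELF
     * loader and mmap_find_vma avoid handing the guest addresses the host
     * kernel would refuse anyway, such as mappings near address 0.
     */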
    /*
     * Prepare copy of argv vector for target.
     */
    target_argc = argc - optind;
    target_argv = calloc(target_argc + 1, sizeof(char *));
    if (target_argv == NULL) {
        (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");

    /*
     * If argv0 is specified (using '-0' switch) we replace
     * argv[0] pointer with the given one.
     */
    if (argv0 != NULL) {
        target_argv[i++] = strdup(argv0);
    for (; i < target_argc; i++) {
        target_argv[i] = strdup(argv[optind + i]);
    target_argv[target_argc] = NULL;

    ts = g_malloc0(sizeof(TaskState));
    init_task_state(ts);
    /* build Task State */

    execfd = qemu_getauxval(AT_EXECFD);
        execfd = open(filename, O_RDONLY);
            printf("Error while loading %s: %s\n", filename, strerror(errno));

    ret = loader_exec(execfd, filename, target_argv, target_environ, regs,
        printf("Error while loading %s: %s\n", filename, strerror(-ret));

    for (wrk = target_environ; *wrk; wrk++) {
    free(target_environ);

    if (qemu_log_enabled()) {
#if defined(CONFIG_USE_GUEST_BASE)
        qemu_log("guest_base  0x%lx\n", guest_base);
        qemu_log("start_brk   0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
        qemu_log("end_code    0x" TARGET_ABI_FMT_lx "\n", info->end_code);
        qemu_log("start_code  0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("start_data  0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("end_data    0x" TARGET_ABI_FMT_lx "\n", info->end_data);
        qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("brk         0x" TARGET_ABI_FMT_lx "\n", info->brk);
        qemu_log("entry       0x" TARGET_ABI_FMT_lx "\n", info->entry);

    target_set_brk(info->brk);
#if defined(CONFIG_USE_GUEST_BASE)
    /* Now that we've loaded the binary, GUEST_BASE is fixed.  Delay
       generating the prologue until now so that the prologue can take
       the real value of GUEST_BASE into account.  */
    tcg_prologue_init(&tcg_ctx);

#if defined(TARGET_I386)
    cpu_x86_set_cpl(env, 3);

    env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
    env->hflags |= HF_PE_MASK;
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        env->cr[4] |= CR4_OSFXSR_MASK;
        env->hflags |= HF_OSFXSR_MASK;
#ifndef TARGET_ABI32
    /* enable 64 bit mode if possible */
    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
    env->cr[4] |= CR4_PAE_MASK;
    env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
    env->hflags |= HF_LMA_MASK;

    /* flags setup : we activate the IRQs by default as in user mode */
    env->eflags |= IF_MASK;

    /* linux register setup */
#ifndef TARGET_ABI32
    env->regs[R_EAX] = regs->rax;
    env->regs[R_EBX] = regs->rbx;
    env->regs[R_ECX] = regs->rcx;
    env->regs[R_EDX] = regs->rdx;
    env->regs[R_ESI] = regs->rsi;
    env->regs[R_EDI] = regs->rdi;
    env->regs[R_EBP] = regs->rbp;
    env->regs[R_ESP] = regs->rsp;
    env->eip = regs->rip;
    env->regs[R_EAX] = regs->eax;
    env->regs[R_EBX] = regs->ebx;
    env->regs[R_ECX] = regs->ecx;
    env->regs[R_EDX] = regs->edx;
    env->regs[R_ESI] = regs->esi;
    env->regs[R_EDI] = regs->edi;
    env->regs[R_EBP] = regs->ebp;
    env->regs[R_ESP] = regs->esp;
    env->eip = regs->eip;

    /* linux interrupt setup */
#ifndef TARGET_ABI32
    env->idt.limit = 511;
    env->idt.limit = 255;
    env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
                                PROT_READ|PROT_WRITE,
                                MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    idt_table = g2h(env->idt.base);

    /* linux segment setup */
        uint64_t *gdt_table;
        env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
        gdt_table = g2h(env->gdt.base);
        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
        /* 64 bit code segment */
        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
        write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_FS, __USER_DS);
    cpu_x86_load_seg(env, R_GS, __USER_DS);
    /* This hack makes Wine work... */
    env->segs[R_FS].selector = 0;
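    /*
     * Background note (interpretation, not from the original comment):
     * zeroing only the %fs selector after loading it above appears to leave
     * the cached segment base in place while making the selector read back
     * as 0, which some code run under Wine apparently expects on a fresh
     * thread.
     */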
    cpu_x86_load_seg(env, R_DS, 0);
    cpu_x86_load_seg(env, R_ES, 0);
    cpu_x86_load_seg(env, R_FS, 0);
    cpu_x86_load_seg(env, R_GS, 0);
#elif defined(TARGET_AARCH64)
        if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
                    "The selected ARM CPU does not support 64 bit mode\n");
        for (i = 0; i < 31; i++) {
            env->xregs[i] = regs->regs[i];
        env->xregs[31] = regs->sp;
#elif defined(TARGET_ARM)
        cpsr_write(env, regs->uregs[16], 0xffffffff);
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs->uregs[i];
        if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
            && (info->elf_flags & EF_ARM_BE8)) {
            env->bswap_code = 1;
#elif defined(TARGET_UNICORE32)
        cpu_asr_write(env, regs->uregs[32], 0xffffffff);
        for (i = 0; i < 32; i++) {
            env->regs[i] = regs->uregs[i];
#elif defined(TARGET_SPARC)
        env->npc = regs->npc;
        for (i = 0; i < 8; i++)
            env->gregs[i] = regs->u_regs[i];
        for (i = 0; i < 8; i++)
            env->regwptr[i] = regs->u_regs[i + 8];
#elif defined(TARGET_PPC)
#if defined(TARGET_PPC64)
#if defined(TARGET_ABI32)
        env->msr &= ~((target_ulong)1 << MSR_SF);
        env->msr |= (target_ulong)1 << MSR_SF;
        env->nip = regs->nip;
        for (i = 0; i < 32; i++) {
            env->gpr[i] = regs->gpr[i];
#elif defined(TARGET_M68K)
        env->dregs[0] = regs->d0;
        env->dregs[1] = regs->d1;
        env->dregs[2] = regs->d2;
        env->dregs[3] = regs->d3;
        env->dregs[4] = regs->d4;
        env->dregs[5] = regs->d5;
        env->dregs[6] = regs->d6;
        env->dregs[7] = regs->d7;
        env->aregs[0] = regs->a0;
        env->aregs[1] = regs->a1;
        env->aregs[2] = regs->a2;
        env->aregs[3] = regs->a3;
        env->aregs[4] = regs->a4;
        env->aregs[5] = regs->a5;
        env->aregs[6] = regs->a6;
        env->aregs[7] = regs->usp;
        ts->sim_syscalls = 1;
#elif defined(TARGET_MICROBLAZE)
        env->regs[0] = regs->r0;
        env->regs[1] = regs->r1;
        env->regs[2] = regs->r2;
        env->regs[3] = regs->r3;
        env->regs[4] = regs->r4;
        env->regs[5] = regs->r5;
        env->regs[6] = regs->r6;
        env->regs[7] = regs->r7;
        env->regs[8] = regs->r8;
        env->regs[9] = regs->r9;
        env->regs[10] = regs->r10;
        env->regs[11] = regs->r11;
        env->regs[12] = regs->r12;
        env->regs[13] = regs->r13;
        env->regs[14] = regs->r14;
        env->regs[15] = regs->r15;
        env->regs[16] = regs->r16;
        env->regs[17] = regs->r17;
        env->regs[18] = regs->r18;
        env->regs[19] = regs->r19;
        env->regs[20] = regs->r20;
        env->regs[21] = regs->r21;
        env->regs[22] = regs->r22;
        env->regs[23] = regs->r23;
        env->regs[24] = regs->r24;
        env->regs[25] = regs->r25;
        env->regs[26] = regs->r26;
        env->regs[27] = regs->r27;
        env->regs[28] = regs->r28;
        env->regs[29] = regs->r29;
        env->regs[30] = regs->r30;
        env->regs[31] = regs->r31;
        env->sregs[SR_PC] = regs->pc;
#elif defined(TARGET_MIPS)
        for (i = 0; i < 32; i++) {
            env->active_tc.gpr[i] = regs->regs[i];
        env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
        if (regs->cp0_epc & 1) {
            env->hflags |= MIPS_HFLAG_M16;
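        /*
         * Background note: as in the Linux kernel, bit 0 of cp0_epc is not
         * part of the entry address; it flags that the entry point is
         * MIPS16/microMIPS code, so it is masked off the PC above and
         * mirrored into the MIPS_HFLAG_M16 translation flag instead.
         */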
#elif defined(TARGET_OPENRISC)
        for (i = 0; i < 32; i++) {
            env->gpr[i] = regs->gpr[i];
#elif defined(TARGET_SH4)
        for (i = 0; i < 16; i++) {
            env->gregs[i] = regs->regs[i];
#elif defined(TARGET_ALPHA)
        for (i = 0; i < 28; i++) {
            env->ir[i] = ((abi_ulong *)regs)[i];
        env->ir[IR_SP] = regs->usp;
#elif defined(TARGET_CRIS)
        env->regs[0] = regs->r0;
        env->regs[1] = regs->r1;
        env->regs[2] = regs->r2;
        env->regs[3] = regs->r3;
        env->regs[4] = regs->r4;
        env->regs[5] = regs->r5;
        env->regs[6] = regs->r6;
        env->regs[7] = regs->r7;
        env->regs[8] = regs->r8;
        env->regs[9] = regs->r9;
        env->regs[10] = regs->r10;
        env->regs[11] = regs->r11;
        env->regs[12] = regs->r12;
        env->regs[13] = regs->r13;
        env->regs[14] = info->start_stack;
        env->regs[15] = regs->acr;
        env->pc = regs->erp;
#elif defined(TARGET_S390X)
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs->gprs[i];
        env->psw.mask = regs->psw.mask;
        env->psw.addr = regs->psw.addr;
#error unsupported target CPU
#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call.  */

    if (gdbserver_start(gdbstub_port) < 0) {
        fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
    gdb_handlesig(cpu, 0);