4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
26 #include <sys/syscall.h>
27 #include <sys/resource.h>
30 #include "qemu-common.h"
31 #include "qemu/cache-utils.h"
34 #include "qemu/timer.h"
35 #include "qemu/envlist.h"
45 static const char *cpu_model
;
46 unsigned long mmap_min_addr
;
47 #if defined(CONFIG_USE_GUEST_BASE)
48 unsigned long guest_base
;
50 #if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
52 * When running 32-on-64 we should make sure we can fit all of the possible
53 * guest address space into a contiguous chunk of virtual host memory.
55 * This way we will never overlap with our own libraries or binaries or stack
56 * or anything else that QEMU maps.
59 /* MIPS only supports 31 bits of virtual address space for user space */
60 unsigned long reserved_va
= 0x77000000;
62 unsigned long reserved_va
= 0xf7000000;
65 unsigned long reserved_va
;
69 static void usage(void);
71 static const char *interp_prefix
= CONFIG_QEMU_INTERP_PREFIX
;
72 const char *qemu_uname_release
= CONFIG_UNAME_RELEASE
;
74 /* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
75 we allocate a bigger stack. Need a better solution, for example
76 by remapping the process stack directly at the right place */
77 unsigned long guest_stack_size
= 8 * 1024 * 1024UL;
79 void gemu_log(const char *fmt
, ...)
84 vfprintf(stderr
, fmt
, ap
);
88 #if defined(TARGET_I386)
89 int cpu_get_pic_interrupt(CPUX86State
*env
)
95 /***********************************************************/
96 /* Helper routines for implementing atomic operations. */
98 /* To implement exclusive operations we force all cpus to syncronise.
99 We don't require a full sync, only that no cpus are executing guest code.
100 The alternative is to map target atomic ops onto host equivalents,
101 which requires quite a lot of per host/target work. */
102 static pthread_mutex_t cpu_list_mutex
= PTHREAD_MUTEX_INITIALIZER
;
103 static pthread_mutex_t exclusive_lock
= PTHREAD_MUTEX_INITIALIZER
;
104 static pthread_cond_t exclusive_cond
= PTHREAD_COND_INITIALIZER
;
105 static pthread_cond_t exclusive_resume
= PTHREAD_COND_INITIALIZER
;
106 static int pending_cpus
;
108 /* Make sure everything is in a consistent state for calling fork(). */
109 void fork_start(void)
111 pthread_mutex_lock(&tcg_ctx
.tb_ctx
.tb_lock
);
112 pthread_mutex_lock(&exclusive_lock
);
116 void fork_end(int child
)
118 mmap_fork_end(child
);
120 CPUState
*cpu
, *next_cpu
;
121 /* Child processes created by fork() only have a single thread.
122 Discard information about the parent threads. */
123 CPU_FOREACH_SAFE(cpu
, next_cpu
) {
124 if (cpu
!= thread_cpu
) {
125 QTAILQ_REMOVE(&cpus
, thread_cpu
, node
);
129 pthread_mutex_init(&exclusive_lock
, NULL
);
130 pthread_mutex_init(&cpu_list_mutex
, NULL
);
131 pthread_cond_init(&exclusive_cond
, NULL
);
132 pthread_cond_init(&exclusive_resume
, NULL
);
133 pthread_mutex_init(&tcg_ctx
.tb_ctx
.tb_lock
, NULL
);
134 gdbserver_fork((CPUArchState
*)thread_cpu
->env_ptr
);
136 pthread_mutex_unlock(&exclusive_lock
);
137 pthread_mutex_unlock(&tcg_ctx
.tb_ctx
.tb_lock
);
141 /* Wait for pending exclusive operations to complete. The exclusive lock
143 static inline void exclusive_idle(void)
145 while (pending_cpus
) {
146 pthread_cond_wait(&exclusive_resume
, &exclusive_lock
);
150 /* Start an exclusive operation.
151 Must only be called from outside cpu_arm_exec. */
152 static inline void start_exclusive(void)
156 pthread_mutex_lock(&exclusive_lock
);
160 /* Make all other cpus stop executing. */
161 CPU_FOREACH(other_cpu
) {
162 if (other_cpu
->running
) {
167 if (pending_cpus
> 1) {
168 pthread_cond_wait(&exclusive_cond
, &exclusive_lock
);
172 /* Finish an exclusive operation. */
173 static inline void end_exclusive(void)
176 pthread_cond_broadcast(&exclusive_resume
);
177 pthread_mutex_unlock(&exclusive_lock
);
180 /* Wait for exclusive ops to finish, and begin cpu execution. */
181 static inline void cpu_exec_start(CPUState
*cpu
)
183 pthread_mutex_lock(&exclusive_lock
);
186 pthread_mutex_unlock(&exclusive_lock
);
189 /* Mark cpu as not executing, and release pending exclusive ops. */
190 static inline void cpu_exec_end(CPUState
*cpu
)
192 pthread_mutex_lock(&exclusive_lock
);
193 cpu
->running
= false;
194 if (pending_cpus
> 1) {
196 if (pending_cpus
== 1) {
197 pthread_cond_signal(&exclusive_cond
);
201 pthread_mutex_unlock(&exclusive_lock
);
204 void cpu_list_lock(void)
206 pthread_mutex_lock(&cpu_list_mutex
);
209 void cpu_list_unlock(void)
211 pthread_mutex_unlock(&cpu_list_mutex
);
216 /***********************************************************/
217 /* CPUX86 core interface */
219 void cpu_smm_update(CPUX86State
*env
)
223 uint64_t cpu_get_tsc(CPUX86State
*env
)
225 return cpu_get_real_ticks();
228 static void write_dt(void *ptr
, unsigned long addr
, unsigned long limit
,
233 e1
= (addr
<< 16) | (limit
& 0xffff);
234 e2
= ((addr
>> 16) & 0xff) | (addr
& 0xff000000) | (limit
& 0x000f0000);
241 static uint64_t *idt_table
;
243 static void set_gate64(void *ptr
, unsigned int type
, unsigned int dpl
,
244 uint64_t addr
, unsigned int sel
)
247 e1
= (addr
& 0xffff) | (sel
<< 16);
248 e2
= (addr
& 0xffff0000) | 0x8000 | (dpl
<< 13) | (type
<< 8);
252 p
[2] = tswap32(addr
>> 32);
255 /* only dpl matters as we do only user space emulation */
256 static void set_idt(int n
, unsigned int dpl
)
258 set_gate64(idt_table
+ n
* 2, 0, dpl
, 0, 0);
261 static void set_gate(void *ptr
, unsigned int type
, unsigned int dpl
,
262 uint32_t addr
, unsigned int sel
)
265 e1
= (addr
& 0xffff) | (sel
<< 16);
266 e2
= (addr
& 0xffff0000) | 0x8000 | (dpl
<< 13) | (type
<< 8);
272 /* only dpl matters as we do only user space emulation */
273 static void set_idt(int n
, unsigned int dpl
)
275 set_gate(idt_table
+ n
, 0, dpl
, 0, 0);
279 void cpu_loop(CPUX86State
*env
)
281 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
284 target_siginfo_t info
;
287 trapnr
= cpu_x86_exec(env
);
290 /* linux syscall from int $0x80 */
291 env
->regs
[R_EAX
] = do_syscall(env
,
303 /* linux syscall from syscall instruction */
304 env
->regs
[R_EAX
] = do_syscall(env
,
313 env
->eip
= env
->exception_next_eip
;
318 info
.si_signo
= SIGBUS
;
320 info
.si_code
= TARGET_SI_KERNEL
;
321 info
._sifields
._sigfault
._addr
= 0;
322 queue_signal(env
, info
.si_signo
, &info
);
325 /* XXX: potential problem if ABI32 */
326 #ifndef TARGET_X86_64
327 if (env
->eflags
& VM_MASK
) {
328 handle_vm86_fault(env
);
332 info
.si_signo
= SIGSEGV
;
334 info
.si_code
= TARGET_SI_KERNEL
;
335 info
._sifields
._sigfault
._addr
= 0;
336 queue_signal(env
, info
.si_signo
, &info
);
340 info
.si_signo
= SIGSEGV
;
342 if (!(env
->error_code
& 1))
343 info
.si_code
= TARGET_SEGV_MAPERR
;
345 info
.si_code
= TARGET_SEGV_ACCERR
;
346 info
._sifields
._sigfault
._addr
= env
->cr
[2];
347 queue_signal(env
, info
.si_signo
, &info
);
350 #ifndef TARGET_X86_64
351 if (env
->eflags
& VM_MASK
) {
352 handle_vm86_trap(env
, trapnr
);
356 /* division by zero */
357 info
.si_signo
= SIGFPE
;
359 info
.si_code
= TARGET_FPE_INTDIV
;
360 info
._sifields
._sigfault
._addr
= env
->eip
;
361 queue_signal(env
, info
.si_signo
, &info
);
366 #ifndef TARGET_X86_64
367 if (env
->eflags
& VM_MASK
) {
368 handle_vm86_trap(env
, trapnr
);
372 info
.si_signo
= SIGTRAP
;
374 if (trapnr
== EXCP01_DB
) {
375 info
.si_code
= TARGET_TRAP_BRKPT
;
376 info
._sifields
._sigfault
._addr
= env
->eip
;
378 info
.si_code
= TARGET_SI_KERNEL
;
379 info
._sifields
._sigfault
._addr
= 0;
381 queue_signal(env
, info
.si_signo
, &info
);
386 #ifndef TARGET_X86_64
387 if (env
->eflags
& VM_MASK
) {
388 handle_vm86_trap(env
, trapnr
);
392 info
.si_signo
= SIGSEGV
;
394 info
.si_code
= TARGET_SI_KERNEL
;
395 info
._sifields
._sigfault
._addr
= 0;
396 queue_signal(env
, info
.si_signo
, &info
);
400 info
.si_signo
= SIGILL
;
402 info
.si_code
= TARGET_ILL_ILLOPN
;
403 info
._sifields
._sigfault
._addr
= env
->eip
;
404 queue_signal(env
, info
.si_signo
, &info
);
407 /* just indicate that signals should be handled asap */
413 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
418 info
.si_code
= TARGET_TRAP_BRKPT
;
419 queue_signal(env
, info
.si_signo
, &info
);
424 pc
= env
->segs
[R_CS
].base
+ env
->eip
;
425 fprintf(stderr
, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
429 process_pending_signals(env
);
436 #define get_user_code_u32(x, gaddr, doswap) \
437 ({ abi_long __r = get_user_u32((x), (gaddr)); \
438 if (!__r && (doswap)) { \
444 #define get_user_code_u16(x, gaddr, doswap) \
445 ({ abi_long __r = get_user_u16((x), (gaddr)); \
446 if (!__r && (doswap)) { \
453 /* Commpage handling -- there is no commpage for AArch64 */
456 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
458 * r0 = pointer to oldval
459 * r1 = pointer to newval
460 * r2 = pointer to target value
463 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
464 * C set if *ptr was changed, clear if no exchange happened
466 * Note segv's in kernel helpers are a bit tricky, we can set the
467 * data address sensibly but the PC address is just the entry point.
469 static void arm_kernel_cmpxchg64_helper(CPUARMState
*env
)
471 uint64_t oldval
, newval
, val
;
473 target_siginfo_t info
;
475 /* Based on the 32 bit code in do_kernel_trap */
477 /* XXX: This only works between threads, not between processes.
478 It's probably possible to implement this with native host
479 operations. However things like ldrex/strex are much harder so
480 there's not much point trying. */
482 cpsr
= cpsr_read(env
);
485 if (get_user_u64(oldval
, env
->regs
[0])) {
486 env
->cp15
.c6_data
= env
->regs
[0];
490 if (get_user_u64(newval
, env
->regs
[1])) {
491 env
->cp15
.c6_data
= env
->regs
[1];
495 if (get_user_u64(val
, addr
)) {
496 env
->cp15
.c6_data
= addr
;
503 if (put_user_u64(val
, addr
)) {
504 env
->cp15
.c6_data
= addr
;
514 cpsr_write(env
, cpsr
, CPSR_C
);
520 /* We get the PC of the entry address - which is as good as anything,
521 on a real kernel what you get depends on which mode it uses. */
522 info
.si_signo
= SIGSEGV
;
524 /* XXX: check env->error_code */
525 info
.si_code
= TARGET_SEGV_MAPERR
;
526 info
._sifields
._sigfault
._addr
= env
->cp15
.c6_data
;
527 queue_signal(env
, info
.si_signo
, &info
);
532 /* Handle a jump to the kernel code page. */
534 do_kernel_trap(CPUARMState
*env
)
540 switch (env
->regs
[15]) {
541 case 0xffff0fa0: /* __kernel_memory_barrier */
542 /* ??? No-op. Will need to do better for SMP. */
544 case 0xffff0fc0: /* __kernel_cmpxchg */
545 /* XXX: This only works between threads, not between processes.
546 It's probably possible to implement this with native host
547 operations. However things like ldrex/strex are much harder so
548 there's not much point trying. */
550 cpsr
= cpsr_read(env
);
552 /* FIXME: This should SEGV if the access fails. */
553 if (get_user_u32(val
, addr
))
555 if (val
== env
->regs
[0]) {
557 /* FIXME: Check for segfaults. */
558 put_user_u32(val
, addr
);
565 cpsr_write(env
, cpsr
, CPSR_C
);
568 case 0xffff0fe0: /* __kernel_get_tls */
569 env
->regs
[0] = env
->cp15
.tpidrro_el0
;
571 case 0xffff0f60: /* __kernel_cmpxchg64 */
572 arm_kernel_cmpxchg64_helper(env
);
578 /* Jump back to the caller. */
579 addr
= env
->regs
[14];
584 env
->regs
[15] = addr
;
589 /* Store exclusive handling for AArch32 */
590 static int do_strex(CPUARMState
*env
)
598 if (env
->exclusive_addr
!= env
->exclusive_test
) {
601 /* We know we're always AArch32 so the address is in uint32_t range
602 * unless it was the -1 exclusive-monitor-lost value (which won't
603 * match exclusive_test above).
605 assert(extract64(env
->exclusive_addr
, 32, 32) == 0);
606 addr
= env
->exclusive_addr
;
607 size
= env
->exclusive_info
& 0xf;
610 segv
= get_user_u8(val
, addr
);
613 segv
= get_user_u16(val
, addr
);
617 segv
= get_user_u32(val
, addr
);
623 env
->cp15
.c6_data
= addr
;
628 segv
= get_user_u32(valhi
, addr
+ 4);
630 env
->cp15
.c6_data
= addr
+ 4;
633 val
= deposit64(val
, 32, 32, valhi
);
635 if (val
!= env
->exclusive_val
) {
639 val
= env
->regs
[(env
->exclusive_info
>> 8) & 0xf];
642 segv
= put_user_u8(val
, addr
);
645 segv
= put_user_u16(val
, addr
);
649 segv
= put_user_u32(val
, addr
);
653 env
->cp15
.c6_data
= addr
;
657 val
= env
->regs
[(env
->exclusive_info
>> 12) & 0xf];
658 segv
= put_user_u32(val
, addr
+ 4);
660 env
->cp15
.c6_data
= addr
+ 4;
667 env
->regs
[(env
->exclusive_info
>> 4) & 0xf] = rc
;
673 void cpu_loop(CPUARMState
*env
)
675 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
677 unsigned int n
, insn
;
678 target_siginfo_t info
;
683 trapnr
= cpu_arm_exec(env
);
688 TaskState
*ts
= env
->opaque
;
692 /* we handle the FPU emulation here, as Linux */
693 /* we get the opcode */
694 /* FIXME - what to do if get_user() fails? */
695 get_user_code_u32(opcode
, env
->regs
[15], env
->bswap_code
);
697 rc
= EmulateAll(opcode
, &ts
->fpa
, env
);
698 if (rc
== 0) { /* illegal instruction */
699 info
.si_signo
= SIGILL
;
701 info
.si_code
= TARGET_ILL_ILLOPN
;
702 info
._sifields
._sigfault
._addr
= env
->regs
[15];
703 queue_signal(env
, info
.si_signo
, &info
);
704 } else if (rc
< 0) { /* FP exception */
707 /* translate softfloat flags to FPSR flags */
708 if (-rc
& float_flag_invalid
)
710 if (-rc
& float_flag_divbyzero
)
712 if (-rc
& float_flag_overflow
)
714 if (-rc
& float_flag_underflow
)
716 if (-rc
& float_flag_inexact
)
719 FPSR fpsr
= ts
->fpa
.fpsr
;
720 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
722 if (fpsr
& (arm_fpe
<< 16)) { /* exception enabled? */
723 info
.si_signo
= SIGFPE
;
726 /* ordered by priority, least first */
727 if (arm_fpe
& BIT_IXC
) info
.si_code
= TARGET_FPE_FLTRES
;
728 if (arm_fpe
& BIT_UFC
) info
.si_code
= TARGET_FPE_FLTUND
;
729 if (arm_fpe
& BIT_OFC
) info
.si_code
= TARGET_FPE_FLTOVF
;
730 if (arm_fpe
& BIT_DZC
) info
.si_code
= TARGET_FPE_FLTDIV
;
731 if (arm_fpe
& BIT_IOC
) info
.si_code
= TARGET_FPE_FLTINV
;
733 info
._sifields
._sigfault
._addr
= env
->regs
[15];
734 queue_signal(env
, info
.si_signo
, &info
);
739 /* accumulate unenabled exceptions */
740 if ((!(fpsr
& BIT_IXE
)) && (arm_fpe
& BIT_IXC
))
742 if ((!(fpsr
& BIT_UFE
)) && (arm_fpe
& BIT_UFC
))
744 if ((!(fpsr
& BIT_OFE
)) && (arm_fpe
& BIT_OFC
))
746 if ((!(fpsr
& BIT_DZE
)) && (arm_fpe
& BIT_DZC
))
748 if ((!(fpsr
& BIT_IOE
)) && (arm_fpe
& BIT_IOC
))
751 } else { /* everything OK */
762 if (trapnr
== EXCP_BKPT
) {
764 /* FIXME - what to do if get_user() fails? */
765 get_user_code_u16(insn
, env
->regs
[15], env
->bswap_code
);
769 /* FIXME - what to do if get_user() fails? */
770 get_user_code_u32(insn
, env
->regs
[15], env
->bswap_code
);
771 n
= (insn
& 0xf) | ((insn
>> 4) & 0xff0);
776 /* FIXME - what to do if get_user() fails? */
777 get_user_code_u16(insn
, env
->regs
[15] - 2,
781 /* FIXME - what to do if get_user() fails? */
782 get_user_code_u32(insn
, env
->regs
[15] - 4,
788 if (n
== ARM_NR_cacheflush
) {
790 } else if (n
== ARM_NR_semihosting
791 || n
== ARM_NR_thumb_semihosting
) {
792 env
->regs
[0] = do_arm_semihosting (env
);
793 } else if (n
== 0 || n
>= ARM_SYSCALL_BASE
|| env
->thumb
) {
795 if (env
->thumb
|| n
== 0) {
798 n
-= ARM_SYSCALL_BASE
;
801 if ( n
> ARM_NR_BASE
) {
803 case ARM_NR_cacheflush
:
807 cpu_set_tls(env
, env
->regs
[0]);
811 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
813 env
->regs
[0] = -TARGET_ENOSYS
;
817 env
->regs
[0] = do_syscall(env
,
833 /* just indicate that signals should be handled asap */
835 case EXCP_PREFETCH_ABORT
:
836 addr
= env
->cp15
.c6_insn
;
838 case EXCP_DATA_ABORT
:
839 addr
= env
->cp15
.c6_data
;
842 info
.si_signo
= SIGSEGV
;
844 /* XXX: check env->error_code */
845 info
.si_code
= TARGET_SEGV_MAPERR
;
846 info
._sifields
._sigfault
._addr
= addr
;
847 queue_signal(env
, info
.si_signo
, &info
);
854 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
859 info
.si_code
= TARGET_TRAP_BRKPT
;
860 queue_signal(env
, info
.si_signo
, &info
);
864 case EXCP_KERNEL_TRAP
:
865 if (do_kernel_trap(env
))
870 addr
= env
->cp15
.c6_data
;
876 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
878 cpu_dump_state(cs
, stderr
, fprintf
, 0);
881 process_pending_signals(env
);
888 * Handle AArch64 store-release exclusive
890 * rs = gets the status result of store exclusive
891 * rt = is the register that is stored
892 * rt2 = is the second register store (in STP)
895 static int do_strex_a64(CPUARMState
*env
)
906 /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
907 size
= extract32(env
->exclusive_info
, 0, 2);
908 is_pair
= extract32(env
->exclusive_info
, 2, 1);
909 rs
= extract32(env
->exclusive_info
, 4, 5);
910 rt
= extract32(env
->exclusive_info
, 9, 5);
911 rt2
= extract32(env
->exclusive_info
, 14, 5);
913 addr
= env
->exclusive_addr
;
915 if (addr
!= env
->exclusive_test
) {
921 segv
= get_user_u8(val
, addr
);
924 segv
= get_user_u16(val
, addr
);
927 segv
= get_user_u32(val
, addr
);
930 segv
= get_user_u64(val
, addr
);
936 env
->cp15
.c6_data
= addr
;
939 if (val
!= env
->exclusive_val
) {
944 segv
= get_user_u32(val
, addr
+ 4);
946 segv
= get_user_u64(val
, addr
+ 8);
949 env
->cp15
.c6_data
= addr
+ (size
== 2 ? 4 : 8);
952 if (val
!= env
->exclusive_high
) {
956 val
= env
->xregs
[rt
];
959 segv
= put_user_u8(val
, addr
);
962 segv
= put_user_u16(val
, addr
);
965 segv
= put_user_u32(val
, addr
);
968 segv
= put_user_u64(val
, addr
);
975 val
= env
->xregs
[rt2
];
977 segv
= put_user_u32(val
, addr
+ 4);
979 segv
= put_user_u64(val
, addr
+ 8);
982 env
->cp15
.c6_data
= addr
+ (size
== 2 ? 4 : 8);
989 /* rs == 31 encodes a write to the ZR, thus throwing away
990 * the status return. This is rather silly but valid.
996 /* instruction faulted, PC does not advance */
997 /* either way a strex releases any exclusive lock we have */
998 env
->exclusive_addr
= -1;
1003 /* AArch64 main loop */
1004 void cpu_loop(CPUARMState
*env
)
1006 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
1008 target_siginfo_t info
;
1013 trapnr
= cpu_arm_exec(env
);
1018 env
->xregs
[0] = do_syscall(env
,
1028 case EXCP_INTERRUPT
:
1029 /* just indicate that signals should be handled asap */
1032 info
.si_signo
= SIGILL
;
1034 info
.si_code
= TARGET_ILL_ILLOPN
;
1035 info
._sifields
._sigfault
._addr
= env
->pc
;
1036 queue_signal(env
, info
.si_signo
, &info
);
1038 case EXCP_PREFETCH_ABORT
:
1039 addr
= env
->cp15
.c6_insn
;
1041 case EXCP_DATA_ABORT
:
1042 addr
= env
->cp15
.c6_data
;
1044 info
.si_signo
= SIGSEGV
;
1046 /* XXX: check env->error_code */
1047 info
.si_code
= TARGET_SEGV_MAPERR
;
1048 info
._sifields
._sigfault
._addr
= addr
;
1049 queue_signal(env
, info
.si_signo
, &info
);
1053 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1055 info
.si_signo
= sig
;
1057 info
.si_code
= TARGET_TRAP_BRKPT
;
1058 queue_signal(env
, info
.si_signo
, &info
);
1062 if (do_strex_a64(env
)) {
1063 addr
= env
->cp15
.c6_data
;
1068 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
1070 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1073 process_pending_signals(env
);
1074 /* Exception return on AArch64 always clears the exclusive monitor,
1075 * so any return to running guest code implies this.
1076 * A strex (successful or otherwise) also clears the monitor, so
1077 * we don't need to specialcase EXCP_STREX.
1079 env
->exclusive_addr
= -1;
1082 #endif /* ndef TARGET_ABI32 */
1086 #ifdef TARGET_UNICORE32
1088 void cpu_loop(CPUUniCore32State
*env
)
1090 CPUState
*cs
= CPU(uc32_env_get_cpu(env
));
1092 unsigned int n
, insn
;
1093 target_siginfo_t info
;
1097 trapnr
= uc32_cpu_exec(env
);
1100 case UC32_EXCP_PRIV
:
1103 get_user_u32(insn
, env
->regs
[31] - 4);
1104 n
= insn
& 0xffffff;
1106 if (n
>= UC32_SYSCALL_BASE
) {
1108 n
-= UC32_SYSCALL_BASE
;
1109 if (n
== UC32_SYSCALL_NR_set_tls
) {
1110 cpu_set_tls(env
, env
->regs
[0]);
1113 env
->regs
[0] = do_syscall(env
,
1128 case UC32_EXCP_DTRAP
:
1129 case UC32_EXCP_ITRAP
:
1130 info
.si_signo
= SIGSEGV
;
1132 /* XXX: check env->error_code */
1133 info
.si_code
= TARGET_SEGV_MAPERR
;
1134 info
._sifields
._sigfault
._addr
= env
->cp0
.c4_faultaddr
;
1135 queue_signal(env
, info
.si_signo
, &info
);
1137 case EXCP_INTERRUPT
:
1138 /* just indicate that signals should be handled asap */
1144 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1146 info
.si_signo
= sig
;
1148 info
.si_code
= TARGET_TRAP_BRKPT
;
1149 queue_signal(env
, info
.si_signo
, &info
);
1156 process_pending_signals(env
);
1160 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1161 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1167 #define SPARC64_STACK_BIAS 2047
1171 /* WARNING: dealing with register windows _is_ complicated. More info
1172 can be found at http://www.sics.se/~psm/sparcstack.html */
1173 static inline int get_reg_index(CPUSPARCState
*env
, int cwp
, int index
)
1175 index
= (index
+ cwp
* 16) % (16 * env
->nwindows
);
1176 /* wrap handling : if cwp is on the last window, then we use the
1177 registers 'after' the end */
1178 if (index
< 8 && env
->cwp
== env
->nwindows
- 1)
1179 index
+= 16 * env
->nwindows
;
1183 /* save the register window 'cwp1' */
1184 static inline void save_window_offset(CPUSPARCState
*env
, int cwp1
)
1189 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1190 #ifdef TARGET_SPARC64
1192 sp_ptr
+= SPARC64_STACK_BIAS
;
1194 #if defined(DEBUG_WIN)
1195 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" save_cwp=%d\n",
1198 for(i
= 0; i
< 16; i
++) {
1199 /* FIXME - what to do if put_user() fails? */
1200 put_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1201 sp_ptr
+= sizeof(abi_ulong
);
1205 static void save_window(CPUSPARCState
*env
)
1207 #ifndef TARGET_SPARC64
1208 unsigned int new_wim
;
1209 new_wim
= ((env
->wim
>> 1) | (env
->wim
<< (env
->nwindows
- 1))) &
1210 ((1LL << env
->nwindows
) - 1);
1211 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1214 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1220 static void restore_window(CPUSPARCState
*env
)
1222 #ifndef TARGET_SPARC64
1223 unsigned int new_wim
;
1225 unsigned int i
, cwp1
;
1228 #ifndef TARGET_SPARC64
1229 new_wim
= ((env
->wim
<< 1) | (env
->wim
>> (env
->nwindows
- 1))) &
1230 ((1LL << env
->nwindows
) - 1);
1233 /* restore the invalid window */
1234 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1235 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1236 #ifdef TARGET_SPARC64
1238 sp_ptr
+= SPARC64_STACK_BIAS
;
1240 #if defined(DEBUG_WIN)
1241 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" load_cwp=%d\n",
1244 for(i
= 0; i
< 16; i
++) {
1245 /* FIXME - what to do if get_user() fails? */
1246 get_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1247 sp_ptr
+= sizeof(abi_ulong
);
1249 #ifdef TARGET_SPARC64
1251 if (env
->cleanwin
< env
->nwindows
- 1)
1259 static void flush_windows(CPUSPARCState
*env
)
1265 /* if restore would invoke restore_window(), then we can stop */
1266 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ offset
);
1267 #ifndef TARGET_SPARC64
1268 if (env
->wim
& (1 << cwp1
))
1271 if (env
->canrestore
== 0)
1276 save_window_offset(env
, cwp1
);
1279 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1280 #ifndef TARGET_SPARC64
1281 /* set wim so that restore will reload the registers */
1282 env
->wim
= 1 << cwp1
;
1284 #if defined(DEBUG_WIN)
1285 printf("flush_windows: nb=%d\n", offset
- 1);
1289 void cpu_loop (CPUSPARCState
*env
)
1291 CPUState
*cs
= CPU(sparc_env_get_cpu(env
));
1294 target_siginfo_t info
;
1297 trapnr
= cpu_sparc_exec (env
);
1299 /* Compute PSR before exposing state. */
1300 if (env
->cc_op
!= CC_OP_FLAGS
) {
1305 #ifndef TARGET_SPARC64
1312 ret
= do_syscall (env
, env
->gregs
[1],
1313 env
->regwptr
[0], env
->regwptr
[1],
1314 env
->regwptr
[2], env
->regwptr
[3],
1315 env
->regwptr
[4], env
->regwptr
[5],
1317 if ((abi_ulong
)ret
>= (abi_ulong
)(-515)) {
1318 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1319 env
->xcc
|= PSR_CARRY
;
1321 env
->psr
|= PSR_CARRY
;
1325 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1326 env
->xcc
&= ~PSR_CARRY
;
1328 env
->psr
&= ~PSR_CARRY
;
1331 env
->regwptr
[0] = ret
;
1332 /* next instruction */
1334 env
->npc
= env
->npc
+ 4;
1336 case 0x83: /* flush windows */
1341 /* next instruction */
1343 env
->npc
= env
->npc
+ 4;
1345 #ifndef TARGET_SPARC64
1346 case TT_WIN_OVF
: /* window overflow */
1349 case TT_WIN_UNF
: /* window underflow */
1350 restore_window(env
);
1355 info
.si_signo
= TARGET_SIGSEGV
;
1357 /* XXX: check env->error_code */
1358 info
.si_code
= TARGET_SEGV_MAPERR
;
1359 info
._sifields
._sigfault
._addr
= env
->mmuregs
[4];
1360 queue_signal(env
, info
.si_signo
, &info
);
1364 case TT_SPILL
: /* window overflow */
1367 case TT_FILL
: /* window underflow */
1368 restore_window(env
);
1373 info
.si_signo
= TARGET_SIGSEGV
;
1375 /* XXX: check env->error_code */
1376 info
.si_code
= TARGET_SEGV_MAPERR
;
1377 if (trapnr
== TT_DFAULT
)
1378 info
._sifields
._sigfault
._addr
= env
->dmmuregs
[4];
1380 info
._sifields
._sigfault
._addr
= cpu_tsptr(env
)->tpc
;
1381 queue_signal(env
, info
.si_signo
, &info
);
1384 #ifndef TARGET_ABI32
1387 sparc64_get_context(env
);
1391 sparc64_set_context(env
);
1395 case EXCP_INTERRUPT
:
1396 /* just indicate that signals should be handled asap */
1400 info
.si_signo
= TARGET_SIGILL
;
1402 info
.si_code
= TARGET_ILL_ILLOPC
;
1403 info
._sifields
._sigfault
._addr
= env
->pc
;
1404 queue_signal(env
, info
.si_signo
, &info
);
1411 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1414 info
.si_signo
= sig
;
1416 info
.si_code
= TARGET_TRAP_BRKPT
;
1417 queue_signal(env
, info
.si_signo
, &info
);
1422 printf ("Unhandled trap: 0x%x\n", trapnr
);
1423 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1426 process_pending_signals (env
);
1433 static inline uint64_t cpu_ppc_get_tb(CPUPPCState
*env
)
1439 uint64_t cpu_ppc_load_tbl(CPUPPCState
*env
)
1441 return cpu_ppc_get_tb(env
);
1444 uint32_t cpu_ppc_load_tbu(CPUPPCState
*env
)
1446 return cpu_ppc_get_tb(env
) >> 32;
1449 uint64_t cpu_ppc_load_atbl(CPUPPCState
*env
)
1451 return cpu_ppc_get_tb(env
);
1454 uint32_t cpu_ppc_load_atbu(CPUPPCState
*env
)
1456 return cpu_ppc_get_tb(env
) >> 32;
1459 uint32_t cpu_ppc601_load_rtcu(CPUPPCState
*env
)
1460 __attribute__ (( alias ("cpu_ppc_load_tbu") ));
1462 uint32_t cpu_ppc601_load_rtcl(CPUPPCState
*env
)
1464 return cpu_ppc_load_tbl(env
) & 0x3FFFFF80;
1467 /* XXX: to be fixed */
1468 int ppc_dcr_read (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t *valp
)
1473 int ppc_dcr_write (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t val
)
1478 #define EXCP_DUMP(env, fmt, ...) \
1480 CPUState *cs = ENV_GET_CPU(env); \
1481 fprintf(stderr, fmt , ## __VA_ARGS__); \
1482 cpu_dump_state(cs, stderr, fprintf, 0); \
1483 qemu_log(fmt, ## __VA_ARGS__); \
1484 if (qemu_log_enabled()) { \
1485 log_cpu_state(cs, 0); \
1489 static int do_store_exclusive(CPUPPCState
*env
)
1492 target_ulong page_addr
;
1497 addr
= env
->reserve_ea
;
1498 page_addr
= addr
& TARGET_PAGE_MASK
;
1501 flags
= page_get_flags(page_addr
);
1502 if ((flags
& PAGE_READ
) == 0) {
1505 int reg
= env
->reserve_info
& 0x1f;
1506 int size
= (env
->reserve_info
>> 5) & 0xf;
1509 if (addr
== env
->reserve_addr
) {
1511 case 1: segv
= get_user_u8(val
, addr
); break;
1512 case 2: segv
= get_user_u16(val
, addr
); break;
1513 case 4: segv
= get_user_u32(val
, addr
); break;
1514 #if defined(TARGET_PPC64)
1515 case 8: segv
= get_user_u64(val
, addr
); break;
1519 if (!segv
&& val
== env
->reserve_val
) {
1520 val
= env
->gpr
[reg
];
1522 case 1: segv
= put_user_u8(val
, addr
); break;
1523 case 2: segv
= put_user_u16(val
, addr
); break;
1524 case 4: segv
= put_user_u32(val
, addr
); break;
1525 #if defined(TARGET_PPC64)
1526 case 8: segv
= put_user_u64(val
, addr
); break;
1535 env
->crf
[0] = (stored
<< 1) | xer_so
;
1536 env
->reserve_addr
= (target_ulong
)-1;
1546 void cpu_loop(CPUPPCState
*env
)
1548 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
1549 target_siginfo_t info
;
1555 trapnr
= cpu_ppc_exec(env
);
1558 case POWERPC_EXCP_NONE
:
1561 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1562 cpu_abort(env
, "Critical interrupt while in user mode. "
1565 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1566 cpu_abort(env
, "Machine check exception while in user mode. "
1569 case POWERPC_EXCP_DSI
: /* Data storage exception */
1570 EXCP_DUMP(env
, "Invalid data memory access: 0x" TARGET_FMT_lx
"\n",
1572 /* XXX: check this. Seems bugged */
1573 switch (env
->error_code
& 0xFF000000) {
1575 info
.si_signo
= TARGET_SIGSEGV
;
1577 info
.si_code
= TARGET_SEGV_MAPERR
;
1580 info
.si_signo
= TARGET_SIGILL
;
1582 info
.si_code
= TARGET_ILL_ILLADR
;
1585 info
.si_signo
= TARGET_SIGSEGV
;
1587 info
.si_code
= TARGET_SEGV_ACCERR
;
1590 /* Let's send a regular segfault... */
1591 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1593 info
.si_signo
= TARGET_SIGSEGV
;
1595 info
.si_code
= TARGET_SEGV_MAPERR
;
1598 info
._sifields
._sigfault
._addr
= env
->nip
;
1599 queue_signal(env
, info
.si_signo
, &info
);
1601 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1602 EXCP_DUMP(env
, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1603 "\n", env
->spr
[SPR_SRR0
]);
1604 /* XXX: check this */
1605 switch (env
->error_code
& 0xFF000000) {
1607 info
.si_signo
= TARGET_SIGSEGV
;
1609 info
.si_code
= TARGET_SEGV_MAPERR
;
1613 info
.si_signo
= TARGET_SIGSEGV
;
1615 info
.si_code
= TARGET_SEGV_ACCERR
;
1618 /* Let's send a regular segfault... */
1619 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1621 info
.si_signo
= TARGET_SIGSEGV
;
1623 info
.si_code
= TARGET_SEGV_MAPERR
;
1626 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1627 queue_signal(env
, info
.si_signo
, &info
);
1629 case POWERPC_EXCP_EXTERNAL
: /* External input */
1630 cpu_abort(env
, "External interrupt while in user mode. "
1633 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1634 EXCP_DUMP(env
, "Unaligned memory access\n");
1635 /* XXX: check this */
1636 info
.si_signo
= TARGET_SIGBUS
;
1638 info
.si_code
= TARGET_BUS_ADRALN
;
1639 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1640 queue_signal(env
, info
.si_signo
, &info
);
1642 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1643 /* XXX: check this */
1644 switch (env
->error_code
& ~0xF) {
1645 case POWERPC_EXCP_FP
:
1646 EXCP_DUMP(env
, "Floating point program exception\n");
1647 info
.si_signo
= TARGET_SIGFPE
;
1649 switch (env
->error_code
& 0xF) {
1650 case POWERPC_EXCP_FP_OX
:
1651 info
.si_code
= TARGET_FPE_FLTOVF
;
1653 case POWERPC_EXCP_FP_UX
:
1654 info
.si_code
= TARGET_FPE_FLTUND
;
1656 case POWERPC_EXCP_FP_ZX
:
1657 case POWERPC_EXCP_FP_VXZDZ
:
1658 info
.si_code
= TARGET_FPE_FLTDIV
;
1660 case POWERPC_EXCP_FP_XX
:
1661 info
.si_code
= TARGET_FPE_FLTRES
;
1663 case POWERPC_EXCP_FP_VXSOFT
:
1664 info
.si_code
= TARGET_FPE_FLTINV
;
1666 case POWERPC_EXCP_FP_VXSNAN
:
1667 case POWERPC_EXCP_FP_VXISI
:
1668 case POWERPC_EXCP_FP_VXIDI
:
1669 case POWERPC_EXCP_FP_VXIMZ
:
1670 case POWERPC_EXCP_FP_VXVC
:
1671 case POWERPC_EXCP_FP_VXSQRT
:
1672 case POWERPC_EXCP_FP_VXCVI
:
1673 info
.si_code
= TARGET_FPE_FLTSUB
;
1676 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1681 case POWERPC_EXCP_INVAL
:
1682 EXCP_DUMP(env
, "Invalid instruction\n");
1683 info
.si_signo
= TARGET_SIGILL
;
1685 switch (env
->error_code
& 0xF) {
1686 case POWERPC_EXCP_INVAL_INVAL
:
1687 info
.si_code
= TARGET_ILL_ILLOPC
;
1689 case POWERPC_EXCP_INVAL_LSWX
:
1690 info
.si_code
= TARGET_ILL_ILLOPN
;
1692 case POWERPC_EXCP_INVAL_SPR
:
1693 info
.si_code
= TARGET_ILL_PRVREG
;
1695 case POWERPC_EXCP_INVAL_FP
:
1696 info
.si_code
= TARGET_ILL_COPROC
;
1699 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1700 env
->error_code
& 0xF);
1701 info
.si_code
= TARGET_ILL_ILLADR
;
1705 case POWERPC_EXCP_PRIV
:
1706 EXCP_DUMP(env
, "Privilege violation\n");
1707 info
.si_signo
= TARGET_SIGILL
;
1709 switch (env
->error_code
& 0xF) {
1710 case POWERPC_EXCP_PRIV_OPC
:
1711 info
.si_code
= TARGET_ILL_PRVOPC
;
1713 case POWERPC_EXCP_PRIV_REG
:
1714 info
.si_code
= TARGET_ILL_PRVREG
;
1717 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1718 env
->error_code
& 0xF);
1719 info
.si_code
= TARGET_ILL_PRVOPC
;
1723 case POWERPC_EXCP_TRAP
:
1724 cpu_abort(env
, "Tried to call a TRAP\n");
1727 /* Should not happen ! */
1728 cpu_abort(env
, "Unknown program exception (%02x)\n",
1732 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1733 queue_signal(env
, info
.si_signo
, &info
);
1735 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1736 EXCP_DUMP(env
, "No floating point allowed\n");
1737 info
.si_signo
= TARGET_SIGILL
;
1739 info
.si_code
= TARGET_ILL_COPROC
;
1740 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1741 queue_signal(env
, info
.si_signo
, &info
);
1743 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1744 cpu_abort(env
, "Syscall exception while in user mode. "
1747 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1748 EXCP_DUMP(env
, "No APU instruction allowed\n");
1749 info
.si_signo
= TARGET_SIGILL
;
1751 info
.si_code
= TARGET_ILL_COPROC
;
1752 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1753 queue_signal(env
, info
.si_signo
, &info
);
1755 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1756 cpu_abort(env
, "Decrementer interrupt while in user mode. "
1759 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1760 cpu_abort(env
, "Fix interval timer interrupt while in user mode. "
1763 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1764 cpu_abort(env
, "Watchdog timer interrupt while in user mode. "
1767 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1768 cpu_abort(env
, "Data TLB exception while in user mode. "
1771 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1772 cpu_abort(env
, "Instruction TLB exception while in user mode. "
1775 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1776 EXCP_DUMP(env
, "No SPE/floating-point instruction allowed\n");
1777 info
.si_signo
= TARGET_SIGILL
;
1779 info
.si_code
= TARGET_ILL_COPROC
;
1780 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1781 queue_signal(env
, info
.si_signo
, &info
);
1783 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1784 cpu_abort(env
, "Embedded floating-point data IRQ not handled\n");
1786 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1787 cpu_abort(env
, "Embedded floating-point round IRQ not handled\n");
1789 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1790 cpu_abort(env
, "Performance monitor exception not handled\n");
1792 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1793 cpu_abort(env
, "Doorbell interrupt while in user mode. "
1796 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1797 cpu_abort(env
, "Doorbell critical interrupt while in user mode. "
1800 case POWERPC_EXCP_RESET
: /* System reset exception */
1801 cpu_abort(env
, "Reset interrupt while in user mode. "
1804 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1805 cpu_abort(env
, "Data segment exception while in user mode. "
1808 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1809 cpu_abort(env
, "Instruction segment exception "
1810 "while in user mode. Aborting\n");
1812 /* PowerPC 64 with hypervisor mode support */
1813 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1814 cpu_abort(env
, "Hypervisor decrementer interrupt "
1815 "while in user mode. Aborting\n");
1817 case POWERPC_EXCP_TRACE
: /* Trace exception */
1819 * we use this exception to emulate step-by-step execution mode.
1822 /* PowerPC 64 with hypervisor mode support */
1823 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1824 cpu_abort(env
, "Hypervisor data storage exception "
1825 "while in user mode. Aborting\n");
1827 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1828 cpu_abort(env
, "Hypervisor instruction storage exception "
1829 "while in user mode. Aborting\n");
1831 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1832 cpu_abort(env
, "Hypervisor data segment exception "
1833 "while in user mode. Aborting\n");
1835 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1836 cpu_abort(env
, "Hypervisor instruction segment exception "
1837 "while in user mode. Aborting\n");
1839 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1840 EXCP_DUMP(env
, "No Altivec instructions allowed\n");
1841 info
.si_signo
= TARGET_SIGILL
;
1843 info
.si_code
= TARGET_ILL_COPROC
;
1844 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1845 queue_signal(env
, info
.si_signo
, &info
);
1847 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1848 cpu_abort(env
, "Programmable interval timer interrupt "
1849 "while in user mode. Aborting\n");
1851 case POWERPC_EXCP_IO
: /* IO error exception */
1852 cpu_abort(env
, "IO error exception while in user mode. "
1855 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1856 cpu_abort(env
, "Run mode exception while in user mode. "
1859 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1860 cpu_abort(env
, "Emulation trap exception not handled\n");
1862 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1863 cpu_abort(env
, "Instruction fetch TLB exception "
1864 "while in user-mode. Aborting");
1866 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1867 cpu_abort(env
, "Data load TLB exception while in user-mode. "
1870 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
1871 cpu_abort(env
, "Data store TLB exception while in user-mode. "
1874 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
1875 cpu_abort(env
, "Floating-point assist exception not handled\n");
1877 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1878 cpu_abort(env
, "Instruction address breakpoint exception "
1881 case POWERPC_EXCP_SMI
: /* System management interrupt */
1882 cpu_abort(env
, "System management interrupt while in user mode. "
1885 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1886 cpu_abort(env
, "Thermal interrupt interrupt while in user mode. "
1889 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
1890 cpu_abort(env
, "Performance monitor exception not handled\n");
1892 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1893 cpu_abort(env
, "Vector assist exception not handled\n");
1895 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
1896 cpu_abort(env
, "Soft patch exception not handled\n");
1898 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1899 cpu_abort(env
, "Maintenance exception while in user mode. "
1902 case POWERPC_EXCP_STOP
: /* stop translation */
1903 /* We did invalidate the instruction cache. Go on */
1905 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
1906 /* We just stopped because of a branch. Go on */
1908 case POWERPC_EXCP_SYSCALL_USER
:
1909 /* system call in user-mode emulation */
1911 * PPC ABI uses overflow flag in cr0 to signal an error
1914 env
->crf
[0] &= ~0x1;
1915 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
1916 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
1918 if (ret
== (target_ulong
)(-TARGET_QEMU_ESIGRETURN
)) {
1919 /* Returning from a successful sigreturn syscall.
1920 Avoid corrupting register state. */
1923 if (ret
> (target_ulong
)(-515)) {
1929 case POWERPC_EXCP_STCX
:
1930 if (do_store_exclusive(env
)) {
1931 info
.si_signo
= TARGET_SIGSEGV
;
1933 info
.si_code
= TARGET_SEGV_MAPERR
;
1934 info
._sifields
._sigfault
._addr
= env
->nip
;
1935 queue_signal(env
, info
.si_signo
, &info
);
1942 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1944 info
.si_signo
= sig
;
1946 info
.si_code
= TARGET_TRAP_BRKPT
;
1947 queue_signal(env
, info
.si_signo
, &info
);
1951 case EXCP_INTERRUPT
:
1952 /* just indicate that signals should be handled asap */
1955 cpu_abort(env
, "Unknown exception 0x%d. Aborting\n", trapnr
);
1958 process_pending_signals(env
);
1965 # ifdef TARGET_ABI_MIPSO32
1966 # define MIPS_SYS(name, args) args,
1967 static const uint8_t mips_syscall_args
[] = {
1968 MIPS_SYS(sys_syscall
, 8) /* 4000 */
1969 MIPS_SYS(sys_exit
, 1)
1970 MIPS_SYS(sys_fork
, 0)
1971 MIPS_SYS(sys_read
, 3)
1972 MIPS_SYS(sys_write
, 3)
1973 MIPS_SYS(sys_open
, 3) /* 4005 */
1974 MIPS_SYS(sys_close
, 1)
1975 MIPS_SYS(sys_waitpid
, 3)
1976 MIPS_SYS(sys_creat
, 2)
1977 MIPS_SYS(sys_link
, 2)
1978 MIPS_SYS(sys_unlink
, 1) /* 4010 */
1979 MIPS_SYS(sys_execve
, 0)
1980 MIPS_SYS(sys_chdir
, 1)
1981 MIPS_SYS(sys_time
, 1)
1982 MIPS_SYS(sys_mknod
, 3)
1983 MIPS_SYS(sys_chmod
, 2) /* 4015 */
1984 MIPS_SYS(sys_lchown
, 3)
1985 MIPS_SYS(sys_ni_syscall
, 0)
1986 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_stat */
1987 MIPS_SYS(sys_lseek
, 3)
1988 MIPS_SYS(sys_getpid
, 0) /* 4020 */
1989 MIPS_SYS(sys_mount
, 5)
1990 MIPS_SYS(sys_umount
, 1)
1991 MIPS_SYS(sys_setuid
, 1)
1992 MIPS_SYS(sys_getuid
, 0)
1993 MIPS_SYS(sys_stime
, 1) /* 4025 */
1994 MIPS_SYS(sys_ptrace
, 4)
1995 MIPS_SYS(sys_alarm
, 1)
1996 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_fstat */
1997 MIPS_SYS(sys_pause
, 0)
1998 MIPS_SYS(sys_utime
, 2) /* 4030 */
1999 MIPS_SYS(sys_ni_syscall
, 0)
2000 MIPS_SYS(sys_ni_syscall
, 0)
2001 MIPS_SYS(sys_access
, 2)
2002 MIPS_SYS(sys_nice
, 1)
2003 MIPS_SYS(sys_ni_syscall
, 0) /* 4035 */
2004 MIPS_SYS(sys_sync
, 0)
2005 MIPS_SYS(sys_kill
, 2)
2006 MIPS_SYS(sys_rename
, 2)
2007 MIPS_SYS(sys_mkdir
, 2)
2008 MIPS_SYS(sys_rmdir
, 1) /* 4040 */
2009 MIPS_SYS(sys_dup
, 1)
2010 MIPS_SYS(sys_pipe
, 0)
2011 MIPS_SYS(sys_times
, 1)
2012 MIPS_SYS(sys_ni_syscall
, 0)
2013 MIPS_SYS(sys_brk
, 1) /* 4045 */
2014 MIPS_SYS(sys_setgid
, 1)
2015 MIPS_SYS(sys_getgid
, 0)
2016 MIPS_SYS(sys_ni_syscall
, 0) /* was signal(2) */
2017 MIPS_SYS(sys_geteuid
, 0)
2018 MIPS_SYS(sys_getegid
, 0) /* 4050 */
2019 MIPS_SYS(sys_acct
, 0)
2020 MIPS_SYS(sys_umount2
, 2)
2021 MIPS_SYS(sys_ni_syscall
, 0)
2022 MIPS_SYS(sys_ioctl
, 3)
2023 MIPS_SYS(sys_fcntl
, 3) /* 4055 */
2024 MIPS_SYS(sys_ni_syscall
, 2)
2025 MIPS_SYS(sys_setpgid
, 2)
2026 MIPS_SYS(sys_ni_syscall
, 0)
2027 MIPS_SYS(sys_olduname
, 1)
2028 MIPS_SYS(sys_umask
, 1) /* 4060 */
2029 MIPS_SYS(sys_chroot
, 1)
2030 MIPS_SYS(sys_ustat
, 2)
2031 MIPS_SYS(sys_dup2
, 2)
2032 MIPS_SYS(sys_getppid
, 0)
2033 MIPS_SYS(sys_getpgrp
, 0) /* 4065 */
2034 MIPS_SYS(sys_setsid
, 0)
2035 MIPS_SYS(sys_sigaction
, 3)
2036 MIPS_SYS(sys_sgetmask
, 0)
2037 MIPS_SYS(sys_ssetmask
, 1)
2038 MIPS_SYS(sys_setreuid
, 2) /* 4070 */
2039 MIPS_SYS(sys_setregid
, 2)
2040 MIPS_SYS(sys_sigsuspend
, 0)
2041 MIPS_SYS(sys_sigpending
, 1)
2042 MIPS_SYS(sys_sethostname
, 2)
2043 MIPS_SYS(sys_setrlimit
, 2) /* 4075 */
2044 MIPS_SYS(sys_getrlimit
, 2)
2045 MIPS_SYS(sys_getrusage
, 2)
2046 MIPS_SYS(sys_gettimeofday
, 2)
2047 MIPS_SYS(sys_settimeofday
, 2)
2048 MIPS_SYS(sys_getgroups
, 2) /* 4080 */
2049 MIPS_SYS(sys_setgroups
, 2)
2050 MIPS_SYS(sys_ni_syscall
, 0) /* old_select */
2051 MIPS_SYS(sys_symlink
, 2)
2052 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_lstat */
2053 MIPS_SYS(sys_readlink
, 3) /* 4085 */
2054 MIPS_SYS(sys_uselib
, 1)
2055 MIPS_SYS(sys_swapon
, 2)
2056 MIPS_SYS(sys_reboot
, 3)
2057 MIPS_SYS(old_readdir
, 3)
2058 MIPS_SYS(old_mmap
, 6) /* 4090 */
2059 MIPS_SYS(sys_munmap
, 2)
2060 MIPS_SYS(sys_truncate
, 2)
2061 MIPS_SYS(sys_ftruncate
, 2)
2062 MIPS_SYS(sys_fchmod
, 2)
2063 MIPS_SYS(sys_fchown
, 3) /* 4095 */
2064 MIPS_SYS(sys_getpriority
, 2)
2065 MIPS_SYS(sys_setpriority
, 3)
2066 MIPS_SYS(sys_ni_syscall
, 0)
2067 MIPS_SYS(sys_statfs
, 2)
2068 MIPS_SYS(sys_fstatfs
, 2) /* 4100 */
2069 MIPS_SYS(sys_ni_syscall
, 0) /* was ioperm(2) */
2070 MIPS_SYS(sys_socketcall
, 2)
2071 MIPS_SYS(sys_syslog
, 3)
2072 MIPS_SYS(sys_setitimer
, 3)
2073 MIPS_SYS(sys_getitimer
, 2) /* 4105 */
2074 MIPS_SYS(sys_newstat
, 2)
2075 MIPS_SYS(sys_newlstat
, 2)
2076 MIPS_SYS(sys_newfstat
, 2)
2077 MIPS_SYS(sys_uname
, 1)
2078 MIPS_SYS(sys_ni_syscall
, 0) /* 4110 was iopl(2) */
2079 MIPS_SYS(sys_vhangup
, 0)
2080 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_idle() */
2081 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_vm86 */
2082 MIPS_SYS(sys_wait4
, 4)
2083 MIPS_SYS(sys_swapoff
, 1) /* 4115 */
2084 MIPS_SYS(sys_sysinfo
, 1)
2085 MIPS_SYS(sys_ipc
, 6)
2086 MIPS_SYS(sys_fsync
, 1)
2087 MIPS_SYS(sys_sigreturn
, 0)
2088 MIPS_SYS(sys_clone
, 6) /* 4120 */
2089 MIPS_SYS(sys_setdomainname
, 2)
2090 MIPS_SYS(sys_newuname
, 1)
2091 MIPS_SYS(sys_ni_syscall
, 0) /* sys_modify_ldt */
2092 MIPS_SYS(sys_adjtimex
, 1)
2093 MIPS_SYS(sys_mprotect
, 3) /* 4125 */
2094 MIPS_SYS(sys_sigprocmask
, 3)
2095 MIPS_SYS(sys_ni_syscall
, 0) /* was create_module */
2096 MIPS_SYS(sys_init_module
, 5)
2097 MIPS_SYS(sys_delete_module
, 1)
2098 MIPS_SYS(sys_ni_syscall
, 0) /* 4130 was get_kernel_syms */
2099 MIPS_SYS(sys_quotactl
, 0)
2100 MIPS_SYS(sys_getpgid
, 1)
2101 MIPS_SYS(sys_fchdir
, 1)
2102 MIPS_SYS(sys_bdflush
, 2)
2103 MIPS_SYS(sys_sysfs
, 3) /* 4135 */
2104 MIPS_SYS(sys_personality
, 1)
2105 MIPS_SYS(sys_ni_syscall
, 0) /* for afs_syscall */
2106 MIPS_SYS(sys_setfsuid
, 1)
2107 MIPS_SYS(sys_setfsgid
, 1)
2108 MIPS_SYS(sys_llseek
, 5) /* 4140 */
2109 MIPS_SYS(sys_getdents
, 3)
2110 MIPS_SYS(sys_select
, 5)
2111 MIPS_SYS(sys_flock
, 2)
2112 MIPS_SYS(sys_msync
, 3)
2113 MIPS_SYS(sys_readv
, 3) /* 4145 */
2114 MIPS_SYS(sys_writev
, 3)
2115 MIPS_SYS(sys_cacheflush
, 3)
2116 MIPS_SYS(sys_cachectl
, 3)
2117 MIPS_SYS(sys_sysmips
, 4)
2118 MIPS_SYS(sys_ni_syscall
, 0) /* 4150 */
2119 MIPS_SYS(sys_getsid
, 1)
2120 MIPS_SYS(sys_fdatasync
, 0)
2121 MIPS_SYS(sys_sysctl
, 1)
2122 MIPS_SYS(sys_mlock
, 2)
2123 MIPS_SYS(sys_munlock
, 2) /* 4155 */
2124 MIPS_SYS(sys_mlockall
, 1)
2125 MIPS_SYS(sys_munlockall
, 0)
2126 MIPS_SYS(sys_sched_setparam
, 2)
2127 MIPS_SYS(sys_sched_getparam
, 2)
2128 MIPS_SYS(sys_sched_setscheduler
, 3) /* 4160 */
2129 MIPS_SYS(sys_sched_getscheduler
, 1)
2130 MIPS_SYS(sys_sched_yield
, 0)
2131 MIPS_SYS(sys_sched_get_priority_max
, 1)
2132 MIPS_SYS(sys_sched_get_priority_min
, 1)
2133 MIPS_SYS(sys_sched_rr_get_interval
, 2) /* 4165 */
2134 MIPS_SYS(sys_nanosleep
, 2)
2135 MIPS_SYS(sys_mremap
, 5)
2136 MIPS_SYS(sys_accept
, 3)
2137 MIPS_SYS(sys_bind
, 3)
2138 MIPS_SYS(sys_connect
, 3) /* 4170 */
2139 MIPS_SYS(sys_getpeername
, 3)
2140 MIPS_SYS(sys_getsockname
, 3)
2141 MIPS_SYS(sys_getsockopt
, 5)
2142 MIPS_SYS(sys_listen
, 2)
2143 MIPS_SYS(sys_recv
, 4) /* 4175 */
2144 MIPS_SYS(sys_recvfrom
, 6)
2145 MIPS_SYS(sys_recvmsg
, 3)
2146 MIPS_SYS(sys_send
, 4)
2147 MIPS_SYS(sys_sendmsg
, 3)
2148 MIPS_SYS(sys_sendto
, 6) /* 4180 */
2149 MIPS_SYS(sys_setsockopt
, 5)
2150 MIPS_SYS(sys_shutdown
, 2)
2151 MIPS_SYS(sys_socket
, 3)
2152 MIPS_SYS(sys_socketpair
, 4)
2153 MIPS_SYS(sys_setresuid
, 3) /* 4185 */
2154 MIPS_SYS(sys_getresuid
, 3)
2155 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_query_module */
2156 MIPS_SYS(sys_poll
, 3)
2157 MIPS_SYS(sys_nfsservctl
, 3)
2158 MIPS_SYS(sys_setresgid
, 3) /* 4190 */
2159 MIPS_SYS(sys_getresgid
, 3)
2160 MIPS_SYS(sys_prctl
, 5)
2161 MIPS_SYS(sys_rt_sigreturn
, 0)
2162 MIPS_SYS(sys_rt_sigaction
, 4)
2163 MIPS_SYS(sys_rt_sigprocmask
, 4) /* 4195 */
2164 MIPS_SYS(sys_rt_sigpending
, 2)
2165 MIPS_SYS(sys_rt_sigtimedwait
, 4)
2166 MIPS_SYS(sys_rt_sigqueueinfo
, 3)
2167 MIPS_SYS(sys_rt_sigsuspend
, 0)
2168 MIPS_SYS(sys_pread64
, 6) /* 4200 */
2169 MIPS_SYS(sys_pwrite64
, 6)
2170 MIPS_SYS(sys_chown
, 3)
2171 MIPS_SYS(sys_getcwd
, 2)
2172 MIPS_SYS(sys_capget
, 2)
2173 MIPS_SYS(sys_capset
, 2) /* 4205 */
2174 MIPS_SYS(sys_sigaltstack
, 2)
2175 MIPS_SYS(sys_sendfile
, 4)
2176 MIPS_SYS(sys_ni_syscall
, 0)
2177 MIPS_SYS(sys_ni_syscall
, 0)
2178 MIPS_SYS(sys_mmap2
, 6) /* 4210 */
2179 MIPS_SYS(sys_truncate64
, 4)
2180 MIPS_SYS(sys_ftruncate64
, 4)
2181 MIPS_SYS(sys_stat64
, 2)
2182 MIPS_SYS(sys_lstat64
, 2)
2183 MIPS_SYS(sys_fstat64
, 2) /* 4215 */
2184 MIPS_SYS(sys_pivot_root
, 2)
2185 MIPS_SYS(sys_mincore
, 3)
2186 MIPS_SYS(sys_madvise
, 3)
2187 MIPS_SYS(sys_getdents64
, 3)
2188 MIPS_SYS(sys_fcntl64
, 3) /* 4220 */
2189 MIPS_SYS(sys_ni_syscall
, 0)
2190 MIPS_SYS(sys_gettid
, 0)
2191 MIPS_SYS(sys_readahead
, 5)
2192 MIPS_SYS(sys_setxattr
, 5)
2193 MIPS_SYS(sys_lsetxattr
, 5) /* 4225 */
2194 MIPS_SYS(sys_fsetxattr
, 5)
2195 MIPS_SYS(sys_getxattr
, 4)
2196 MIPS_SYS(sys_lgetxattr
, 4)
2197 MIPS_SYS(sys_fgetxattr
, 4)
2198 MIPS_SYS(sys_listxattr
, 3) /* 4230 */
2199 MIPS_SYS(sys_llistxattr
, 3)
2200 MIPS_SYS(sys_flistxattr
, 3)
2201 MIPS_SYS(sys_removexattr
, 2)
2202 MIPS_SYS(sys_lremovexattr
, 2)
2203 MIPS_SYS(sys_fremovexattr
, 2) /* 4235 */
2204 MIPS_SYS(sys_tkill
, 2)
2205 MIPS_SYS(sys_sendfile64
, 5)
2206 MIPS_SYS(sys_futex
, 6)
2207 MIPS_SYS(sys_sched_setaffinity
, 3)
2208 MIPS_SYS(sys_sched_getaffinity
, 3) /* 4240 */
2209 MIPS_SYS(sys_io_setup
, 2)
2210 MIPS_SYS(sys_io_destroy
, 1)
2211 MIPS_SYS(sys_io_getevents
, 5)
2212 MIPS_SYS(sys_io_submit
, 3)
2213 MIPS_SYS(sys_io_cancel
, 3) /* 4245 */
2214 MIPS_SYS(sys_exit_group
, 1)
2215 MIPS_SYS(sys_lookup_dcookie
, 3)
2216 MIPS_SYS(sys_epoll_create
, 1)
2217 MIPS_SYS(sys_epoll_ctl
, 4)
2218 MIPS_SYS(sys_epoll_wait
, 3) /* 4250 */
2219 MIPS_SYS(sys_remap_file_pages
, 5)
2220 MIPS_SYS(sys_set_tid_address
, 1)
2221 MIPS_SYS(sys_restart_syscall
, 0)
2222 MIPS_SYS(sys_fadvise64_64
, 7)
2223 MIPS_SYS(sys_statfs64
, 3) /* 4255 */
2224 MIPS_SYS(sys_fstatfs64
, 2)
2225 MIPS_SYS(sys_timer_create
, 3)
2226 MIPS_SYS(sys_timer_settime
, 4)
2227 MIPS_SYS(sys_timer_gettime
, 2)
2228 MIPS_SYS(sys_timer_getoverrun
, 1) /* 4260 */
2229 MIPS_SYS(sys_timer_delete
, 1)
2230 MIPS_SYS(sys_clock_settime
, 2)
2231 MIPS_SYS(sys_clock_gettime
, 2)
2232 MIPS_SYS(sys_clock_getres
, 2)
2233 MIPS_SYS(sys_clock_nanosleep
, 4) /* 4265 */
2234 MIPS_SYS(sys_tgkill
, 3)
2235 MIPS_SYS(sys_utimes
, 2)
2236 MIPS_SYS(sys_mbind
, 4)
2237 MIPS_SYS(sys_ni_syscall
, 0) /* sys_get_mempolicy */
2238 MIPS_SYS(sys_ni_syscall
, 0) /* 4270 sys_set_mempolicy */
2239 MIPS_SYS(sys_mq_open
, 4)
2240 MIPS_SYS(sys_mq_unlink
, 1)
2241 MIPS_SYS(sys_mq_timedsend
, 5)
2242 MIPS_SYS(sys_mq_timedreceive
, 5)
2243 MIPS_SYS(sys_mq_notify
, 2) /* 4275 */
2244 MIPS_SYS(sys_mq_getsetattr
, 3)
2245 MIPS_SYS(sys_ni_syscall
, 0) /* sys_vserver */
2246 MIPS_SYS(sys_waitid
, 4)
2247 MIPS_SYS(sys_ni_syscall
, 0) /* available, was setaltroot */
2248 MIPS_SYS(sys_add_key
, 5)
2249 MIPS_SYS(sys_request_key
, 4)
2250 MIPS_SYS(sys_keyctl
, 5)
2251 MIPS_SYS(sys_set_thread_area
, 1)
2252 MIPS_SYS(sys_inotify_init
, 0)
2253 MIPS_SYS(sys_inotify_add_watch
, 3) /* 4285 */
2254 MIPS_SYS(sys_inotify_rm_watch
, 2)
2255 MIPS_SYS(sys_migrate_pages
, 4)
2256 MIPS_SYS(sys_openat
, 4)
2257 MIPS_SYS(sys_mkdirat
, 3)
2258 MIPS_SYS(sys_mknodat
, 4) /* 4290 */
2259 MIPS_SYS(sys_fchownat
, 5)
2260 MIPS_SYS(sys_futimesat
, 3)
2261 MIPS_SYS(sys_fstatat64
, 4)
2262 MIPS_SYS(sys_unlinkat
, 3)
2263 MIPS_SYS(sys_renameat
, 4) /* 4295 */
2264 MIPS_SYS(sys_linkat
, 5)
2265 MIPS_SYS(sys_symlinkat
, 3)
2266 MIPS_SYS(sys_readlinkat
, 4)
2267 MIPS_SYS(sys_fchmodat
, 3)
2268 MIPS_SYS(sys_faccessat
, 3) /* 4300 */
2269 MIPS_SYS(sys_pselect6
, 6)
2270 MIPS_SYS(sys_ppoll
, 5)
2271 MIPS_SYS(sys_unshare
, 1)
2272 MIPS_SYS(sys_splice
, 6)
2273 MIPS_SYS(sys_sync_file_range
, 7) /* 4305 */
2274 MIPS_SYS(sys_tee
, 4)
2275 MIPS_SYS(sys_vmsplice
, 4)
2276 MIPS_SYS(sys_move_pages
, 6)
2277 MIPS_SYS(sys_set_robust_list
, 2)
2278 MIPS_SYS(sys_get_robust_list
, 3) /* 4310 */
2279 MIPS_SYS(sys_kexec_load
, 4)
2280 MIPS_SYS(sys_getcpu
, 3)
2281 MIPS_SYS(sys_epoll_pwait
, 6)
2282 MIPS_SYS(sys_ioprio_set
, 3)
2283 MIPS_SYS(sys_ioprio_get
, 2)
2284 MIPS_SYS(sys_utimensat
, 4)
2285 MIPS_SYS(sys_signalfd
, 3)
2286 MIPS_SYS(sys_ni_syscall
, 0) /* was timerfd */
2287 MIPS_SYS(sys_eventfd
, 1)
2288 MIPS_SYS(sys_fallocate
, 6) /* 4320 */
2289 MIPS_SYS(sys_timerfd_create
, 2)
2290 MIPS_SYS(sys_timerfd_gettime
, 2)
2291 MIPS_SYS(sys_timerfd_settime
, 4)
2292 MIPS_SYS(sys_signalfd4
, 4)
2293 MIPS_SYS(sys_eventfd2
, 2) /* 4325 */
2294 MIPS_SYS(sys_epoll_create1
, 1)
2295 MIPS_SYS(sys_dup3
, 3)
2296 MIPS_SYS(sys_pipe2
, 2)
2297 MIPS_SYS(sys_inotify_init1
, 1)
2298 MIPS_SYS(sys_preadv
, 6) /* 4330 */
2299 MIPS_SYS(sys_pwritev
, 6)
2300 MIPS_SYS(sys_rt_tgsigqueueinfo
, 4)
2301 MIPS_SYS(sys_perf_event_open
, 5)
2302 MIPS_SYS(sys_accept4
, 4)
2303 MIPS_SYS(sys_recvmmsg
, 5) /* 4335 */
2304 MIPS_SYS(sys_fanotify_init
, 2)
2305 MIPS_SYS(sys_fanotify_mark
, 6)
2306 MIPS_SYS(sys_prlimit64
, 4)
2307 MIPS_SYS(sys_name_to_handle_at
, 5)
2308 MIPS_SYS(sys_open_by_handle_at
, 3) /* 4340 */
2309 MIPS_SYS(sys_clock_adjtime
, 2)
2310 MIPS_SYS(sys_syncfs
, 1)
2315 static int do_store_exclusive(CPUMIPSState
*env
)
2318 target_ulong page_addr
;
2326 page_addr
= addr
& TARGET_PAGE_MASK
;
2329 flags
= page_get_flags(page_addr
);
2330 if ((flags
& PAGE_READ
) == 0) {
2333 reg
= env
->llreg
& 0x1f;
2334 d
= (env
->llreg
& 0x20) != 0;
2336 segv
= get_user_s64(val
, addr
);
2338 segv
= get_user_s32(val
, addr
);
2341 if (val
!= env
->llval
) {
2342 env
->active_tc
.gpr
[reg
] = 0;
2345 segv
= put_user_u64(env
->llnewval
, addr
);
2347 segv
= put_user_u32(env
->llnewval
, addr
);
2350 env
->active_tc
.gpr
[reg
] = 1;
2357 env
->active_tc
.PC
+= 4;
2370 static int do_break(CPUMIPSState
*env
, target_siginfo_t
*info
,
2378 info
->si_signo
= TARGET_SIGFPE
;
2380 info
->si_code
= (code
== BRK_OVERFLOW
) ? FPE_INTOVF
: FPE_INTDIV
;
2381 queue_signal(env
, info
->si_signo
, &*info
);
void cpu_loop(CPUMIPSState *env)
    CPUState *cs = CPU(mips_env_get_cpu(env));
    target_siginfo_t info;
# ifdef TARGET_ABI_MIPSO32
    unsigned int syscall_num;
        trapnr = cpu_mips_exec(env);
            env->active_tc.PC += 4;
# ifdef TARGET_ABI_MIPSO32
            syscall_num = env->active_tc.gpr[2] - 4000;
            if (syscall_num >= sizeof(mips_syscall_args)) {
                ret = -TARGET_ENOSYS;
                abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;
                nb_args = mips_syscall_args[syscall_num];
                sp_reg = env->active_tc.gpr[29];
                /* these arguments are taken from the stack */
                if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) {
                if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) {
                if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) {
                if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) {
                ret = do_syscall(env, env->active_tc.gpr[2],
                                 env->active_tc.gpr[4],
                                 env->active_tc.gpr[5],
                                 env->active_tc.gpr[6],
                                 env->active_tc.gpr[7],
                                 arg5, arg6, arg7, arg8);
            ret = do_syscall(env, env->active_tc.gpr[2],
                             env->active_tc.gpr[4], env->active_tc.gpr[5],
                             env->active_tc.gpr[6], env->active_tc.gpr[7],
                             env->active_tc.gpr[8], env->active_tc.gpr[9],
                             env->active_tc.gpr[10], env->active_tc.gpr[11]);
            if (ret == -TARGET_QEMU_ESIGRETURN) {
                /* Returning from a successful sigreturn syscall.
                   Avoid clobbering register state. */
            if ((abi_ulong)ret >= (abi_ulong)-1133) {
                env->active_tc.gpr[7] = 1; /* error flag */
                env->active_tc.gpr[7] = 0; /* error flag */
            env->active_tc.gpr[2] = ret;
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->CP0_BadVAddr;
            queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGILL;
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, &info);
            if (do_store_exclusive(env)) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->active_tc.PC;
                queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            queue_signal(env, info.si_signo, &info);
            /* The code below was inspired by the MIPS Linux kernel trap
             * handling code in arch/mips/kernel/traps.c.
             */
                abi_ulong trap_instr;
                if (env->hflags & MIPS_HFLAG_M16) {
                    if (env->insn_flags & ASE_MICROMIPS) {
                        /* microMIPS mode */
                        ret = get_user_u16(trap_instr, env->active_tc.PC);
                        if ((trap_instr >> 10) == 0x11) {
                            /* 16-bit instruction */
                            code = trap_instr & 0xf;
                            /* 32-bit instruction */
                            ret = get_user_u16(instr_lo,
                                               env->active_tc.PC + 2);
                            trap_instr = (trap_instr << 16) | instr_lo;
                            code = ((trap_instr >> 6) & ((1 << 20) - 1));
                            /* Unfortunately, microMIPS also suffers from
                               the old assembler bug...  */
                            if (code >= (1 << 10)) {
                        ret = get_user_u16(trap_instr, env->active_tc.PC);
                        code = (trap_instr >> 6) & 0x3f;
                    ret = get_user_ual(trap_instr, env->active_tc.PC);
                    /* As described in the original Linux kernel code, the
                     * below checks on 'code' are to work around an old
                     */
                    code = ((trap_instr >> 6) & ((1 << 20) - 1));
                    if (code >= (1 << 10)) {
                if (do_break(env, &info, code) != 0) {
                abi_ulong trap_instr;
                unsigned int code = 0;
                if (env->hflags & MIPS_HFLAG_M16) {
                    /* microMIPS mode */
                    ret = get_user_u16(instr[0], env->active_tc.PC) ||
                          get_user_u16(instr[1], env->active_tc.PC + 2);
                    trap_instr = (instr[0] << 16) | instr[1];
                    ret = get_user_ual(trap_instr, env->active_tc.PC);
                /* The immediate versions don't provide a code.  */
                if (!(trap_instr & 0xFC000000)) {
                    if (env->hflags & MIPS_HFLAG_M16) {
                        /* microMIPS mode */
                        code = ((trap_instr >> 12) & ((1 << 4) - 1));
                        code = ((trap_instr >> 6) & ((1 << 10) - 1));
                if (do_break(env, &info, code) != 0) {
            fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
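/*
 * Editor's note (illustrative, not from the original source): on o32 the
 * syscall number arrives in $v0 ($2) biased by 4000, the first four
 * arguments in $a0-$a3 ($4-$7), and any further arguments on the user
 * stack at 16($sp), 20($sp), 24($sp) and 28($sp), which is exactly what
 * the get_user_ual() calls above fetch.  On return, $a3 ($7) carries the
 * error flag and $v0 ($2) the result or a positive errno, mirroring the
 * gpr[7]/gpr[2] assignments above.  A hypothetical helper summarising
 * that convention (the errno negation happens in code elided here):
 *
 *     static void mips_o32_set_result_sketch(CPUMIPSState *env, abi_long ret)
 *     {
 *         if ((abi_ulong)ret >= (abi_ulong)-1133) {
 *             env->active_tc.gpr[7] = 1;      // error flag set
 *             env->active_tc.gpr[2] = -ret;   // positive errno in $v0
 *         } else {
 *             env->active_tc.gpr[7] = 0;      // success
 *             env->active_tc.gpr[2] = ret;
 *         }
 *     }
 */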
#ifdef TARGET_OPENRISC

void cpu_loop(CPUOpenRISCState *env)
    CPUState *cs = CPU(openrisc_env_get_cpu(env));
        trapnr = cpu_exec(env);
            qemu_log("\nReset request, exit, pc is %#x\n", env->pc);
            qemu_log("\nBus error, exit, pc is %#x\n", env->pc);
            cpu_dump_state(cs, stderr, fprintf, 0);
            gdbsig = TARGET_SIGSEGV;
            qemu_log("\nTick time interrupt pc is %#x\n", env->pc);
            qemu_log("\nAlignment pc is %#x\n", env->pc);
            qemu_log("\nIllegal instruction, pc is %#x\n", env->pc);
            qemu_log("\nExternal interrupt, pc is %#x\n", env->pc);
            qemu_log("\nTLB miss\n");
            qemu_log("\nRange\n");
            env->pc += 4;   /* 0xc00; */
            env->gpr[11] = do_syscall(env,
                                      env->gpr[11], /* return value       */
                                      env->gpr[3],  /* r3 - r7 are params */
            qemu_log("\nFloating point error\n");
            qemu_log("\nTrap\n");
            qemu_log("\nqemu: unhandled CPU exception %#x - aborting\n",
            cpu_dump_state(cs, stderr, fprintf, 0);
            gdbsig = TARGET_SIGILL;
            gdb_handlesig(cs, gdbsig);
            if (gdbsig != TARGET_SIGTRAP) {
        process_pending_signals(env);
#endif /* TARGET_OPENRISC */
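/*
 * Editor's note (illustrative): every target's cpu_loop() above and below
 * follows the same skeleton -- run translated guest code until an exception
 * is raised, decide whether it is a syscall, a fault to convert into a
 * guest signal, or a debug event, and then flush pending signals.  A
 * stripped-down sketch of that shape (case labels hypothetical):
 *
 *     for (;;) {
 *         trapnr = cpu_exec(env);            // run guest code
 *         switch (trapnr) {
 *         case EXCP_SYSCALL_LIKE:            // the target's syscall trap
 *             ret = do_syscall(env, ...);
 *             break;
 *         case EXCP_FAULT_LIKE:              // MMU fault, illegal insn, ...
 *             queue_signal(env, sig, &info); // becomes a guest signal
 *             break;
 *         case EXCP_DEBUG:
 *             gdb_handlesig(cs, TARGET_SIGTRAP);
 *             break;
 *         }
 *         process_pending_signals(env);      // deliver queued signals
 *     }
 */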
void cpu_loop(CPUSH4State *env)
    CPUState *cs = CPU(sh_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_sh4_exec(env);
            ret = do_syscall(env,
            env->gregs[0] = ret;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, &info);
            info.si_signo = SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->tea;
            queue_signal(env, info.si_signo, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
void cpu_loop(CPUCRISState *env)
    CPUState *cs = CPU(cris_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_cris_exec(env);
            info.si_signo = SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->pregs[PR_EDA];
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            ret = do_syscall(env,
            env->regs[10] = ret;
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
#ifdef TARGET_MICROBLAZE

void cpu_loop(CPUMBState *env)
    CPUState *cs = CPU(mb_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_mb_exec(env);
            info.si_signo = SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            /* Return address is 4 bytes after the call. */
            env->sregs[SR_PC] = env->regs[14];
            ret = do_syscall(env,
            env->regs[17] = env->sregs[SR_PC] + 4;
            if (env->iflags & D_FLAG) {
                env->sregs[SR_ESR] |= 1 << 12;
                env->sregs[SR_PC] -= 4;
                /* FIXME: if branch was immed, replay the imm as well.  */
            env->iflags &= ~(IMM_FLAG | D_FLAG);
            switch (env->sregs[SR_ESR] & 31) {
            case ESR_EC_DIVZERO:
                info.si_signo = SIGFPE;
                info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, &info);
                info.si_signo = SIGFPE;
                if (env->sregs[SR_FSR] & FSR_IO) {
                    info.si_code = TARGET_FPE_FLTINV;
                if (env->sregs[SR_FSR] & FSR_DZ) {
                    info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, &info);
                printf("Unhandled hw-exception: 0x%x\n",
                       env->sregs[SR_ESR] & ESR_EC_MASK);
                cpu_dump_state(cs, stderr, fprintf, 0);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
void cpu_loop(CPUM68KState *env)
    CPUState *cs = CPU(m68k_env_get_cpu(env));
    target_siginfo_t info;
    TaskState *ts = env->opaque;
        trapnr = cpu_m68k_exec(env);
            if (ts->sim_syscalls) {
                nr = lduw(env->pc + 2);
                do_m68k_simcall(env, nr);
        case EXCP_HALT_INSN:
            /* Semihosting syscall.  */
            do_m68k_semihosting(env, env->dregs[0]);
        case EXCP_UNSUPPORTED:
            info.si_signo = SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            ts->sim_syscalls = 0;
            env->dregs[0] = do_syscall(env,
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            info.si_signo = SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->mmu.ar;
            queue_signal(env, info.si_signo, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, &info);
            fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
#endif /* TARGET_M68K */
static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)
    target_ulong addr, val, tmp;
    target_siginfo_t info;

    addr = env->lock_addr;
    tmp = env->lock_st_addr;
    env->lock_addr = -1;
    env->lock_st_addr = 0;
        if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
        if (val == env->lock_value) {
            if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {
    info.si_signo = TARGET_SIGSEGV;
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, TARGET_SIGSEGV, &info);
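/*
 * Editor's note (illustrative): env->lock_addr and env->lock_value record
 * the address and data observed by the guest's load-locked instruction;
 * the conditional store above only succeeds while memory still holds
 * lock_value, which is how LDx_L/STx_C pairs are emulated without host
 * atomics.  The guest-side idiom this models is the usual retry loop,
 * sketched here in C with hypothetical helper names:
 *
 *     uint64_t old, new;
 *     do {
 *         old = load_locked(p);              // LDx_L: remember addr+value
 *         new = old + 1;
 *     } while (!store_conditional(p, new));  // STx_C: fails if p changed
 */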
void cpu_loop(CPUAlphaState *env)
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_alpha_exec(env);

        /* All of the traps imply a transition through PALcode, which
           implies an REI instruction has been executed.  Which means
           that the intr_flag should be cleared.  */
            fprintf(stderr, "Reset requested. Exit\n");
            fprintf(stderr, "Machine check exception. Exit\n");
        case EXCP_SMP_INTERRUPT:
        case EXCP_CLK_INTERRUPT:
        case EXCP_DEV_INTERRUPT:
            fprintf(stderr, "External interrupt. Exit\n");
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
                            ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_FLTINV;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            /* No-op.  Linux simply re-enables the FPU.  */
            env->lock_addr = -1;
            switch (env->error_code) {
                info.si_signo = TARGET_SIGTRAP;
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
                info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
                trapnr = env->ir[IR_V0];
                sysret = do_syscall(env, trapnr,
                                    env->ir[IR_A0], env->ir[IR_A1],
                                    env->ir[IR_A2], env->ir[IR_A3],
                                    env->ir[IR_A4], env->ir[IR_A5],
                if (trapnr == TARGET_NR_sigreturn
                    || trapnr == TARGET_NR_rt_sigreturn) {
                /* Syscall writes 0 to V0 to bypass error check, similar
                   to how this is handled internal to Linux kernel.
                   (Ab)use trapnr temporarily as boolean indicating error.  */
                trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
                env->ir[IR_V0] = (trapnr ? -sysret : sysret);
                env->ir[IR_A3] = trapnr;
                /* ??? We can probably elide the code using page_unprotect
                   that is checking for self-modifying code.  Instead we
                   could simply call tb_flush here.  Until we work out the
                   changes required to turn off the extra write protection,
                   this can be a no-op.  */
                /* Handled in the translator for usermode.  */
                /* Handled in the translator for usermode.  */
                info.si_signo = TARGET_SIGFPE;
                switch (env->ir[IR_A0]) {
                case TARGET_GEN_INTOVF:
                    info.si_code = TARGET_FPE_INTOVF;
                case TARGET_GEN_INTDIV:
                    info.si_code = TARGET_FPE_INTDIV;
                case TARGET_GEN_FLTOVF:
                    info.si_code = TARGET_FPE_FLTOVF;
                case TARGET_GEN_FLTUND:
                    info.si_code = TARGET_FPE_FLTUND;
                case TARGET_GEN_FLTINV:
                    info.si_code = TARGET_FPE_FLTINV;
                case TARGET_GEN_FLTINE:
                    info.si_code = TARGET_FPE_FLTRES;
                case TARGET_GEN_ROPRAND:
                    info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
            info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
            if (info.si_signo) {
                env->lock_addr = -1;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
#endif /* TARGET_ALPHA */
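/*
 * Editor's note (illustrative): the write-back above reproduces the
 * Alpha/Linux syscall return convention -- a3 is the error flag and v0
 * carries either the result or the positive errno -- and it is skipped for
 * sigreturn/rt_sigreturn so the register state restored from the signal
 * frame is not clobbered.  What a guest libc syscall stub effectively does
 * with those two registers (sketch, hypothetical variable names):
 *
 *     // after the callsys instruction:
 *     if (a3 != 0) {         // kernel reported an error
 *         errno = v0;        // v0 holds the positive errno
 *         return -1;
 *     }
 *     return v0;             // success: v0 is the result
 */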
void cpu_loop(CPUS390XState *env)
    CPUState *cs = CPU(s390_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_s390x_exec(env);
        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            n = env->int_svc_code;
                /* syscalls > 255 */
            env->psw.addr += env->int_svc_ilen;
            env->regs[2] = do_syscall(env, n, env->regs[2], env->regs[3],
                                      env->regs[4], env->regs[5],
                                      env->regs[6], env->regs[7], 0, 0);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            n = TARGET_TRAP_BRKPT;
            n = env->int_pgm_code;
            case PGM_PRIVILEGED:
                n = TARGET_ILL_ILLOPC;
            case PGM_PROTECTION:
            case PGM_ADDRESSING:
                /* XXX: check env->error_code */
                n = TARGET_SEGV_MAPERR;
                addr = env->__excp_addr;
            case PGM_SPECIFICATION:
            case PGM_SPECIAL_OP:
                n = TARGET_ILL_ILLOPN;
            case PGM_FIXPT_OVERFLOW:
                n = TARGET_FPE_INTOVF;
            case PGM_FIXPT_DIVIDE:
                n = TARGET_FPE_INTDIV;
                n = (env->fpc >> 8) & 0xff;
                    /* compare-and-trap */
                    /* An IEEE exception, simulated or otherwise.  */
                    n = TARGET_FPE_FLTINV;
                } else if (n & 0x40) {
                    n = TARGET_FPE_FLTDIV;
                } else if (n & 0x20) {
                    n = TARGET_FPE_FLTOVF;
                } else if (n & 0x10) {
                    n = TARGET_FPE_FLTUND;
                } else if (n & 0x08) {
                    n = TARGET_FPE_FLTRES;
                    /* ??? Quantum exception; BFP, DFP error.  */
                fprintf(stderr, "Unhandled program exception: %#x\n", n);
                cpu_dump_state(cs, stderr, fprintf, 0);
            addr = env->psw.addr;
            info.si_signo = sig;
            info._sifields._sigfault._addr = addr;
            queue_signal(env, info.si_signo, &info);
            fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
#endif /* TARGET_S390X */
THREAD CPUState *thread_cpu;

void task_settid(TaskState *ts)
    if (ts->ts_tid == 0) {
        ts->ts_tid = (pid_t)syscall(SYS_gettid);

void stop_all_tasks(void)
    /*
     * We trust that when using NPTL, start_exclusive()
     * handles thread stopping correctly.
     */

/* Assumes contents are already zeroed.  */
void init_task_state(TaskState *ts)
    ts->first_free = ts->sigqueue_table;
    for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
        ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1];
    ts->sigqueue_table[i].next = NULL;
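/*
 * Editor's note (illustrative): init_task_state() above threads the
 * fixed-size sigqueue_table[] into a singly linked free list headed by
 * first_free.  Allocating an entry elsewhere in linux-user then just pops
 * the head; a sketch with a hypothetical wrapper name:
 *
 *     static struct sigqueue *alloc_sigqueue_sketch(TaskState *ts)
 *     {
 *         struct sigqueue *q = ts->first_free;
 *         if (q) {
 *             ts->first_free = q->next;   // pop from the free list
 *         }
 *         return q;                        // NULL when the table is full
 *     }
 */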
CPUArchState *cpu_copy(CPUArchState *env)
    CPUArchState *new_env = cpu_init(cpu_model);
#if defined(TARGET_HAS_ICE)

    /* Reset non arch specific state */
    cpu_reset(ENV_GET_CPU(new_env));

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
static void handle_arg_help(const char *arg)

static void handle_arg_log(const char *arg)
    mask = qemu_str_to_log_mask(arg);
        qemu_print_log_usage(stdout);

static void handle_arg_log_filename(const char *arg)
    qemu_set_log_filename(arg);

static void handle_arg_set_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_setenv(envlist, token) != 0) {

static void handle_arg_unset_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_unsetenv(envlist, token) != 0) {

static void handle_arg_argv0(const char *arg)
    argv0 = strdup(arg);

static void handle_arg_stack_size(const char *arg)
    guest_stack_size = strtoul(arg, &p, 0);
    if (guest_stack_size == 0) {
        guest_stack_size *= 1024 * 1024;
    } else if (*p == 'k' || *p == 'K') {
        guest_stack_size *= 1024;

static void handle_arg_ld_prefix(const char *arg)
    interp_prefix = strdup(arg);

static void handle_arg_pagesize(const char *arg)
    qemu_host_page_size = atoi(arg);
    if (qemu_host_page_size == 0 ||
        (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
        fprintf(stderr, "page size must be a power of two\n");

static void handle_arg_gdb(const char *arg)
    gdbstub_port = atoi(arg);

static void handle_arg_uname(const char *arg)
    qemu_uname_release = strdup(arg);

static void handle_arg_cpu(const char *arg)
    cpu_model = strdup(arg);
    if (cpu_model == NULL || is_help_option(cpu_model)) {
        /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
        cpu_list(stdout, &fprintf);

#if defined(CONFIG_USE_GUEST_BASE)
static void handle_arg_guest_base(const char *arg)
    guest_base = strtol(arg, NULL, 0);
    have_guest_base = 1;

static void handle_arg_reserved_va(const char *arg)
    reserved_va = strtoul(arg, &p, 0);
        unsigned long unshifted = reserved_va;
        reserved_va <<= shift;
        if (((reserved_va >> shift) != unshifted)
#if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
            || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
            fprintf(stderr, "Reserved virtual address too big\n");
        fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);

static void handle_arg_singlestep(const char *arg)

static void handle_arg_strace(const char *arg)

static void handle_arg_version(const char *arg)
    printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION
           ", Copyright (c) 2003-2008 Fabrice Bellard\n");
struct qemu_argument {
    void (*handle_opt)(const char *arg);
    const char *example;

static const struct qemu_argument arg_table[] = {
    {"h", "", false, handle_arg_help,
     "", "print this help"},
    {"g", "QEMU_GDB", true, handle_arg_gdb,
     "port", "wait gdb connection to 'port'"},
    {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix,
     "path", "set the elf interpreter prefix to 'path'"},
    {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size,
     "size", "set the stack size to 'size' bytes"},
    {"cpu", "QEMU_CPU", true, handle_arg_cpu,
     "model", "select CPU (-cpu help for list)"},
    {"E", "QEMU_SET_ENV", true, handle_arg_set_env,
     "var=value", "sets targets environment variable (see below)"},
    {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env,
     "var", "unsets targets environment variable (see below)"},
    {"0", "QEMU_ARGV0", true, handle_arg_argv0,
     "argv0", "forces target process argv[0] to be 'argv0'"},
    {"r", "QEMU_UNAME", true, handle_arg_uname,
     "uname", "set qemu uname release string to 'uname'"},
#if defined(CONFIG_USE_GUEST_BASE)
    {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base,
     "address", "set guest_base address to 'address'"},
    {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va,
     "size", "reserve 'size' bytes for guest virtual address space"},
    {"d", "QEMU_LOG", true, handle_arg_log,
     "item[,...]", "enable logging of specified items "
     "(use '-d help' for a list of items)"},
    {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename,
     "logfile", "write logs to 'logfile' (default stderr)"},
    {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize,
     "pagesize", "set the host page size to 'pagesize'"},
    {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep,
     "", "run in singlestep mode"},
    {"strace", "QEMU_STRACE", false, handle_arg_strace,
     "", "log system calls"},
    {"version", "QEMU_VERSION", false, handle_arg_version,
     "", "display version information and exit"},
    {NULL, NULL, false, NULL, NULL, NULL}
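/*
 * Editor's note (usage illustration): every entry above can be driven
 * either by its command-line flag or by the listed environment variable,
 * so, for example, these two invocations are equivalent (binary name and
 * paths hypothetical):
 *
 *     qemu-TARGET -E LD_LIBRARY_PATH=/usr/target/lib -s 16M ./prog
 *     QEMU_SET_ENV=LD_LIBRARY_PATH=/usr/target/lib \
 *         QEMU_STACK_SIZE=16M qemu-TARGET ./prog
 */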
static void usage(void)
    const struct qemu_argument *arginfo;

    printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n"
           "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n"
           "Options and associated environment variables:\n"

    /* Calculate column widths. We must always have at least enough space
     * for the column header.
     */
    maxarglen = strlen("Argument");
    maxenvlen = strlen("Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        int arglen = strlen(arginfo->argv);
        if (arginfo->has_arg) {
            arglen += strlen(arginfo->example) + 1;
        if (strlen(arginfo->env) > maxenvlen) {
            maxenvlen = strlen(arginfo->env);
        if (arglen > maxarglen) {

    printf("%-*s %-*s Description\n", maxarglen+1, "Argument",
           maxenvlen, "Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->has_arg) {
            printf("-%s %-*s %-*s %s\n", arginfo->argv,
                   (int)(maxarglen - strlen(arginfo->argv) - 1),
                   arginfo->example, maxenvlen, arginfo->env, arginfo->help);
            printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv,
                   maxenvlen, arginfo->env,

           "QEMU_LD_PREFIX  = %s\n"
           "QEMU_STACK_SIZE = %ld byte\n",

           "You can use -E and -U options or the QEMU_SET_ENV and\n"
           "QEMU_UNSET_ENV environment variables to set and unset\n"
           "environment variables for the target process.\n"
           "It is possible to provide several variables by separating them\n"
           "by commas in getsubopt(3) style. Additionally it is possible to\n"
           "provide the -E and -U options multiple times.\n"
           "The following lines are equivalent:\n"
           " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
           " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
           " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
           "Note that if you provide several changes to a single variable\n"
           "the last change will stay in effect.\n");
static int parse_args(int argc, char **argv)
    const struct qemu_argument *arginfo;

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->env == NULL) {
        r = getenv(arginfo->env);
            arginfo->handle_opt(r);

        if (optind >= argc) {
        if (!strcmp(r, "-")) {
        for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
            if (!strcmp(r, arginfo->argv)) {
                if (arginfo->has_arg) {
                    if (optind >= argc) {
                    arginfo->handle_opt(argv[optind]);
                arginfo->handle_opt(NULL);

        /* no option matched the current argv */
        if (arginfo->handle_opt == NULL) {

    if (optind >= argc) {

    filename = argv[optind];
    exec_path = argv[optind];
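/*
 * Editor's note (illustrative): parse_args() first applies any of the
 * environment variables listed in arg_table, then consumes leading
 * options, and stops at the first non-option word, which becomes both
 * filename and exec_path; everything after it is passed straight to the
 * guest.  For example (program name hypothetical):
 *
 *     qemu-TARGET -d in_asm -strace ./guest-prog --guest-flag value
 *
 * leaves optind pointing at "./guest-prog", so "--guest-flag value" ends
 * up in the guest's argv rather than being interpreted by QEMU.
 */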
int main(int argc, char **argv, char **envp)
    struct target_pt_regs regs1, *regs = &regs1;
    struct image_info info1, *info = &info1;
    struct linux_binprm bprm;
    char **target_environ, **wrk;

    module_call_init(MODULE_INIT_QOM);

    qemu_init_auxval(envp);
    qemu_cache_utils_init();

    if ((envlist = envlist_create()) == NULL) {
        (void) fprintf(stderr, "Unable to allocate envlist\n");

    /* add current environment into the list */
    for (wrk = environ; *wrk != NULL; wrk++) {
        (void) envlist_setenv(envlist, *wrk);

    /* Read the stack limit from the kernel.  If it's "unlimited",
       then we can do little else besides use the default.  */
    if (getrlimit(RLIMIT_STACK, &lim) == 0
        && lim.rlim_cur != RLIM_INFINITY
        && lim.rlim_cur == (target_long)lim.rlim_cur) {
        guest_stack_size = lim.rlim_cur;

#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file (TBD) */

    optind = parse_args(argc, argv);

    memset(regs, 0, sizeof(struct target_pt_regs));

    /* Zero out image_info */
    memset(info, 0, sizeof(struct image_info));

    memset(&bprm, 0, sizeof (bprm));

    /* Scan interp_prefix dir for replacement files. */
    init_paths(interp_prefix);

    init_qemu_uname_release();

    if (cpu_model == NULL) {
#if defined(TARGET_I386)
#ifdef TARGET_X86_64
        cpu_model = "qemu64";
        cpu_model = "qemu32";
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_M68K)
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
        cpu_model = "TI UltraSparc II";
        cpu_model = "Fujitsu MB86904";
#elif defined(TARGET_MIPS)
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
#elif defined TARGET_OPENRISC
        cpu_model = "or1200";
#elif defined(TARGET_PPC)
        cpu_model = "970fx";

    cpu_exec_init_all();
    /* NOTE: we need to init the CPU at this stage to get
       qemu_host_page_size */
    env = cpu_init(cpu_model);
        fprintf(stderr, "Unable to find CPU definition\n");
    cpu = ENV_GET_CPU(env);
    if (getenv("QEMU_STRACE")) {

    target_environ = envlist_to_environ(envlist, NULL);
    envlist_free(envlist);

#if defined(CONFIG_USE_GUEST_BASE)
    /*
     * Now that page sizes are configured in cpu_init() we can do
     * proper page alignment for guest_base.
     */
    guest_base = HOST_PAGE_ALIGN(guest_base);

    if (reserved_va || have_guest_base) {
        guest_base = init_guest_space(guest_base, reserved_va, 0,
        if (guest_base == (unsigned long)-1) {
            fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
                    "space for use as guest address space (check your virtual "
                    "memory ulimit setting or reserve less using -R option)\n",
            mmap_next_start = reserved_va;
#endif /* CONFIG_USE_GUEST_BASE */
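/*
 * Editor's note (illustrative): once guest_base is page-aligned and, when
 * requested with -B/-R, backed by init_guest_space(), guest virtual
 * addresses are turned into host pointers by a fixed offset.  Conceptually
 * (hypothetical helper name; the real translation is the g2h() macro used
 * further below for the IDT/GDT mappings):
 *
 *     static inline void *g2h_sketch(abi_ulong guest_addr)
 *     {
 *         return (void *)(uintptr_t)(guest_addr + guest_base);
 *     }
 */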
    /*
     * Read in mmap_min_addr kernel parameter.  This value is used
     * when loading the ELF image to determine whether guest_base
     * is needed.  It is also used in mmap_find_vma.
     */
        if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
            if (fscanf(fp, "%lu", &tmp) == 1) {
                mmap_min_addr = tmp;
                qemu_log("host mmap_min_addr=0x%lx\n", mmap_min_addr);

    /*
     * Prepare copy of argv vector for target.
     */
    target_argc = argc - optind;
    target_argv = calloc(target_argc + 1, sizeof (char *));
    if (target_argv == NULL) {
        (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");

    /*
     * If argv0 is specified (using '-0' switch) we replace
     * argv[0] pointer with the given one.
     */
    if (argv0 != NULL) {
        target_argv[i++] = strdup(argv0);
    for (; i < target_argc; i++) {
        target_argv[i] = strdup(argv[optind + i]);
    target_argv[target_argc] = NULL;
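/*
 * Editor's note (usage illustration): the target argv is rebuilt here so
 * that "-0" can override what the guest sees as argv[0] while the real
 * binary path is still the one loaded, e.g. (paths hypothetical):
 *
 *     qemu-TARGET -0 /bin/sh ./busybox -c 'echo hi'
 *
 * runs ./busybox but presents argv[0] == "/bin/sh" to it; the remaining
 * words become argv[1..] unchanged.
 */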
    ts = g_malloc0(sizeof(TaskState));
    init_task_state(ts);
    /* build Task State */

    execfd = qemu_getauxval(AT_EXECFD);
        execfd = open(filename, O_RDONLY);
            printf("Error while loading %s: %s\n", filename, strerror(errno));

    ret = loader_exec(execfd, filename, target_argv, target_environ, regs,
        printf("Error while loading %s: %s\n", filename, strerror(-ret));

    for (wrk = target_environ; *wrk; wrk++) {

    free(target_environ);

    if (qemu_log_enabled()) {
#if defined(CONFIG_USE_GUEST_BASE)
        qemu_log("guest_base 0x%lx\n", guest_base);

        qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
        qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code);
        qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data);
        qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk);
        qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry);

    target_set_brk(info->brk);
#if defined(CONFIG_USE_GUEST_BASE)
    /* Now that we've loaded the binary, GUEST_BASE is fixed.  Delay
       generating the prologue until now so that the prologue can take
       the real value of GUEST_BASE into account.  */
    tcg_prologue_init(&tcg_ctx);

#if defined(TARGET_I386)
    cpu_x86_set_cpl(env, 3);

    env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
    env->hflags |= HF_PE_MASK;
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        env->cr[4] |= CR4_OSFXSR_MASK;
        env->hflags |= HF_OSFXSR_MASK;
#ifndef TARGET_ABI32
    /* enable 64 bit mode if possible */
    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
    env->cr[4] |= CR4_PAE_MASK;
    env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
    env->hflags |= HF_LMA_MASK;

    /* flags setup : we activate the IRQs by default as in user mode */
    env->eflags |= IF_MASK;

    /* linux register setup */
#ifndef TARGET_ABI32
    env->regs[R_EAX] = regs->rax;
    env->regs[R_EBX] = regs->rbx;
    env->regs[R_ECX] = regs->rcx;
    env->regs[R_EDX] = regs->rdx;
    env->regs[R_ESI] = regs->rsi;
    env->regs[R_EDI] = regs->rdi;
    env->regs[R_EBP] = regs->rbp;
    env->regs[R_ESP] = regs->rsp;
    env->eip = regs->rip;
    env->regs[R_EAX] = regs->eax;
    env->regs[R_EBX] = regs->ebx;
    env->regs[R_ECX] = regs->ecx;
    env->regs[R_EDX] = regs->edx;
    env->regs[R_ESI] = regs->esi;
    env->regs[R_EDI] = regs->edi;
    env->regs[R_EBP] = regs->ebp;
    env->regs[R_ESP] = regs->esp;
    env->eip = regs->eip;

    /* linux interrupt setup */
#ifndef TARGET_ABI32
    env->idt.limit = 511;
    env->idt.limit = 255;
    env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
                                PROT_READ|PROT_WRITE,
                                MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    idt_table = g2h(env->idt.base);

    /* linux segment setup */
        uint64_t *gdt_table;
        env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
        gdt_table = g2h(env->gdt.base);

        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));

        /* 64 bit code segment */
        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));

        write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));

    cpu_x86_load_seg(env, R_CS, __USER_CS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_FS, __USER_DS);
    cpu_x86_load_seg(env, R_GS, __USER_DS);
    /* This hack makes Wine work... */
    env->segs[R_FS].selector = 0;
    cpu_x86_load_seg(env, R_DS, 0);
    cpu_x86_load_seg(env, R_ES, 0);
    cpu_x86_load_seg(env, R_FS, 0);
    cpu_x86_load_seg(env, R_GS, 0);
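/*
 * Editor's note (illustrative): the write_dt() calls above build flat
 * user-mode descriptors -- base 0, limit 0xfffff with the granularity bit
 * set so the limit is counted in pages, DPL 3, and type 0xa (readable code)
 * or 0x2 (read/write data).  Decoding one of them by hand:
 *
 *     flags = DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK
 *           | (3 << DESC_DPL_SHIFT)        // ring 3 (user mode)
 *           | (0xa << DESC_TYPE_SHIFT);    // code segment, readable
 *
 * which matches the usual __USER_CS/__USER_DS layout a Linux process
 * expects to run under.
 */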
#elif defined(TARGET_AARCH64)
        if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
                    "The selected ARM CPU does not support 64 bit mode\n");
        for (i = 0; i < 31; i++) {
            env->xregs[i] = regs->regs[i];
        env->xregs[31] = regs->sp;
#elif defined(TARGET_ARM)
        cpsr_write(env, regs->uregs[16], 0xffffffff);
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs->uregs[i];
        if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
            && (info->elf_flags & EF_ARM_BE8)) {
            env->bswap_code = 1;
#elif defined(TARGET_UNICORE32)
        cpu_asr_write(env, regs->uregs[32], 0xffffffff);
        for (i = 0; i < 32; i++) {
            env->regs[i] = regs->uregs[i];
#elif defined(TARGET_SPARC)
        env->npc = regs->npc;
        for (i = 0; i < 8; i++)
            env->gregs[i] = regs->u_regs[i];
        for (i = 0; i < 8; i++)
            env->regwptr[i] = regs->u_regs[i + 8];
#elif defined(TARGET_PPC)
#if defined(TARGET_PPC64)
#if defined(TARGET_ABI32)
        env->msr &= ~((target_ulong)1 << MSR_SF);
        env->msr |= (target_ulong)1 << MSR_SF;
        env->nip = regs->nip;
        for (i = 0; i < 32; i++) {
            env->gpr[i] = regs->gpr[i];
#elif defined(TARGET_M68K)
        env->dregs[0] = regs->d0;
        env->dregs[1] = regs->d1;
        env->dregs[2] = regs->d2;
        env->dregs[3] = regs->d3;
        env->dregs[4] = regs->d4;
        env->dregs[5] = regs->d5;
        env->dregs[6] = regs->d6;
        env->dregs[7] = regs->d7;
        env->aregs[0] = regs->a0;
        env->aregs[1] = regs->a1;
        env->aregs[2] = regs->a2;
        env->aregs[3] = regs->a3;
        env->aregs[4] = regs->a4;
        env->aregs[5] = regs->a5;
        env->aregs[6] = regs->a6;
        env->aregs[7] = regs->usp;
        ts->sim_syscalls = 1;
#elif defined(TARGET_MICROBLAZE)
        env->regs[0] = regs->r0;
        env->regs[1] = regs->r1;
        env->regs[2] = regs->r2;
        env->regs[3] = regs->r3;
        env->regs[4] = regs->r4;
        env->regs[5] = regs->r5;
        env->regs[6] = regs->r6;
        env->regs[7] = regs->r7;
        env->regs[8] = regs->r8;
        env->regs[9] = regs->r9;
        env->regs[10] = regs->r10;
        env->regs[11] = regs->r11;
        env->regs[12] = regs->r12;
        env->regs[13] = regs->r13;
        env->regs[14] = regs->r14;
        env->regs[15] = regs->r15;
        env->regs[16] = regs->r16;
        env->regs[17] = regs->r17;
        env->regs[18] = regs->r18;
        env->regs[19] = regs->r19;
        env->regs[20] = regs->r20;
        env->regs[21] = regs->r21;
        env->regs[22] = regs->r22;
        env->regs[23] = regs->r23;
        env->regs[24] = regs->r24;
        env->regs[25] = regs->r25;
        env->regs[26] = regs->r26;
        env->regs[27] = regs->r27;
        env->regs[28] = regs->r28;
        env->regs[29] = regs->r29;
        env->regs[30] = regs->r30;
        env->regs[31] = regs->r31;
        env->sregs[SR_PC] = regs->pc;
#elif defined(TARGET_MIPS)
        for (i = 0; i < 32; i++) {
            env->active_tc.gpr[i] = regs->regs[i];
        env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
        if (regs->cp0_epc & 1) {
            env->hflags |= MIPS_HFLAG_M16;
#elif defined(TARGET_OPENRISC)
        for (i = 0; i < 32; i++) {
            env->gpr[i] = regs->gpr[i];
#elif defined(TARGET_SH4)
        for (i = 0; i < 16; i++) {
            env->gregs[i] = regs->regs[i];
#elif defined(TARGET_ALPHA)
        for (i = 0; i < 28; i++) {
            env->ir[i] = ((abi_ulong *)regs)[i];
        env->ir[IR_SP] = regs->usp;
#elif defined(TARGET_CRIS)
        env->regs[0] = regs->r0;
        env->regs[1] = regs->r1;
        env->regs[2] = regs->r2;
        env->regs[3] = regs->r3;
        env->regs[4] = regs->r4;
        env->regs[5] = regs->r5;
        env->regs[6] = regs->r6;
        env->regs[7] = regs->r7;
        env->regs[8] = regs->r8;
        env->regs[9] = regs->r9;
        env->regs[10] = regs->r10;
        env->regs[11] = regs->r11;
        env->regs[12] = regs->r12;
        env->regs[13] = regs->r13;
        env->regs[14] = info->start_stack;
        env->regs[15] = regs->acr;
        env->pc = regs->erp;
#elif defined(TARGET_S390X)
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs->gprs[i];
        env->psw.mask = regs->psw.mask;
        env->psw.addr = regs->psw.addr;
#error unsupported target CPU

#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call.  */

    if (gdbserver_start(gdbstub_port) < 0) {
        fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
    gdb_handlesig(cpu, 0);
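/*
 * Editor's note (usage illustration): when gdbserver_start() succeeds the
 * emulator stops in gdb_handlesig() and waits for a debugger before running
 * any guest code, matching the "-g port" option described above, e.g.
 * (port and program hypothetical):
 *
 *     qemu-TARGET -g 1234 ./guest-prog
 *     # in another terminal:
 *     gdb ./guest-prog -ex 'target remote :1234'
 */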