 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/syscall.h>
#include <sys/resource.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
static const char *cpu_model;
unsigned long mmap_min_addr;
#if defined(CONFIG_USE_GUEST_BASE)
unsigned long guest_base;

#if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
/*
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
 *
 * This way we will never overlap with our own libraries or binaries or stack
 * or anything else that QEMU maps.
 */
# ifdef TARGET_MIPS
/* MIPS only supports 31 bits of virtual address space for user space */
unsigned long reserved_va = 0x77000000;
# else
unsigned long reserved_va = 0xf7000000;
# endif
#else
unsigned long reserved_va;
#endif
#endif
static void usage(void);

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;

/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
   we allocate a bigger stack. Need a better solution, for example
   by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;
void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}
#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)
{
    /* user-mode emulation has no interrupt controller */
    return -1;
}
#endif
/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work.  */
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
static int pending_cpus;
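/* pending_cpus is zero while no exclusive operation is in progress.
 * start_exclusive() raises it to cover every CPU still executing guest
 * code; cpu_exec_end() lowers it again and signals exclusive_cond once
 * only the requesting CPU is left running. */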
/* Make sure everything is in a consistent state for calling fork().  */
void fork_start(void)
{
    pthread_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    pthread_mutex_lock(&exclusive_lock);
    mmap_fork_start();
}
void fork_end(int child)
{
    mmap_fork_end(child);
    if (child) {
        CPUState *cpu, *next_cpu;
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads.  */
        CPU_FOREACH_SAFE(cpu, next_cpu) {
            if (cpu != thread_cpu) {
                QTAILQ_REMOVE(&cpus, cpu, node);
            }
        }
        pending_cpus = 0;
        pthread_mutex_init(&exclusive_lock, NULL);
        pthread_mutex_init(&cpu_list_mutex, NULL);
        pthread_cond_init(&exclusive_cond, NULL);
        pthread_cond_init(&exclusive_resume, NULL);
        pthread_mutex_init(&tcg_ctx.tb_ctx.tb_lock, NULL);
        gdbserver_fork((CPUArchState *)thread_cpu->env_ptr);
    } else {
        pthread_mutex_unlock(&exclusive_lock);
        pthread_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
    }
}
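/* Note that fork() leaves the parent's mutexes in whatever state they were
 * in at the time of the call, so the child re-initialises the locks above
 * rather than unlocking them; only the parent can safely release what
 * fork_start() acquired. */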
/* Wait for pending exclusive operations to complete.  The exclusive lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
    }
}
/* Start an exclusive operation.
   Must only be called from outside cpu_arm_exec.   */
static inline void start_exclusive(void)
{
    CPUState *other_cpu;

    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();

    pending_cpus = 1;
    /* Make all other cpus stop executing.  */
    CPU_FOREACH(other_cpu) {
        if (other_cpu->running) {
            pending_cpus++;
            cpu_exit(other_cpu);
        }
    }
    if (pending_cpus > 1) {
        pthread_cond_wait(&exclusive_cond, &exclusive_lock);
    }
}
/* Finish an exclusive operation.  */
static inline void __attribute__((unused)) end_exclusive(void)
{
    pending_cpus = 0;
    pthread_cond_broadcast(&exclusive_resume);
    pthread_mutex_unlock(&exclusive_lock);
}
/* Wait for exclusive ops to finish, and begin cpu execution.  */
static inline void cpu_exec_start(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();
    cpu->running = true;
    pthread_mutex_unlock(&exclusive_lock);
}
/* Mark cpu as not executing, and release pending exclusive ops.  */
static inline void cpu_exec_end(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    cpu->running = false;
    if (pending_cpus > 1) {
        pending_cpus--;
        if (pending_cpus == 1) {
            pthread_cond_signal(&exclusive_cond);
        }
    }
    exclusive_idle();
    pthread_mutex_unlock(&exclusive_lock);
}
void cpu_list_lock(void)
{
    pthread_mutex_lock(&cpu_list_mutex);
}

void cpu_list_unlock(void)
{
    pthread_mutex_unlock(&cpu_list_mutex);
}
/***********************************************************/
/* CPUX86 core interface */

void cpu_smm_update(CPUX86State *env)
{
}

uint64_t cpu_get_tsc(CPUX86State *env)
{
    return cpu_get_real_ticks();
}
static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
                     int flags)
{
    unsigned int e1, e2;
    uint32_t *p;
    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
    e2 |= flags << 8;
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}

static uint64_t *idt_table;
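/* write_dt() packs a raw segment descriptor and set_gate()/set_gate64()
 * pack interrupt-gate descriptors into the in-memory tables (such as
 * idt_table) that user-mode emulation hands to the emulated x86 CPU. */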
#ifdef TARGET_X86_64
static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
    p[2] = tswap32(addr >> 32);
    p[3] = 0;
}
/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
}
#else
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate(idt_table + n, 0, dpl, 0, 0);
}
#endif
void cpu_loop(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_x86_exec(env);

            /* linux syscall from int $0x80 */
            env->regs[R_EAX] = do_syscall(env,

            /* linux syscall from syscall instruction */
            env->regs[R_EAX] = do_syscall(env,

            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);

            /* XXX: potential problem if ABI32 */
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_fault(env);

                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SI_KERNEL;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, &info);

            info.si_signo = TARGET_SIGSEGV;
            if (!(env->error_code & 1))
                info.si_code = TARGET_SEGV_MAPERR;
                info.si_code = TARGET_SEGV_ACCERR;
            info._sifields._sigfault._addr = env->cr[2];
            queue_signal(env, info.si_signo, &info);

#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);

            /* division by zero */
            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_INTDIV;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, &info);

#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);

            info.si_signo = TARGET_SIGTRAP;
            if (trapnr == EXCP01_DB) {
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->eip;

                info.si_code = TARGET_SI_KERNEL;
                info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);

#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);

            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);

            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, &info);

            /* just indicate that signals should be handled asap */

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);

            pc = env->segs[R_CS].base + env->eip;
            fprintf(stderr, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",

        process_pending_signals(env);
#define get_user_code_u32(x, gaddr, doswap)                 \
    ({ abi_long __r = get_user_u32((x), (gaddr));           \
       if (!__r && (doswap)) { (x) = bswap32(x); }          \
       __r; })

#define get_user_code_u16(x, gaddr, doswap)                 \
    ({ abi_long __r = get_user_u16((x), (gaddr));           \
       if (!__r && (doswap)) { (x) = bswap16(x); }          \
       __r; })
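/* bswap_code is set for BE8 ARM binaries, where instructions are stored
 * little-endian even though data accesses are big-endian, so code words
 * loaded through the normal data accessors above have to be byte-swapped
 * before they are decoded. */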
/* Commpage handling -- there is no commpage for AArch64 */

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 *
 * Input:
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * Output:
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
    uint64_t oldval, newval, val;
    uint32_t addr, cpsr;
    target_siginfo_t info;

    /* Based on the 32 bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations. However things like ldrex/strex are much harder so
       there's not much point trying.  */
    start_exclusive();
    cpsr = cpsr_read(env);
    addr = env->regs[2];

    if (get_user_u64(oldval, env->regs[0])) {
        env->exception.vaddress = env->regs[0];
        goto segv;
    }

    if (get_user_u64(newval, env->regs[1])) {
        env->exception.vaddress = env->regs[1];
        goto segv;
    }

    if (get_user_u64(val, addr)) {
        env->exception.vaddress = addr;
        goto segv;
    }

    if (val == oldval) {
        val = newval;

        if (put_user_u64(val, addr)) {
            env->exception.vaddress = addr;
            goto segv;
        }

        env->regs[0] = 0;
        cpsr |= CPSR_C;
    } else {
        env->regs[0] = -1;
        cpsr &= ~CPSR_C;
    }

    cpsr_write(env, cpsr, CPSR_C);
    end_exclusive();
    return;

segv:
    end_exclusive();
    /* We get the PC of the entry address - which is as good as anything,
       on a real kernel what you get depends on which mode it uses. */
    info.si_signo = TARGET_SIGSEGV;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->exception.vaddress;
    queue_signal(env, info.si_signo, &info);
}
/* Handle a jump to the kernel code page.  */
static int
do_kernel_trap(CPUARMState *env)
{
    uint32_t addr;
    uint32_t cpsr;
    uint32_t val;

    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        /* ??? No-op. Will need to do better for SMP.  */
        break;
    case 0xffff0fc0: /* __kernel_cmpxchg */
        /* XXX: This only works between threads, not between processes.
           It's probably possible to implement this with native host
           operations. However things like ldrex/strex are much harder so
           there's not much point trying.  */
        start_exclusive();
        cpsr = cpsr_read(env);
        addr = env->regs[2];
        /* FIXME: This should SEGV if the access fails.  */
        if (get_user_u32(val, addr))
            val = ~env->regs[0];
        if (val == env->regs[0]) {
            val = env->regs[1];
            /* FIXME: Check for segfaults.  */
            put_user_u32(val, addr);
            env->regs[0] = 0;
            cpsr |= CPSR_C;
        } else {
            env->regs[0] = -1;
            cpsr &= ~CPSR_C;
        }
        cpsr_write(env, cpsr, CPSR_C);
        end_exclusive();
        break;
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = env->cp15.tpidrro_el[0];
        break;
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);
        break;
    default:
        return 1;
    }
    /* Jump back to the caller.  */
    addr = env->regs[14];
    env->regs[15] = addr;

    return 0;
}
/* Store exclusive handling for AArch32 */
static int do_strex(CPUARMState *env)
{
    uint64_t val;
    int size;
    int rc = 1;
    int segv = 0;
    uint32_t addr;

    start_exclusive();
    if (env->exclusive_addr != env->exclusive_test) {
        goto fail;
    }
    /* We know we're always AArch32 so the address is in uint32_t range
     * unless it was the -1 exclusive-monitor-lost value (which won't
     * match exclusive_test above).
     */
    assert(extract64(env->exclusive_addr, 32, 32) == 0);
    addr = env->exclusive_addr;
    size = env->exclusive_info & 0xf;
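    /* exclusive_info packs the access size in bits [3:0], the status
     * (Rd) register in [7:4], the stored value register in [11:8] and,
     * for 64-bit pairs, the high-word register in [15:12]. */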
    switch (size) {
    case 0:
        segv = get_user_u8(val, addr);
        break;
    case 1:
        segv = get_user_u16(val, addr);
        break;
    case 2:
    case 3:
        segv = get_user_u32(val, addr);
        break;
    default:
        abort();
    }
    if (segv) {
        env->exception.vaddress = addr;
        goto done;
    }
    if (size == 3) {
        uint32_t valhi;
        segv = get_user_u32(valhi, addr + 4);
        if (segv) {
            env->exception.vaddress = addr + 4;
            goto done;
        }
        val = deposit64(val, 32, 32, valhi);
    }
    if (val != env->exclusive_val) {
        goto fail;
    }

    val = env->regs[(env->exclusive_info >> 8) & 0xf];
    switch (size) {
    case 0:
        segv = put_user_u8(val, addr);
        break;
    case 1:
        segv = put_user_u16(val, addr);
        break;
    case 2:
    case 3:
        segv = put_user_u32(val, addr);
        break;
    }
    if (segv) {
        env->exception.vaddress = addr;
        goto done;
    }
    if (size == 3) {
        val = env->regs[(env->exclusive_info >> 12) & 0xf];
        segv = put_user_u32(val, addr + 4);
        if (segv) {
            env->exception.vaddress = addr + 4;
            goto done;
        }
    }
    rc = 0;
fail:
    env->regs[15] += 4;
    env->regs[(env->exclusive_info >> 4) & 0xf] = rc;
done:
    end_exclusive();
    return segv;
}
void cpu_loop(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    unsigned int n, insn;
    target_siginfo_t info;

        trapnr = cpu_arm_exec(env);

                TaskState *ts = cs->opaque;

                /* we handle the FPU emulation here, as Linux */
                /* we get the opcode */
                /* FIXME - what to do if get_user() fails? */
                get_user_code_u32(opcode, env->regs[15], env->bswap_code);

                rc = EmulateAll(opcode, &ts->fpa, env);
                if (rc == 0) { /* illegal instruction */
                    info.si_signo = TARGET_SIGILL;
                    info.si_code = TARGET_ILL_ILLOPN;
                    info._sifields._sigfault._addr = env->regs[15];
                    queue_signal(env, info.si_signo, &info);
                } else if (rc < 0) { /* FP exception */

                    /* translate softfloat flags to FPSR flags */
                    if (-rc & float_flag_invalid)
                    if (-rc & float_flag_divbyzero)
                    if (-rc & float_flag_overflow)
                    if (-rc & float_flag_underflow)
                    if (-rc & float_flag_inexact)

                    FPSR fpsr = ts->fpa.fpsr;
                    //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);

                    if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
                        info.si_signo = TARGET_SIGFPE;

                        /* ordered by priority, least first */
                        if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES;
                        if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND;
                        if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF;
                        if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV;
                        if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV;

                        info._sifields._sigfault._addr = env->regs[15];
                        queue_signal(env, info.si_signo, &info);

                    /* accumulate unenabled exceptions */
                    if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC))
                    if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC))
                    if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC))
                    if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC))
                    if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC))

                } else { /* everything OK */

                if (trapnr == EXCP_BKPT) {
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u16(insn, env->regs[15], env->bswap_code);
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u32(insn, env->regs[15], env->bswap_code);
                        n = (insn & 0xf) | ((insn >> 4) & 0xff0);
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u16(insn, env->regs[15] - 2,
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u32(insn, env->regs[15] - 4,

                if (n == ARM_NR_cacheflush) {
                } else if (n == ARM_NR_semihosting
                           || n == ARM_NR_thumb_semihosting) {
                    env->regs[0] = do_arm_semihosting (env);
                } else if (n == 0 || n >= ARM_SYSCALL_BASE || env->thumb) {
                    if (env->thumb || n == 0) {
                        n -= ARM_SYSCALL_BASE;
                    if ( n > ARM_NR_BASE) {
                        case ARM_NR_cacheflush:
                            cpu_set_tls(env, env->regs[0]);
                        case ARM_NR_breakpoint:
                            env->regs[15] -= env->thumb ? 2 : 4;
                            gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
                            env->regs[0] = -TARGET_ENOSYS;
                    env->regs[0] = do_syscall(env,

            /* just indicate that signals should be handled asap */

            if (!do_strex(env)) {
            /* fall through for segv */
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            addr = env->exception.vaddress;
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = addr;
            queue_signal(env, info.si_signo, &info);

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);

        case EXCP_KERNEL_TRAP:
            if (do_kernel_trap(env))

            fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
            cpu_dump_state(cs, stderr, fprintf, 0);

        process_pending_signals(env);
/*
 * Handle AArch64 store-release exclusive
 *
 * rs = gets the status result of store exclusive
 * rt = is the register that is stored
 * rt2 = is the second register store (in STP)
 */
static int do_strex_a64(CPUARMState *env)
{
    uint64_t val;
    int size;
    bool is_pair;
    int rc = 1;
    int segv = 0;
    uint64_t addr;
    int rs, rt, rt2;

    /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
    size = extract32(env->exclusive_info, 0, 2);
    is_pair = extract32(env->exclusive_info, 2, 1);
    rs = extract32(env->exclusive_info, 4, 5);
    rt = extract32(env->exclusive_info, 9, 5);
    rt2 = extract32(env->exclusive_info, 14, 5);
    addr = env->exclusive_addr;

    if (addr != env->exclusive_test) {

            segv = get_user_u8(val, addr);
            segv = get_user_u16(val, addr);
            segv = get_user_u32(val, addr);
            segv = get_user_u64(val, addr);

        env->exception.vaddress = addr;

    if (val != env->exclusive_val) {

            segv = get_user_u32(val, addr + 4);
            segv = get_user_u64(val, addr + 8);

            env->exception.vaddress = addr + (size == 2 ? 4 : 8);

        if (val != env->exclusive_high) {

    /* handle the zero register */
    val = rt == 31 ? 0 : env->xregs[rt];

        segv = put_user_u8(val, addr);
        segv = put_user_u16(val, addr);
        segv = put_user_u32(val, addr);
        segv = put_user_u64(val, addr);

        /* handle the zero register */
        val = rt2 == 31 ? 0 : env->xregs[rt2];

            segv = put_user_u32(val, addr + 4);
            segv = put_user_u64(val, addr + 8);

            env->exception.vaddress = addr + (size == 2 ? 4 : 8);

    /* rs == 31 encodes a write to the ZR, thus throwing away
     * the status return. This is rather silly but valid.
     */

    /* instruction faulted, PC does not advance */
    /* either way a strex releases any exclusive lock we have */
    env->exclusive_addr = -1;
/* AArch64 main loop */
void cpu_loop(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_arm_exec(env);

            env->xregs[0] = do_syscall(env,

        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */

            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);

            if (!do_strex_a64(env)) {
            /* fall through for segv */
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->exception.vaddress;
            queue_signal(env, info.si_signo, &info);

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                    info.si_signo = sig;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);

            fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
            cpu_dump_state(cs, stderr, fprintf, 0);

        process_pending_signals(env);
        /* Exception return on AArch64 always clears the exclusive monitor,
         * so any return to running guest code implies this.
         * A strex (successful or otherwise) also clears the monitor, so
         * we don't need to specialcase EXCP_STREX.
         */
        env->exclusive_addr = -1;

#endif /* ndef TARGET_ABI32 */
#ifdef TARGET_UNICORE32

void cpu_loop(CPUUniCore32State *env)
{
    CPUState *cs = CPU(uc32_env_get_cpu(env));
    unsigned int n, insn;
    target_siginfo_t info;

        trapnr = uc32_cpu_exec(env);

        case UC32_EXCP_PRIV:

            get_user_u32(insn, env->regs[31] - 4);
            n = insn & 0xffffff;

            if (n >= UC32_SYSCALL_BASE) {
                n -= UC32_SYSCALL_BASE;
                if (n == UC32_SYSCALL_NR_set_tls) {
                    cpu_set_tls(env, env->regs[0]);
                    env->regs[0] = do_syscall(env,

        case UC32_EXCP_DTRAP:
        case UC32_EXCP_ITRAP:
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->cp0.c4_faultaddr;
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                    info.si_signo = sig;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);

        process_pending_signals(env);

    fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
    cpu_dump_state(cs, stderr, fprintf, 0);
#define SPARC64_STACK_BIAS 2047

/* WARNING: dealing with register windows _is_ complicated. More info
   can be found at http://www.sics.se/~psm/sparcstack.html */
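/* On SPARC64 the ABI keeps the register save area at %sp + 2047 (the
 * "stack bias"), which is why the save/restore helpers below add
 * SPARC64_STACK_BIAS to a window's stack pointer before dereferencing it. */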
static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
{
    index = (index + cwp * 16) % (16 * env->nwindows);
    /* wrap handling : if cwp is on the last window, then we use the
       registers 'after' the end */
    if (index < 8 && env->cwp == env->nwindows - 1)
        index += 16 * env->nwindows;
    return index;
}

/* save the register window 'cwp1' */
static inline void save_window_offset(CPUSPARCState *env, int cwp1)
{
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
    sp_ptr += SPARC64_STACK_BIAS;
#if defined(DEBUG_WIN)
    printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",
    for (i = 0; i < 16; i++) {
        /* FIXME - what to do if put_user() fails? */
        put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);
static void save_window(CPUSPARCState *env)
{
#ifndef TARGET_SPARC64
    unsigned int new_wim;
    new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
*env
)
1218 #ifndef TARGET_SPARC64
1219 unsigned int new_wim
;
1221 unsigned int i
, cwp1
;
1224 #ifndef TARGET_SPARC64
1225 new_wim
= ((env
->wim
<< 1) | (env
->wim
>> (env
->nwindows
- 1))) &
1226 ((1LL << env
->nwindows
) - 1);
1229 /* restore the invalid window */
1230 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1231 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1232 #ifdef TARGET_SPARC64
1234 sp_ptr
+= SPARC64_STACK_BIAS
;
1236 #if defined(DEBUG_WIN)
1237 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" load_cwp=%d\n",
1240 for(i
= 0; i
< 16; i
++) {
1241 /* FIXME - what to do if get_user() fails? */
1242 get_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1243 sp_ptr
+= sizeof(abi_ulong
);
1245 #ifdef TARGET_SPARC64
1247 if (env
->cleanwin
< env
->nwindows
- 1)
static void flush_windows(CPUSPARCState *env)
{
        /* if restore would invoke restore_window(), then we can stop */
        cwp1 = cpu_cwp_inc(env, env->cwp + offset);
#ifndef TARGET_SPARC64
        if (env->wim & (1 << cwp1))
        if (env->canrestore == 0)
        save_window_offset(env, cwp1);

    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
#ifndef TARGET_SPARC64
    /* set wim so that restore will reload the registers */
    env->wim = 1 << cwp1;
#if defined(DEBUG_WIN)
    printf("flush_windows: nb=%d\n", offset - 1);
1285 void cpu_loop (CPUSPARCState
*env
)
1287 CPUState
*cs
= CPU(sparc_env_get_cpu(env
));
1290 target_siginfo_t info
;
1294 trapnr
= cpu_sparc_exec (env
);
1297 /* Compute PSR before exposing state. */
1298 if (env
->cc_op
!= CC_OP_FLAGS
) {
1303 #ifndef TARGET_SPARC64
1310 ret
= do_syscall (env
, env
->gregs
[1],
1311 env
->regwptr
[0], env
->regwptr
[1],
1312 env
->regwptr
[2], env
->regwptr
[3],
1313 env
->regwptr
[4], env
->regwptr
[5],
1315 if ((abi_ulong
)ret
>= (abi_ulong
)(-515)) {
1316 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1317 env
->xcc
|= PSR_CARRY
;
1319 env
->psr
|= PSR_CARRY
;
1323 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1324 env
->xcc
&= ~PSR_CARRY
;
1326 env
->psr
&= ~PSR_CARRY
;
1329 env
->regwptr
[0] = ret
;
1330 /* next instruction */
1332 env
->npc
= env
->npc
+ 4;
1334 case 0x83: /* flush windows */
1339 /* next instruction */
1341 env
->npc
= env
->npc
+ 4;
1343 #ifndef TARGET_SPARC64
1344 case TT_WIN_OVF
: /* window overflow */
1347 case TT_WIN_UNF
: /* window underflow */
1348 restore_window(env
);
1353 info
.si_signo
= TARGET_SIGSEGV
;
1355 /* XXX: check env->error_code */
1356 info
.si_code
= TARGET_SEGV_MAPERR
;
1357 info
._sifields
._sigfault
._addr
= env
->mmuregs
[4];
1358 queue_signal(env
, info
.si_signo
, &info
);
1362 case TT_SPILL
: /* window overflow */
1365 case TT_FILL
: /* window underflow */
1366 restore_window(env
);
1371 info
.si_signo
= TARGET_SIGSEGV
;
1373 /* XXX: check env->error_code */
1374 info
.si_code
= TARGET_SEGV_MAPERR
;
1375 if (trapnr
== TT_DFAULT
)
1376 info
._sifields
._sigfault
._addr
= env
->dmmuregs
[4];
1378 info
._sifields
._sigfault
._addr
= cpu_tsptr(env
)->tpc
;
1379 queue_signal(env
, info
.si_signo
, &info
);
1382 #ifndef TARGET_ABI32
1385 sparc64_get_context(env
);
1389 sparc64_set_context(env
);
1393 case EXCP_INTERRUPT
:
1394 /* just indicate that signals should be handled asap */
1398 info
.si_signo
= TARGET_SIGILL
;
1400 info
.si_code
= TARGET_ILL_ILLOPC
;
1401 info
._sifields
._sigfault
._addr
= env
->pc
;
1402 queue_signal(env
, info
.si_signo
, &info
);
1409 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1412 info
.si_signo
= sig
;
1414 info
.si_code
= TARGET_TRAP_BRKPT
;
1415 queue_signal(env
, info
.si_signo
, &info
);
1420 printf ("Unhandled trap: 0x%x\n", trapnr
);
1421 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1424 process_pending_signals (env
);
1431 static inline uint64_t cpu_ppc_get_tb(CPUPPCState
*env
)
1437 uint64_t cpu_ppc_load_tbl(CPUPPCState
*env
)
1439 return cpu_ppc_get_tb(env
);
1442 uint32_t cpu_ppc_load_tbu(CPUPPCState
*env
)
1444 return cpu_ppc_get_tb(env
) >> 32;
1447 uint64_t cpu_ppc_load_atbl(CPUPPCState
*env
)
1449 return cpu_ppc_get_tb(env
);
1452 uint32_t cpu_ppc_load_atbu(CPUPPCState
*env
)
1454 return cpu_ppc_get_tb(env
) >> 32;
1457 uint32_t cpu_ppc601_load_rtcu(CPUPPCState
*env
)
1458 __attribute__ (( alias ("cpu_ppc_load_tbu") ));
1460 uint32_t cpu_ppc601_load_rtcl(CPUPPCState
*env
)
1462 return cpu_ppc_load_tbl(env
) & 0x3FFFFF80;
1465 /* XXX: to be fixed */
1466 int ppc_dcr_read (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t *valp
)
1471 int ppc_dcr_write (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t val
)
#define EXCP_DUMP(env, fmt, ...)                                        \
do {                                                                    \
    CPUState *cs = ENV_GET_CPU(env);                                    \
    fprintf(stderr, fmt , ## __VA_ARGS__);                              \
    cpu_dump_state(cs, stderr, fprintf, 0);                             \
    qemu_log(fmt, ## __VA_ARGS__);                                      \
    if (qemu_log_enabled()) {                                           \
        log_cpu_state(cs, 0);                                           \
    }                                                                   \
} while (0)
static int do_store_exclusive(CPUPPCState *env)
{
    target_ulong addr;
    target_ulong page_addr;
    target_ulong val, val2 __attribute__((unused)) = 0;
    int flags;
    int segv = 0;

    addr = env->reserve_ea;
    page_addr = addr & TARGET_PAGE_MASK;
    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {
        segv = 1;
    } else {
        int reg = env->reserve_info & 0x1f;
        int size = env->reserve_info >> 5;
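        /* reserve_info packs the source/target register number in its
         * low five bits and the access size in bytes in the bits above. */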
1507 if (addr
== env
->reserve_addr
) {
1509 case 1: segv
= get_user_u8(val
, addr
); break;
1510 case 2: segv
= get_user_u16(val
, addr
); break;
1511 case 4: segv
= get_user_u32(val
, addr
); break;
1512 #if defined(TARGET_PPC64)
1513 case 8: segv
= get_user_u64(val
, addr
); break;
1515 segv
= get_user_u64(val
, addr
);
1517 segv
= get_user_u64(val2
, addr
+ 8);
1524 if (!segv
&& val
== env
->reserve_val
) {
1525 val
= env
->gpr
[reg
];
1527 case 1: segv
= put_user_u8(val
, addr
); break;
1528 case 2: segv
= put_user_u16(val
, addr
); break;
1529 case 4: segv
= put_user_u32(val
, addr
); break;
1530 #if defined(TARGET_PPC64)
1531 case 8: segv
= put_user_u64(val
, addr
); break;
1533 if (val2
== env
->reserve_val2
) {
1536 val
= env
->gpr
[reg
+1];
1538 val2
= env
->gpr
[reg
+1];
1540 segv
= put_user_u64(val
, addr
);
1542 segv
= put_user_u64(val2
, addr
+ 8);
1555 env
->crf
[0] = (stored
<< 1) | xer_so
;
1556 env
->reserve_addr
= (target_ulong
)-1;
1566 void cpu_loop(CPUPPCState
*env
)
1568 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
1569 target_siginfo_t info
;
1575 trapnr
= cpu_ppc_exec(env
);
1578 case POWERPC_EXCP_NONE
:
1581 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1582 cpu_abort(cs
, "Critical interrupt while in user mode. "
1585 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1586 cpu_abort(cs
, "Machine check exception while in user mode. "
1589 case POWERPC_EXCP_DSI
: /* Data storage exception */
1590 EXCP_DUMP(env
, "Invalid data memory access: 0x" TARGET_FMT_lx
"\n",
1592 /* XXX: check this. Seems bugged */
1593 switch (env
->error_code
& 0xFF000000) {
1595 info
.si_signo
= TARGET_SIGSEGV
;
1597 info
.si_code
= TARGET_SEGV_MAPERR
;
1600 info
.si_signo
= TARGET_SIGILL
;
1602 info
.si_code
= TARGET_ILL_ILLADR
;
1605 info
.si_signo
= TARGET_SIGSEGV
;
1607 info
.si_code
= TARGET_SEGV_ACCERR
;
1610 /* Let's send a regular segfault... */
1611 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1613 info
.si_signo
= TARGET_SIGSEGV
;
1615 info
.si_code
= TARGET_SEGV_MAPERR
;
1618 info
._sifields
._sigfault
._addr
= env
->nip
;
1619 queue_signal(env
, info
.si_signo
, &info
);
1621 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1622 EXCP_DUMP(env
, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1623 "\n", env
->spr
[SPR_SRR0
]);
1624 /* XXX: check this */
1625 switch (env
->error_code
& 0xFF000000) {
1627 info
.si_signo
= TARGET_SIGSEGV
;
1629 info
.si_code
= TARGET_SEGV_MAPERR
;
1633 info
.si_signo
= TARGET_SIGSEGV
;
1635 info
.si_code
= TARGET_SEGV_ACCERR
;
1638 /* Let's send a regular segfault... */
1639 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1641 info
.si_signo
= TARGET_SIGSEGV
;
1643 info
.si_code
= TARGET_SEGV_MAPERR
;
1646 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1647 queue_signal(env
, info
.si_signo
, &info
);
1649 case POWERPC_EXCP_EXTERNAL
: /* External input */
1650 cpu_abort(cs
, "External interrupt while in user mode. "
1653 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1654 EXCP_DUMP(env
, "Unaligned memory access\n");
1655 /* XXX: check this */
1656 info
.si_signo
= TARGET_SIGBUS
;
1658 info
.si_code
= TARGET_BUS_ADRALN
;
1659 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1660 queue_signal(env
, info
.si_signo
, &info
);
1662 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1663 /* XXX: check this */
1664 switch (env
->error_code
& ~0xF) {
1665 case POWERPC_EXCP_FP
:
1666 EXCP_DUMP(env
, "Floating point program exception\n");
1667 info
.si_signo
= TARGET_SIGFPE
;
1669 switch (env
->error_code
& 0xF) {
1670 case POWERPC_EXCP_FP_OX
:
1671 info
.si_code
= TARGET_FPE_FLTOVF
;
1673 case POWERPC_EXCP_FP_UX
:
1674 info
.si_code
= TARGET_FPE_FLTUND
;
1676 case POWERPC_EXCP_FP_ZX
:
1677 case POWERPC_EXCP_FP_VXZDZ
:
1678 info
.si_code
= TARGET_FPE_FLTDIV
;
1680 case POWERPC_EXCP_FP_XX
:
1681 info
.si_code
= TARGET_FPE_FLTRES
;
1683 case POWERPC_EXCP_FP_VXSOFT
:
1684 info
.si_code
= TARGET_FPE_FLTINV
;
1686 case POWERPC_EXCP_FP_VXSNAN
:
1687 case POWERPC_EXCP_FP_VXISI
:
1688 case POWERPC_EXCP_FP_VXIDI
:
1689 case POWERPC_EXCP_FP_VXIMZ
:
1690 case POWERPC_EXCP_FP_VXVC
:
1691 case POWERPC_EXCP_FP_VXSQRT
:
1692 case POWERPC_EXCP_FP_VXCVI
:
1693 info
.si_code
= TARGET_FPE_FLTSUB
;
1696 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1701 case POWERPC_EXCP_INVAL
:
1702 EXCP_DUMP(env
, "Invalid instruction\n");
1703 info
.si_signo
= TARGET_SIGILL
;
1705 switch (env
->error_code
& 0xF) {
1706 case POWERPC_EXCP_INVAL_INVAL
:
1707 info
.si_code
= TARGET_ILL_ILLOPC
;
1709 case POWERPC_EXCP_INVAL_LSWX
:
1710 info
.si_code
= TARGET_ILL_ILLOPN
;
1712 case POWERPC_EXCP_INVAL_SPR
:
1713 info
.si_code
= TARGET_ILL_PRVREG
;
1715 case POWERPC_EXCP_INVAL_FP
:
1716 info
.si_code
= TARGET_ILL_COPROC
;
1719 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1720 env
->error_code
& 0xF);
1721 info
.si_code
= TARGET_ILL_ILLADR
;
1725 case POWERPC_EXCP_PRIV
:
1726 EXCP_DUMP(env
, "Privilege violation\n");
1727 info
.si_signo
= TARGET_SIGILL
;
1729 switch (env
->error_code
& 0xF) {
1730 case POWERPC_EXCP_PRIV_OPC
:
1731 info
.si_code
= TARGET_ILL_PRVOPC
;
1733 case POWERPC_EXCP_PRIV_REG
:
1734 info
.si_code
= TARGET_ILL_PRVREG
;
1737 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1738 env
->error_code
& 0xF);
1739 info
.si_code
= TARGET_ILL_PRVOPC
;
1743 case POWERPC_EXCP_TRAP
:
1744 cpu_abort(cs
, "Tried to call a TRAP\n");
1747 /* Should not happen ! */
1748 cpu_abort(cs
, "Unknown program exception (%02x)\n",
1752 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1753 queue_signal(env
, info
.si_signo
, &info
);
1755 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1756 EXCP_DUMP(env
, "No floating point allowed\n");
1757 info
.si_signo
= TARGET_SIGILL
;
1759 info
.si_code
= TARGET_ILL_COPROC
;
1760 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1761 queue_signal(env
, info
.si_signo
, &info
);
1763 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1764 cpu_abort(cs
, "Syscall exception while in user mode. "
1767 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1768 EXCP_DUMP(env
, "No APU instruction allowed\n");
1769 info
.si_signo
= TARGET_SIGILL
;
1771 info
.si_code
= TARGET_ILL_COPROC
;
1772 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1773 queue_signal(env
, info
.si_signo
, &info
);
1775 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1776 cpu_abort(cs
, "Decrementer interrupt while in user mode. "
1779 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1780 cpu_abort(cs
, "Fix interval timer interrupt while in user mode. "
1783 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1784 cpu_abort(cs
, "Watchdog timer interrupt while in user mode. "
1787 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1788 cpu_abort(cs
, "Data TLB exception while in user mode. "
1791 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1792 cpu_abort(cs
, "Instruction TLB exception while in user mode. "
1795 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1796 EXCP_DUMP(env
, "No SPE/floating-point instruction allowed\n");
1797 info
.si_signo
= TARGET_SIGILL
;
1799 info
.si_code
= TARGET_ILL_COPROC
;
1800 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1801 queue_signal(env
, info
.si_signo
, &info
);
1803 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1804 cpu_abort(cs
, "Embedded floating-point data IRQ not handled\n");
1806 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1807 cpu_abort(cs
, "Embedded floating-point round IRQ not handled\n");
1809 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1810 cpu_abort(cs
, "Performance monitor exception not handled\n");
1812 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1813 cpu_abort(cs
, "Doorbell interrupt while in user mode. "
1816 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1817 cpu_abort(cs
, "Doorbell critical interrupt while in user mode. "
1820 case POWERPC_EXCP_RESET
: /* System reset exception */
1821 cpu_abort(cs
, "Reset interrupt while in user mode. "
1824 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1825 cpu_abort(cs
, "Data segment exception while in user mode. "
1828 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1829 cpu_abort(cs
, "Instruction segment exception "
1830 "while in user mode. Aborting\n");
1832 /* PowerPC 64 with hypervisor mode support */
1833 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1834 cpu_abort(cs
, "Hypervisor decrementer interrupt "
1835 "while in user mode. Aborting\n");
1837 case POWERPC_EXCP_TRACE
: /* Trace exception */
1839 * we use this exception to emulate step-by-step execution mode.
1842 /* PowerPC 64 with hypervisor mode support */
1843 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1844 cpu_abort(cs
, "Hypervisor data storage exception "
1845 "while in user mode. Aborting\n");
1847 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1848 cpu_abort(cs
, "Hypervisor instruction storage exception "
1849 "while in user mode. Aborting\n");
1851 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1852 cpu_abort(cs
, "Hypervisor data segment exception "
1853 "while in user mode. Aborting\n");
1855 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1856 cpu_abort(cs
, "Hypervisor instruction segment exception "
1857 "while in user mode. Aborting\n");
1859 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1860 EXCP_DUMP(env
, "No Altivec instructions allowed\n");
1861 info
.si_signo
= TARGET_SIGILL
;
1863 info
.si_code
= TARGET_ILL_COPROC
;
1864 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1865 queue_signal(env
, info
.si_signo
, &info
);
1867 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1868 cpu_abort(cs
, "Programmable interval timer interrupt "
1869 "while in user mode. Aborting\n");
1871 case POWERPC_EXCP_IO
: /* IO error exception */
1872 cpu_abort(cs
, "IO error exception while in user mode. "
1875 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1876 cpu_abort(cs
, "Run mode exception while in user mode. "
1879 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1880 cpu_abort(cs
, "Emulation trap exception not handled\n");
1882 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1883 cpu_abort(cs
, "Instruction fetch TLB exception "
1884 "while in user-mode. Aborting");
1886 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1887 cpu_abort(cs
, "Data load TLB exception while in user-mode. "
1890 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
1891 cpu_abort(cs
, "Data store TLB exception while in user-mode. "
1894 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
1895 cpu_abort(cs
, "Floating-point assist exception not handled\n");
1897 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1898 cpu_abort(cs
, "Instruction address breakpoint exception "
1901 case POWERPC_EXCP_SMI
: /* System management interrupt */
1902 cpu_abort(cs
, "System management interrupt while in user mode. "
1905 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1906 cpu_abort(cs
, "Thermal interrupt interrupt while in user mode. "
1909 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
1910 cpu_abort(cs
, "Performance monitor exception not handled\n");
1912 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1913 cpu_abort(cs
, "Vector assist exception not handled\n");
1915 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
1916 cpu_abort(cs
, "Soft patch exception not handled\n");
1918 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1919 cpu_abort(cs
, "Maintenance exception while in user mode. "
1922 case POWERPC_EXCP_STOP
: /* stop translation */
1923 /* We did invalidate the instruction cache. Go on */
1925 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
1926 /* We just stopped because of a branch. Go on */
1928 case POWERPC_EXCP_SYSCALL_USER
:
1929 /* system call in user-mode emulation */
1931 * PPC ABI uses overflow flag in cr0 to signal an error
1934 env
->crf
[0] &= ~0x1;
1935 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
1936 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
1938 if (ret
== (target_ulong
)(-TARGET_QEMU_ESIGRETURN
)) {
1939 /* Returning from a successful sigreturn syscall.
1940 Avoid corrupting register state. */
1943 if (ret
> (target_ulong
)(-515)) {
1949 case POWERPC_EXCP_STCX
:
1950 if (do_store_exclusive(env
)) {
1951 info
.si_signo
= TARGET_SIGSEGV
;
1953 info
.si_code
= TARGET_SEGV_MAPERR
;
1954 info
._sifields
._sigfault
._addr
= env
->nip
;
1955 queue_signal(env
, info
.si_signo
, &info
);
1962 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1964 info
.si_signo
= sig
;
1966 info
.si_code
= TARGET_TRAP_BRKPT
;
1967 queue_signal(env
, info
.si_signo
, &info
);
1971 case EXCP_INTERRUPT
:
1972 /* just indicate that signals should be handled asap */
1975 cpu_abort(cs
, "Unknown exception 0x%d. Aborting\n", trapnr
);
1978 process_pending_signals(env
);
1985 # ifdef TARGET_ABI_MIPSO32
1986 # define MIPS_SYS(name, args) args,
1987 static const uint8_t mips_syscall_args
[] = {
1988 MIPS_SYS(sys_syscall
, 8) /* 4000 */
1989 MIPS_SYS(sys_exit
, 1)
1990 MIPS_SYS(sys_fork
, 0)
1991 MIPS_SYS(sys_read
, 3)
1992 MIPS_SYS(sys_write
, 3)
1993 MIPS_SYS(sys_open
, 3) /* 4005 */
1994 MIPS_SYS(sys_close
, 1)
1995 MIPS_SYS(sys_waitpid
, 3)
1996 MIPS_SYS(sys_creat
, 2)
1997 MIPS_SYS(sys_link
, 2)
1998 MIPS_SYS(sys_unlink
, 1) /* 4010 */
1999 MIPS_SYS(sys_execve
, 0)
2000 MIPS_SYS(sys_chdir
, 1)
2001 MIPS_SYS(sys_time
, 1)
2002 MIPS_SYS(sys_mknod
, 3)
2003 MIPS_SYS(sys_chmod
, 2) /* 4015 */
2004 MIPS_SYS(sys_lchown
, 3)
2005 MIPS_SYS(sys_ni_syscall
, 0)
2006 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_stat */
2007 MIPS_SYS(sys_lseek
, 3)
2008 MIPS_SYS(sys_getpid
, 0) /* 4020 */
2009 MIPS_SYS(sys_mount
, 5)
2010 MIPS_SYS(sys_umount
, 1)
2011 MIPS_SYS(sys_setuid
, 1)
2012 MIPS_SYS(sys_getuid
, 0)
2013 MIPS_SYS(sys_stime
, 1) /* 4025 */
2014 MIPS_SYS(sys_ptrace
, 4)
2015 MIPS_SYS(sys_alarm
, 1)
2016 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_fstat */
2017 MIPS_SYS(sys_pause
, 0)
2018 MIPS_SYS(sys_utime
, 2) /* 4030 */
2019 MIPS_SYS(sys_ni_syscall
, 0)
2020 MIPS_SYS(sys_ni_syscall
, 0)
2021 MIPS_SYS(sys_access
, 2)
2022 MIPS_SYS(sys_nice
, 1)
2023 MIPS_SYS(sys_ni_syscall
, 0) /* 4035 */
2024 MIPS_SYS(sys_sync
, 0)
2025 MIPS_SYS(sys_kill
, 2)
2026 MIPS_SYS(sys_rename
, 2)
2027 MIPS_SYS(sys_mkdir
, 2)
2028 MIPS_SYS(sys_rmdir
, 1) /* 4040 */
2029 MIPS_SYS(sys_dup
, 1)
2030 MIPS_SYS(sys_pipe
, 0)
2031 MIPS_SYS(sys_times
, 1)
2032 MIPS_SYS(sys_ni_syscall
, 0)
2033 MIPS_SYS(sys_brk
, 1) /* 4045 */
2034 MIPS_SYS(sys_setgid
, 1)
2035 MIPS_SYS(sys_getgid
, 0)
2036 MIPS_SYS(sys_ni_syscall
, 0) /* was signal(2) */
2037 MIPS_SYS(sys_geteuid
, 0)
2038 MIPS_SYS(sys_getegid
, 0) /* 4050 */
2039 MIPS_SYS(sys_acct
, 0)
2040 MIPS_SYS(sys_umount2
, 2)
2041 MIPS_SYS(sys_ni_syscall
, 0)
2042 MIPS_SYS(sys_ioctl
, 3)
2043 MIPS_SYS(sys_fcntl
, 3) /* 4055 */
2044 MIPS_SYS(sys_ni_syscall
, 2)
2045 MIPS_SYS(sys_setpgid
, 2)
2046 MIPS_SYS(sys_ni_syscall
, 0)
2047 MIPS_SYS(sys_olduname
, 1)
2048 MIPS_SYS(sys_umask
, 1) /* 4060 */
2049 MIPS_SYS(sys_chroot
, 1)
2050 MIPS_SYS(sys_ustat
, 2)
2051 MIPS_SYS(sys_dup2
, 2)
2052 MIPS_SYS(sys_getppid
, 0)
2053 MIPS_SYS(sys_getpgrp
, 0) /* 4065 */
2054 MIPS_SYS(sys_setsid
, 0)
2055 MIPS_SYS(sys_sigaction
, 3)
2056 MIPS_SYS(sys_sgetmask
, 0)
2057 MIPS_SYS(sys_ssetmask
, 1)
2058 MIPS_SYS(sys_setreuid
, 2) /* 4070 */
2059 MIPS_SYS(sys_setregid
, 2)
2060 MIPS_SYS(sys_sigsuspend
, 0)
2061 MIPS_SYS(sys_sigpending
, 1)
2062 MIPS_SYS(sys_sethostname
, 2)
2063 MIPS_SYS(sys_setrlimit
, 2) /* 4075 */
2064 MIPS_SYS(sys_getrlimit
, 2)
2065 MIPS_SYS(sys_getrusage
, 2)
2066 MIPS_SYS(sys_gettimeofday
, 2)
2067 MIPS_SYS(sys_settimeofday
, 2)
2068 MIPS_SYS(sys_getgroups
, 2) /* 4080 */
2069 MIPS_SYS(sys_setgroups
, 2)
2070 MIPS_SYS(sys_ni_syscall
, 0) /* old_select */
2071 MIPS_SYS(sys_symlink
, 2)
2072 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_lstat */
2073 MIPS_SYS(sys_readlink
, 3) /* 4085 */
2074 MIPS_SYS(sys_uselib
, 1)
2075 MIPS_SYS(sys_swapon
, 2)
2076 MIPS_SYS(sys_reboot
, 3)
2077 MIPS_SYS(old_readdir
, 3)
2078 MIPS_SYS(old_mmap
, 6) /* 4090 */
2079 MIPS_SYS(sys_munmap
, 2)
2080 MIPS_SYS(sys_truncate
, 2)
2081 MIPS_SYS(sys_ftruncate
, 2)
2082 MIPS_SYS(sys_fchmod
, 2)
2083 MIPS_SYS(sys_fchown
, 3) /* 4095 */
2084 MIPS_SYS(sys_getpriority
, 2)
2085 MIPS_SYS(sys_setpriority
, 3)
2086 MIPS_SYS(sys_ni_syscall
, 0)
2087 MIPS_SYS(sys_statfs
, 2)
2088 MIPS_SYS(sys_fstatfs
, 2) /* 4100 */
2089 MIPS_SYS(sys_ni_syscall
, 0) /* was ioperm(2) */
2090 MIPS_SYS(sys_socketcall
, 2)
2091 MIPS_SYS(sys_syslog
, 3)
2092 MIPS_SYS(sys_setitimer
, 3)
2093 MIPS_SYS(sys_getitimer
, 2) /* 4105 */
2094 MIPS_SYS(sys_newstat
, 2)
2095 MIPS_SYS(sys_newlstat
, 2)
2096 MIPS_SYS(sys_newfstat
, 2)
2097 MIPS_SYS(sys_uname
, 1)
2098 MIPS_SYS(sys_ni_syscall
, 0) /* 4110 was iopl(2) */
2099 MIPS_SYS(sys_vhangup
, 0)
2100 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_idle() */
2101 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_vm86 */
2102 MIPS_SYS(sys_wait4
, 4)
2103 MIPS_SYS(sys_swapoff
, 1) /* 4115 */
2104 MIPS_SYS(sys_sysinfo
, 1)
2105 MIPS_SYS(sys_ipc
, 6)
2106 MIPS_SYS(sys_fsync
, 1)
2107 MIPS_SYS(sys_sigreturn
, 0)
2108 MIPS_SYS(sys_clone
, 6) /* 4120 */
2109 MIPS_SYS(sys_setdomainname
, 2)
2110 MIPS_SYS(sys_newuname
, 1)
2111 MIPS_SYS(sys_ni_syscall
, 0) /* sys_modify_ldt */
2112 MIPS_SYS(sys_adjtimex
, 1)
2113 MIPS_SYS(sys_mprotect
, 3) /* 4125 */
2114 MIPS_SYS(sys_sigprocmask
, 3)
2115 MIPS_SYS(sys_ni_syscall
, 0) /* was create_module */
2116 MIPS_SYS(sys_init_module
, 5)
2117 MIPS_SYS(sys_delete_module
, 1)
2118 MIPS_SYS(sys_ni_syscall
, 0) /* 4130 was get_kernel_syms */
2119 MIPS_SYS(sys_quotactl
, 0)
2120 MIPS_SYS(sys_getpgid
, 1)
2121 MIPS_SYS(sys_fchdir
, 1)
2122 MIPS_SYS(sys_bdflush
, 2)
2123 MIPS_SYS(sys_sysfs
, 3) /* 4135 */
2124 MIPS_SYS(sys_personality
, 1)
2125 MIPS_SYS(sys_ni_syscall
, 0) /* for afs_syscall */
2126 MIPS_SYS(sys_setfsuid
, 1)
2127 MIPS_SYS(sys_setfsgid
, 1)
2128 MIPS_SYS(sys_llseek
, 5) /* 4140 */
2129 MIPS_SYS(sys_getdents
, 3)
2130 MIPS_SYS(sys_select
, 5)
2131 MIPS_SYS(sys_flock
, 2)
2132 MIPS_SYS(sys_msync
, 3)
2133 MIPS_SYS(sys_readv
, 3) /* 4145 */
2134 MIPS_SYS(sys_writev
, 3)
2135 MIPS_SYS(sys_cacheflush
, 3)
2136 MIPS_SYS(sys_cachectl
, 3)
2137 MIPS_SYS(sys_sysmips
, 4)
2138 MIPS_SYS(sys_ni_syscall
, 0) /* 4150 */
2139 MIPS_SYS(sys_getsid
, 1)
2140 MIPS_SYS(sys_fdatasync
, 0)
2141 MIPS_SYS(sys_sysctl
, 1)
2142 MIPS_SYS(sys_mlock
, 2)
2143 MIPS_SYS(sys_munlock
, 2) /* 4155 */
2144 MIPS_SYS(sys_mlockall
, 1)
2145 MIPS_SYS(sys_munlockall
, 0)
2146 MIPS_SYS(sys_sched_setparam
, 2)
2147 MIPS_SYS(sys_sched_getparam
, 2)
2148 MIPS_SYS(sys_sched_setscheduler
, 3) /* 4160 */
2149 MIPS_SYS(sys_sched_getscheduler
, 1)
2150 MIPS_SYS(sys_sched_yield
, 0)
2151 MIPS_SYS(sys_sched_get_priority_max
, 1)
2152 MIPS_SYS(sys_sched_get_priority_min
, 1)
2153 MIPS_SYS(sys_sched_rr_get_interval
, 2) /* 4165 */
2154 MIPS_SYS(sys_nanosleep
, 2)
2155 MIPS_SYS(sys_mremap
, 5)
2156 MIPS_SYS(sys_accept
, 3)
2157 MIPS_SYS(sys_bind
, 3)
2158 MIPS_SYS(sys_connect
, 3) /* 4170 */
2159 MIPS_SYS(sys_getpeername
, 3)
2160 MIPS_SYS(sys_getsockname
, 3)
2161 MIPS_SYS(sys_getsockopt
, 5)
2162 MIPS_SYS(sys_listen
, 2)
2163 MIPS_SYS(sys_recv
, 4) /* 4175 */
2164 MIPS_SYS(sys_recvfrom
, 6)
2165 MIPS_SYS(sys_recvmsg
, 3)
2166 MIPS_SYS(sys_send
, 4)
2167 MIPS_SYS(sys_sendmsg
, 3)
2168 MIPS_SYS(sys_sendto
, 6) /* 4180 */
2169 MIPS_SYS(sys_setsockopt
, 5)
2170 MIPS_SYS(sys_shutdown
, 2)
2171 MIPS_SYS(sys_socket
, 3)
2172 MIPS_SYS(sys_socketpair
, 4)
2173 MIPS_SYS(sys_setresuid
, 3) /* 4185 */
2174 MIPS_SYS(sys_getresuid
, 3)
2175 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_query_module */
2176 MIPS_SYS(sys_poll
, 3)
2177 MIPS_SYS(sys_nfsservctl
, 3)
2178 MIPS_SYS(sys_setresgid
, 3) /* 4190 */
2179 MIPS_SYS(sys_getresgid
, 3)
2180 MIPS_SYS(sys_prctl
, 5)
2181 MIPS_SYS(sys_rt_sigreturn
, 0)
2182 MIPS_SYS(sys_rt_sigaction
, 4)
2183 MIPS_SYS(sys_rt_sigprocmask
, 4) /* 4195 */
2184 MIPS_SYS(sys_rt_sigpending
, 2)
2185 MIPS_SYS(sys_rt_sigtimedwait
, 4)
2186 MIPS_SYS(sys_rt_sigqueueinfo
, 3)
2187 MIPS_SYS(sys_rt_sigsuspend
, 0)
2188 MIPS_SYS(sys_pread64
, 6) /* 4200 */
2189 MIPS_SYS(sys_pwrite64
, 6)
2190 MIPS_SYS(sys_chown
, 3)
2191 MIPS_SYS(sys_getcwd
, 2)
2192 MIPS_SYS(sys_capget
, 2)
2193 MIPS_SYS(sys_capset
, 2) /* 4205 */
2194 MIPS_SYS(sys_sigaltstack
, 2)
2195 MIPS_SYS(sys_sendfile
, 4)
2196 MIPS_SYS(sys_ni_syscall
, 0)
2197 MIPS_SYS(sys_ni_syscall
, 0)
2198 MIPS_SYS(sys_mmap2
, 6) /* 4210 */
2199 MIPS_SYS(sys_truncate64
, 4)
2200 MIPS_SYS(sys_ftruncate64
, 4)
2201 MIPS_SYS(sys_stat64
, 2)
2202 MIPS_SYS(sys_lstat64
, 2)
2203 MIPS_SYS(sys_fstat64
, 2) /* 4215 */
2204 MIPS_SYS(sys_pivot_root
, 2)
2205 MIPS_SYS(sys_mincore
, 3)
2206 MIPS_SYS(sys_madvise
, 3)
2207 MIPS_SYS(sys_getdents64
, 3)
2208 MIPS_SYS(sys_fcntl64
, 3) /* 4220 */
2209 MIPS_SYS(sys_ni_syscall
, 0)
2210 MIPS_SYS(sys_gettid
, 0)
2211 MIPS_SYS(sys_readahead
, 5)
2212 MIPS_SYS(sys_setxattr
, 5)
2213 MIPS_SYS(sys_lsetxattr
, 5) /* 4225 */
2214 MIPS_SYS(sys_fsetxattr
, 5)
2215 MIPS_SYS(sys_getxattr
, 4)
2216 MIPS_SYS(sys_lgetxattr
, 4)
2217 MIPS_SYS(sys_fgetxattr
, 4)
2218 MIPS_SYS(sys_listxattr
, 3) /* 4230 */
2219 MIPS_SYS(sys_llistxattr
, 3)
2220 MIPS_SYS(sys_flistxattr
, 3)
2221 MIPS_SYS(sys_removexattr
, 2)
2222 MIPS_SYS(sys_lremovexattr
, 2)
2223 MIPS_SYS(sys_fremovexattr
, 2) /* 4235 */
2224 MIPS_SYS(sys_tkill
, 2)
2225 MIPS_SYS(sys_sendfile64
, 5)
2226 MIPS_SYS(sys_futex
, 6)
2227 MIPS_SYS(sys_sched_setaffinity
, 3)
2228 MIPS_SYS(sys_sched_getaffinity
, 3) /* 4240 */
2229 MIPS_SYS(sys_io_setup
, 2)
2230 MIPS_SYS(sys_io_destroy
, 1)
2231 MIPS_SYS(sys_io_getevents
, 5)
2232 MIPS_SYS(sys_io_submit
, 3)
2233 MIPS_SYS(sys_io_cancel
, 3) /* 4245 */
2234 MIPS_SYS(sys_exit_group
, 1)
2235 MIPS_SYS(sys_lookup_dcookie
, 3)
2236 MIPS_SYS(sys_epoll_create
, 1)
2237 MIPS_SYS(sys_epoll_ctl
, 4)
2238 MIPS_SYS(sys_epoll_wait
, 3) /* 4250 */
2239 MIPS_SYS(sys_remap_file_pages
, 5)
2240 MIPS_SYS(sys_set_tid_address
, 1)
2241 MIPS_SYS(sys_restart_syscall
, 0)
2242 MIPS_SYS(sys_fadvise64_64
, 7)
2243 MIPS_SYS(sys_statfs64
, 3) /* 4255 */
2244 MIPS_SYS(sys_fstatfs64
, 2)
2245 MIPS_SYS(sys_timer_create
, 3)
2246 MIPS_SYS(sys_timer_settime
, 4)
2247 MIPS_SYS(sys_timer_gettime
, 2)
2248 MIPS_SYS(sys_timer_getoverrun
, 1) /* 4260 */
2249 MIPS_SYS(sys_timer_delete
, 1)
2250 MIPS_SYS(sys_clock_settime
, 2)
2251 MIPS_SYS(sys_clock_gettime
, 2)
2252 MIPS_SYS(sys_clock_getres
, 2)
2253 MIPS_SYS(sys_clock_nanosleep
, 4) /* 4265 */
2254 MIPS_SYS(sys_tgkill
, 3)
2255 MIPS_SYS(sys_utimes
, 2)
2256 MIPS_SYS(sys_mbind
, 4)
2257 MIPS_SYS(sys_ni_syscall
, 0) /* sys_get_mempolicy */
2258 MIPS_SYS(sys_ni_syscall
, 0) /* 4270 sys_set_mempolicy */
2259 MIPS_SYS(sys_mq_open
, 4)
2260 MIPS_SYS(sys_mq_unlink
, 1)
2261 MIPS_SYS(sys_mq_timedsend
, 5)
2262 MIPS_SYS(sys_mq_timedreceive
, 5)
2263 MIPS_SYS(sys_mq_notify
, 2) /* 4275 */
2264 MIPS_SYS(sys_mq_getsetattr
, 3)
2265 MIPS_SYS(sys_ni_syscall
, 0) /* sys_vserver */
2266 MIPS_SYS(sys_waitid
, 4)
2267 MIPS_SYS(sys_ni_syscall
, 0) /* available, was setaltroot */
2268 MIPS_SYS(sys_add_key
, 5)
2269 MIPS_SYS(sys_request_key
, 4)
2270 MIPS_SYS(sys_keyctl
, 5)
2271 MIPS_SYS(sys_set_thread_area
, 1)
2272 MIPS_SYS(sys_inotify_init
, 0)
2273 MIPS_SYS(sys_inotify_add_watch
, 3) /* 4285 */
2274 MIPS_SYS(sys_inotify_rm_watch
, 2)
2275 MIPS_SYS(sys_migrate_pages
, 4)
2276 MIPS_SYS(sys_openat
, 4)
2277 MIPS_SYS(sys_mkdirat
, 3)
2278 MIPS_SYS(sys_mknodat
, 4) /* 4290 */
2279 MIPS_SYS(sys_fchownat
, 5)
2280 MIPS_SYS(sys_futimesat
, 3)
2281 MIPS_SYS(sys_fstatat64
, 4)
2282 MIPS_SYS(sys_unlinkat
, 3)
2283 MIPS_SYS(sys_renameat
, 4) /* 4295 */
2284 MIPS_SYS(sys_linkat
, 5)
2285 MIPS_SYS(sys_symlinkat
, 3)
2286 MIPS_SYS(sys_readlinkat
, 4)
2287 MIPS_SYS(sys_fchmodat
, 3)
2288 MIPS_SYS(sys_faccessat
, 3) /* 4300 */
2289 MIPS_SYS(sys_pselect6
, 6)
2290 MIPS_SYS(sys_ppoll
, 5)
2291 MIPS_SYS(sys_unshare
, 1)
2292 MIPS_SYS(sys_splice
, 6)
2293 MIPS_SYS(sys_sync_file_range
, 7) /* 4305 */
2294 MIPS_SYS(sys_tee
, 4)
2295 MIPS_SYS(sys_vmsplice
, 4)
2296 MIPS_SYS(sys_move_pages
, 6)
2297 MIPS_SYS(sys_set_robust_list
, 2)
2298 MIPS_SYS(sys_get_robust_list
, 3) /* 4310 */
2299 MIPS_SYS(sys_kexec_load
, 4)
2300 MIPS_SYS(sys_getcpu
, 3)
2301 MIPS_SYS(sys_epoll_pwait
, 6)
2302 MIPS_SYS(sys_ioprio_set
, 3)
2303 MIPS_SYS(sys_ioprio_get
, 2)
2304 MIPS_SYS(sys_utimensat
, 4)
2305 MIPS_SYS(sys_signalfd
, 3)
2306 MIPS_SYS(sys_ni_syscall
, 0) /* was timerfd */
2307 MIPS_SYS(sys_eventfd
, 1)
2308 MIPS_SYS(sys_fallocate
, 6) /* 4320 */
2309 MIPS_SYS(sys_timerfd_create
, 2)
2310 MIPS_SYS(sys_timerfd_gettime
, 2)
2311 MIPS_SYS(sys_timerfd_settime
, 4)
2312 MIPS_SYS(sys_signalfd4
, 4)
2313 MIPS_SYS(sys_eventfd2
, 2) /* 4325 */
2314 MIPS_SYS(sys_epoll_create1
, 1)
2315 MIPS_SYS(sys_dup3
, 3)
2316 MIPS_SYS(sys_pipe2
, 2)
2317 MIPS_SYS(sys_inotify_init1
, 1)
2318 MIPS_SYS(sys_preadv
, 6) /* 4330 */
2319 MIPS_SYS(sys_pwritev
, 6)
2320 MIPS_SYS(sys_rt_tgsigqueueinfo
, 4)
2321 MIPS_SYS(sys_perf_event_open
, 5)
2322 MIPS_SYS(sys_accept4
, 4)
2323 MIPS_SYS(sys_recvmmsg
, 5) /* 4335 */
2324 MIPS_SYS(sys_fanotify_init
, 2)
2325 MIPS_SYS(sys_fanotify_mark
, 6)
2326 MIPS_SYS(sys_prlimit64
, 4)
2327 MIPS_SYS(sys_name_to_handle_at
, 5)
2328 MIPS_SYS(sys_open_by_handle_at
, 3) /* 4340 */
2329 MIPS_SYS(sys_clock_adjtime
, 2)
2330 MIPS_SYS(sys_syncfs
, 1)
static int do_store_exclusive(CPUMIPSState *env)
{
    target_ulong addr;
    target_ulong page_addr;
    target_ulong val;
    int flags;
    int segv = 0;
    int reg;
    int d;

    page_addr = addr & TARGET_PAGE_MASK;
    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {
        segv = 1;
    } else {
        reg = env->llreg & 0x1f;
        d = (env->llreg & 0x20) != 0;
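        /* llreg holds the destination register of the sc/scd in its low
         * five bits; bit 5 flags a 64-bit (lld/scd) access as opposed to
         * a 32-bit (ll/sc) one. */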
        if (d) {
            segv = get_user_s64(val, addr);
        } else {
            segv = get_user_s32(val, addr);
        }
        if (!segv) {
            if (val != env->llval) {
                env->active_tc.gpr[reg] = 0;
            } else {
                if (d) {
                    segv = put_user_u64(env->llnewval, addr);
                } else {
                    segv = put_user_u32(env->llnewval, addr);
                }
                if (!segv) {
                    env->active_tc.gpr[reg] = 1;
                }
            }
        }
    }
    env->active_tc.PC += 4;
2390 static int do_break(CPUMIPSState
*env
, target_siginfo_t
*info
,
2398 info
->si_signo
= TARGET_SIGFPE
;
2400 info
->si_code
= (code
== BRK_OVERFLOW
) ? FPE_INTOVF
: FPE_INTDIV
;
2401 queue_signal(env
, info
->si_signo
, &*info
);
2405 info
->si_signo
= TARGET_SIGTRAP
;
2407 queue_signal(env
, info
->si_signo
, &*info
);
2415 void cpu_loop(CPUMIPSState
*env
)
2417 CPUState
*cs
= CPU(mips_env_get_cpu(env
));
2418 target_siginfo_t info
;
2421 # ifdef TARGET_ABI_MIPSO32
2422 unsigned int syscall_num
;
2427 trapnr
= cpu_mips_exec(env
);
2431 env
->active_tc
.PC
+= 4;
2432 # ifdef TARGET_ABI_MIPSO32
2433 syscall_num
= env
->active_tc
.gpr
[2] - 4000;
2434 if (syscall_num
>= sizeof(mips_syscall_args
)) {
2435 ret
= -TARGET_ENOSYS
;
2439 abi_ulong arg5
= 0, arg6
= 0, arg7
= 0, arg8
= 0;
2441 nb_args
= mips_syscall_args
[syscall_num
];
2442 sp_reg
= env
->active_tc
.gpr
[29];
2444 /* these arguments are taken from the stack */
2446 if ((ret
= get_user_ual(arg8
, sp_reg
+ 28)) != 0) {
2450 if ((ret
= get_user_ual(arg7
, sp_reg
+ 24)) != 0) {
2454 if ((ret
= get_user_ual(arg6
, sp_reg
+ 20)) != 0) {
2458 if ((ret
= get_user_ual(arg5
, sp_reg
+ 16)) != 0) {
2464 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2465 env
->active_tc
.gpr
[4],
2466 env
->active_tc
.gpr
[5],
2467 env
->active_tc
.gpr
[6],
2468 env
->active_tc
.gpr
[7],
2469 arg5
, arg6
, arg7
, arg8
);
2473 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2474 env
->active_tc
.gpr
[4], env
->active_tc
.gpr
[5],
2475 env
->active_tc
.gpr
[6], env
->active_tc
.gpr
[7],
2476 env
->active_tc
.gpr
[8], env
->active_tc
.gpr
[9],
2477 env
->active_tc
.gpr
[10], env
->active_tc
.gpr
[11]);
2479 if (ret
== -TARGET_QEMU_ESIGRETURN
) {
2480 /* Returning from a successful sigreturn syscall.
2481 Avoid clobbering register state. */
2484 if ((abi_ulong
)ret
>= (abi_ulong
)-1133) {
2485 env
->active_tc
.gpr
[7] = 1; /* error flag */
2488 env
->active_tc
.gpr
[7] = 0; /* error flag */
2490 env
->active_tc
.gpr
[2] = ret
;
2496 info.si_signo = TARGET_SIGSEGV;
2498 /* XXX: check env->error_code */
2499 info.si_code = TARGET_SEGV_MAPERR;
2500 info._sifields._sigfault._addr = env->CP0_BadVAddr;
2501 queue_signal(env, info.si_signo, &info);
2505 info.si_signo = TARGET_SIGILL;
2508 queue_signal(env, info.si_signo, &info);
2510 case EXCP_INTERRUPT:
2511 /* just indicate that signals should be handled asap */
2517 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2520 info.si_signo = sig;
2522 info.si_code = TARGET_TRAP_BRKPT;
2523 queue_signal(env, info.si_signo, &info);
2528 if (do_store_exclusive(env)) {
2529 info.si_signo = TARGET_SIGSEGV;
2531 info.si_code = TARGET_SEGV_MAPERR;
2532 info._sifields._sigfault._addr = env->active_tc.PC;
2533 queue_signal(env, info.si_signo, &info);
2537 info.si_signo = TARGET_SIGILL;
2539 info.si_code = TARGET_ILL_ILLOPC;
2540 queue_signal(env, info.si_signo, &info);
2542 /* The code below was inspired by the MIPS Linux kernel trap
2543 * handling code in arch/mips/kernel/traps.c.
2547 abi_ulong trap_instr;
2550 if (env->hflags & MIPS_HFLAG_M16) {
2551 if (env->insn_flags & ASE_MICROMIPS) {
2552 /* microMIPS mode */
2553 ret = get_user_u16(trap_instr, env->active_tc.PC);
2558 if ((trap_instr >> 10) == 0x11) {
2559 /* 16-bit instruction */
2560 code = trap_instr & 0xf;
2562 /* 32-bit instruction */
2565 ret = get_user_u16(instr_lo,
2566 env->active_tc.PC + 2);
2570 trap_instr = (trap_instr << 16) | instr_lo;
2571 code = ((trap_instr >> 6) & ((1 << 20) - 1));
2572 /* Unfortunately, microMIPS also suffers from
2573 the old assembler bug... */
2574 if (code >= (1 << 10)) {
2580 ret = get_user_u16(trap_instr, env->active_tc.PC);
2584 code = (trap_instr >> 6) & 0x3f;
2587 ret = get_user_ual(trap_instr, env->active_tc.PC);
2592 /* As described in the original Linux kernel code, the
2593 * below checks on 'code' are to work around an old
2596 code = ((trap_instr >> 6) & ((1 << 20) - 1));
2597 if (code >= (1 << 10)) {
2602 if (do_break(env, &info, code) != 0) {
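/* The EXCP_BREAK path re-reads the faulting instruction to recover the break
 * code: 16-bit microMIPS encodings keep it in the low four bits, 32-bit
 * encodings in bits 6..25, and MIPS16e breaks in bits 6..11. Codes of 1024
 * or more are adjusted (the shift itself is on lines elided here) to cope
 * with the old assembler bug mentioned in the comments. */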
2609 abi_ulong trap_instr;
2610 unsigned int code = 0;
2612 if (env->hflags & MIPS_HFLAG_M16) {
2613 /* microMIPS mode */
2616 ret = get_user_u16(instr[0], env->active_tc.PC) ||
2617 get_user_u16(instr[1], env->active_tc.PC + 2);
2619 trap_instr = (instr[0] << 16) | instr[1];
2621 ret = get_user_ual(trap_instr, env->active_tc.PC);
2628 /* The immediate versions don't provide a code. */
2629 if (!(trap_instr & 0xFC000000)) {
2630 if (env->hflags & MIPS_HFLAG_M16) {
2631 /* microMIPS mode */
2632 code = ((trap_instr >> 12) & ((1 << 4) - 1));
2634 code = ((trap_instr >> 6) & ((1 << 10) - 1));
2638 if (do_break(env, &info, code) != 0) {
2645 fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
2647 cpu_dump_state(cs, stderr, fprintf, 0);
2650 process_pending_signals(env);
2655 #ifdef TARGET_OPENRISC
2657 void cpu_loop(CPUOpenRISCState *env)
2659 CPUState *cs = CPU(openrisc_env_get_cpu(env));
2664 trapnr = cpu_exec(env);
2670 qemu_log("\nReset request, exit, pc is %#x\n", env->pc);
2674 qemu_log("\nBus error, exit, pc is %#x\n", env->pc);
2675 gdbsig = TARGET_SIGBUS;
2679 cpu_dump_state(cs, stderr, fprintf, 0);
2680 gdbsig = TARGET_SIGSEGV;
2683 qemu_log("\nTick time interrupt pc is %#x\n", env->pc);
2686 qemu_log("\nAlignment pc is %#x\n", env->pc);
2687 gdbsig = TARGET_SIGBUS;
2690 qemu_log("\nIllegal instruction pc is %#x\n", env->pc);
2691 gdbsig = TARGET_SIGILL;
2694 qemu_log("\nExternal interrupt pc is %#x\n", env->pc);
2698 qemu_log("\nTLB miss\n");
2701 qemu_log("\nRange\n");
2702 gdbsig = TARGET_SIGSEGV;
2705 env->pc += 4; /* 0xc00; */
2706 env->gpr[11] = do_syscall(env,
2707 env->gpr[11], /* return value */
2708 env->gpr[3], /* r3 - r7 are params */
2716 qemu_log("\nFloating point error\n");
2719 qemu_log("\nTrap\n");
2720 gdbsig = TARGET_SIGTRAP;
2726 qemu_log("\nqemu: unhandled CPU exception %#x - aborting\n",
2728 cpu_dump_state(cs, stderr, fprintf, 0);
2729 gdbsig = TARGET_SIGILL;
2733 gdb_handlesig(cs, gdbsig);
2734 if (gdbsig != TARGET_SIGTRAP) {
2739 process_pending_signals(env);
2743 #endif /* TARGET_OPENRISC */
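/* The OpenRISC loop funnels fatal exceptions through 'gdbsig': the signal is
 * first offered to an attached gdb via gdb_handlesig(), and unless it was a
 * plain SIGTRAP the process is then terminated (the exit path is on lines
 * omitted from this excerpt). */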
2746 void cpu_loop(CPUSH4State *env)
2748 CPUState *cs = CPU(sh_env_get_cpu(env));
2750 target_siginfo_t info;
2754 trapnr = cpu_sh4_exec (env);
2760 ret = do_syscall(env,
2769 env->gregs[0] = ret;
2771 case EXCP_INTERRUPT:
2772 /* just indicate that signals should be handled asap */
2778 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2781 info.si_signo = sig;
2783 info.si_code = TARGET_TRAP_BRKPT;
2784 queue_signal(env, info.si_signo, &info);
2790 info.si_signo = TARGET_SIGSEGV;
2792 info.si_code = TARGET_SEGV_MAPERR;
2793 info._sifields._sigfault._addr = env->tea;
2794 queue_signal(env, info.si_signo, &info);
2798 printf ("Unhandled trap: 0x%x\n", trapnr);
2799 cpu_dump_state(cs, stderr, fprintf, 0);
2802 process_pending_signals (env);
2808 void cpu_loop(CPUCRISState *env)
2810 CPUState *cs = CPU(cris_env_get_cpu(env));
2812 target_siginfo_t info;
2816 trapnr = cpu_cris_exec (env);
2821 info.si_signo = TARGET_SIGSEGV;
2823 /* XXX: check env->error_code */
2824 info.si_code = TARGET_SEGV_MAPERR;
2825 info._sifields._sigfault._addr = env->pregs[PR_EDA];
2826 queue_signal(env, info.si_signo, &info);
2829 case EXCP_INTERRUPT:
2830 /* just indicate that signals should be handled asap */
2833 ret = do_syscall(env,
2842 env->regs[10] = ret;
2848 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2851 info.si_signo = sig;
2853 info.si_code = TARGET_TRAP_BRKPT;
2854 queue_signal(env, info.si_signo, &info);
2859 printf ("Unhandled trap: 0x%x\n", trapnr);
2860 cpu_dump_state(cs, stderr, fprintf, 0);
2863 process_pending_signals (env);
2868 #ifdef TARGET_MICROBLAZE
2869 void cpu_loop(CPUMBState *env)
2871 CPUState *cs = CPU(mb_env_get_cpu(env));
2873 target_siginfo_t info;
2877 trapnr = cpu_mb_exec (env);
2882 info.si_signo = TARGET_SIGSEGV;
2884 /* XXX: check env->error_code */
2885 info.si_code = TARGET_SEGV_MAPERR;
2886 info._sifields._sigfault._addr = 0;
2887 queue_signal(env, info.si_signo, &info);
2890 case EXCP_INTERRUPT:
2891 /* just indicate that signals should be handled asap */
2894 /* Return address is 4 bytes after the call. */
2896 env->sregs[SR_PC] = env->regs[14];
2897 ret = do_syscall(env,
2909 env->regs[17] = env->sregs[SR_PC] + 4;
2910 if (env->iflags & D_FLAG) {
2911 env->sregs[SR_ESR] |= 1 << 12;
2912 env->sregs[SR_PC] -= 4;
2913 /* FIXME: if branch was immed, replay the imm as well. */
2916 env->iflags &= ~(IMM_FLAG | D_FLAG);
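/* If the hardware exception hit in a branch delay slot (D_FLAG), the
 * delay-slot bit (1 << 12) is set in SR_ESR and PC is wound back by 4 so the
 * branch is re-executed after the signal handler returns; the IMM and D
 * flags are cleared before the exception class in SR_ESR is decoded below. */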
2918 switch (env->sregs[SR_ESR] & 31) {
2919 case ESR_EC_DIVZERO:
2920 info.si_signo = TARGET_SIGFPE;
2922 info.si_code = TARGET_FPE_FLTDIV;
2923 info._sifields._sigfault._addr = 0;
2924 queue_signal(env, info.si_signo, &info);
2927 info.si_signo = TARGET_SIGFPE;
2929 if (env->sregs[SR_FSR] & FSR_IO) {
2930 info.si_code = TARGET_FPE_FLTINV;
2932 if (env->sregs[SR_FSR] & FSR_DZ) {
2933 info.si_code = TARGET_FPE_FLTDIV;
2935 info._sifields._sigfault._addr = 0;
2936 queue_signal(env, info.si_signo, &info);
2939 printf ("Unhandled hw-exception: 0x%x\n",
2940 env->sregs[SR_ESR] & ESR_EC_MASK);
2941 cpu_dump_state(cs, stderr, fprintf, 0);
2950 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2953 info.si_signo = sig;
2955 info.si_code = TARGET_TRAP_BRKPT;
2956 queue_signal(env, info.si_signo, &info);
2961 printf ("Unhandled trap: 0x%x\n", trapnr);
2962 cpu_dump_state(cs, stderr, fprintf, 0);
2965 process_pending_signals (env);
2972 void cpu_loop(CPUM68KState *env)
2974 CPUState *cs = CPU(m68k_env_get_cpu(env));
2977 target_siginfo_t info;
2978 TaskState *ts = cs->opaque;
2982 trapnr = cpu_m68k_exec(env);
2987 if (ts->sim_syscalls) {
2989 get_user_u16(nr, env->pc + 2);
2991 do_m68k_simcall(env, nr);
2997 case EXCP_HALT_INSN:
2998 /* Semihosting syscall. */
3000 do_m68k_semihosting(env, env->dregs[0]);
3004 case EXCP_UNSUPPORTED:
3006 info.si_signo = TARGET_SIGILL;
3008 info.si_code = TARGET_ILL_ILLOPN;
3009 info._sifields._sigfault._addr = env->pc;
3010 queue_signal(env, info.si_signo, &info);
3014 ts->sim_syscalls = 0;
3017 env->dregs[0] = do_syscall(env,
3028 case EXCP_INTERRUPT:
3029 /* just indicate that signals should be handled asap */
3033 info.si_signo = TARGET_SIGSEGV;
3035 /* XXX: check env->error_code */
3036 info.si_code = TARGET_SEGV_MAPERR;
3037 info._sifields._sigfault._addr = env->mmu.ar;
3038 queue_signal(env, info.si_signo, &info);
3045 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
3048 info.si_signo = sig;
3050 info.si_code = TARGET_TRAP_BRKPT;
3051 queue_signal(env, info.si_signo, &info);
3056 fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
3058 cpu_dump_state(cs, stderr, fprintf, 0);
3061 process_pending_signals(env);
3064 #endif /* TARGET_M68K */
3067 static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)
3069 target_ulong addr, val, tmp;
3070 target_siginfo_t info;
3073 addr = env->lock_addr;
3074 tmp = env->lock_st_addr;
3075 env->lock_addr = -1;
3076 env->lock_st_addr = 0;
3082 if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
3086 if (val == env->lock_value) {
3088 if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {
3105 info.si_signo = TARGET_SIGSEGV;
3107 info.si_code = TARGET_SEGV_MAPERR;
3108 info._sifields._sigfault._addr = addr;
3109 queue_signal(env, TARGET_SIGSEGV, &info);
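/* The Alpha LDx_L/STx_C pair is resolved here: the lock state is cleared up
 * front, the locked address is re-read, and the pending data captured from
 * env->lock_st_addr is written back only if memory still holds
 * env->lock_value; any faulting access raises SIGSEGV at that address. */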
3112 void cpu_loop(CPUAlphaState *env)
3114 CPUState *cs = CPU(alpha_env_get_cpu(env));
3116 target_siginfo_t info;
3121 trapnr = cpu_alpha_exec (env);
3124 /* All of the traps imply a transition through PALcode, which
3125 implies an REI instruction has been executed. Which means
3126 that the intr_flag should be cleared. */
3131 fprintf(stderr, "Reset requested. Exit\n");
3135 fprintf(stderr, "Machine check exception. Exit\n");
3138 case EXCP_SMP_INTERRUPT:
3139 case EXCP_CLK_INTERRUPT:
3140 case EXCP_DEV_INTERRUPT:
3141 fprintf(stderr, "External interrupt. Exit\n");
3145 env->lock_addr = -1;
3146 info.si_signo = TARGET_SIGSEGV;
3148 info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
3149 ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
3150 info._sifields._sigfault._addr = env->trap_arg0;
3151 queue_signal(env, info.si_signo, &info);
3154 env->lock_addr = -1;
3155 info.si_signo = TARGET_SIGBUS;
3157 info.si_code = TARGET_BUS_ADRALN;
3158 info._sifields._sigfault._addr = env->trap_arg0;
3159 queue_signal(env, info.si_signo, &info);
3163 env->lock_addr = -1;
3164 info.si_signo = TARGET_SIGILL;
3166 info.si_code = TARGET_ILL_ILLOPC;
3167 info._sifields._sigfault._addr = env->pc;
3168 queue_signal(env, info.si_signo, &info);
3171 env->lock_addr = -1;
3172 info.si_signo = TARGET_SIGFPE;
3174 info.si_code = TARGET_FPE_FLTINV;
3175 info._sifields._sigfault._addr = env->pc;
3176 queue_signal(env, info.si_signo, &info);
3179 /* No-op. Linux simply re-enables the FPU. */
3182 env->lock_addr = -1;
3183 switch (env->error_code) {
3186 info.si_signo = TARGET_SIGTRAP;
3188 info.si_code = TARGET_TRAP_BRKPT;
3189 info._sifields._sigfault._addr = env->pc;
3190 queue_signal(env, info.si_signo, &info);
3194 info.si_signo = TARGET_SIGTRAP;
3197 info._sifields._sigfault._addr = env->pc;
3198 queue_signal(env, info.si_signo, &info);
3202 trapnr = env->ir[IR_V0];
3203 sysret = do_syscall(env, trapnr,
3204 env->ir[IR_A0], env->ir[IR_A1],
3205 env->ir[IR_A2], env->ir[IR_A3],
3206 env->ir[IR_A4], env->ir[IR_A5],
3208 if (trapnr == TARGET_NR_sigreturn
3209 || trapnr == TARGET_NR_rt_sigreturn) {
3212 /* Syscall writes 0 to V0 to bypass error check, similar
3213 to how this is handled internal to Linux kernel.
3214 (Ab)use trapnr temporarily as boolean indicating error. */
3215 trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
3216 env->ir[IR_V0] = (trapnr ? -sysret : sysret);
3217 env->ir[IR_A3] = trapnr;
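/* Alpha syscall results also travel out of band: $a3 is 0 on success and 1 on
 * failure, in which case $v0 carries the positive errno. sigreturn and
 * rt_sigreturn are special-cased above so the freshly restored register file
 * is not overwritten by this bookkeeping. */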
3221 /* ??? We can probably elide the code using page_unprotect
3222 that is checking for self-modifying code. Instead we
3223 could simply call tb_flush here. Until we work out the
3224 changes required to turn off the extra write protection,
3225 this can be a no-op. */
3229 /* Handled in the translator for usermode. */
3233 /* Handled in the translator for usermode. */
3237 info.si_signo = TARGET_SIGFPE;
3238 switch (env->ir[IR_A0]) {
3239 case TARGET_GEN_INTOVF:
3240 info.si_code = TARGET_FPE_INTOVF;
3242 case TARGET_GEN_INTDIV:
3243 info.si_code = TARGET_FPE_INTDIV;
3245 case TARGET_GEN_FLTOVF:
3246 info.si_code = TARGET_FPE_FLTOVF;
3248 case TARGET_GEN_FLTUND:
3249 info.si_code = TARGET_FPE_FLTUND;
3251 case TARGET_GEN_FLTINV:
3252 info.si_code = TARGET_FPE_FLTINV;
3254 case TARGET_GEN_FLTINE:
3255 info.si_code = TARGET_FPE_FLTRES;
3257 case TARGET_GEN_ROPRAND:
3261 info.si_signo = TARGET_SIGTRAP;
3266 info._sifields._sigfault._addr = env->pc;
3267 queue_signal(env, info.si_signo, &info);
3274 info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
3275 if (info.si_signo) {
3276 env->lock_addr = -1;
3278 info.si_code = TARGET_TRAP_BRKPT;
3279 queue_signal(env, info.si_signo, &info);
3284 do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
3286 case EXCP_INTERRUPT:
3287 /* Just indicate that signals should be handled asap. */
3290 printf ("Unhandled trap: 0x%x\n", trapnr);
3291 cpu_dump_state(cs, stderr, fprintf, 0);
3294 process_pending_signals (env);
3297 #endif /* TARGET_ALPHA */
3300 void cpu_loop(CPUS390XState *env)
3302 CPUState *cs = CPU(s390_env_get_cpu(env));
3304 target_siginfo_t info;
3309 trapnr = cpu_s390x_exec(env);
3312 case EXCP_INTERRUPT:
3313 /* Just indicate that signals should be handled asap. */
3317 n = env->int_svc_code;
3319 /* syscalls > 255 */
3322 env->psw.addr += env->int_svc_ilen;
3323 env->regs[2] = do_syscall(env, n, env->regs[2], env->regs[3],
3324 env->regs[4], env->regs[5],
3325 env->regs[6], env->regs[7], 0, 0);
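/* The SVC interruption code only holds syscall numbers up to 255; for larger
 * numbers it reads as zero and the number is taken from r1 instead (that
 * branch is on lines omitted here). The PSW address is advanced past the SVC
 * instruction before dispatch and the result comes back in r2. */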
3329 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
3331 n = TARGET_TRAP_BRKPT;
3336 n = env->int_pgm_code;
3339 case PGM_PRIVILEGED:
3340 sig = TARGET_SIGILL;
3341 n = TARGET_ILL_ILLOPC;
3343 case PGM_PROTECTION:
3344 case PGM_ADDRESSING:
3345 sig = TARGET_SIGSEGV;
3346 /* XXX: check env->error_code */
3347 n = TARGET_SEGV_MAPERR;
3348 addr = env->__excp_addr;
3351 case PGM_SPECIFICATION:
3352 case PGM_SPECIAL_OP:
3355 sig = TARGET_SIGILL;
3356 n = TARGET_ILL_ILLOPN;
3359 case PGM_FIXPT_OVERFLOW:
3360 sig = TARGET_SIGFPE;
3361 n = TARGET_FPE_INTOVF;
3363 case PGM_FIXPT_DIVIDE:
3364 sig = TARGET_SIGFPE;
3365 n = TARGET_FPE_INTDIV;
3369 n = (env->fpc >> 8) & 0xff;
3371 /* compare-and-trap */
3374 /* An IEEE exception, simulated or otherwise. */
3376 n = TARGET_FPE_FLTINV;
3377 } else if (n & 0x40) {
3378 n = TARGET_FPE_FLTDIV;
3379 } else if (n & 0x20) {
3380 n = TARGET_FPE_FLTOVF;
3381 } else if (n & 0x10) {
3382 n = TARGET_FPE_FLTUND;
3383 } else if (n & 0x08) {
3384 n = TARGET_FPE_FLTRES;
3386 /* ??? Quantum exception; BFP, DFP error. */
3389 sig = TARGET_SIGFPE;
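/* For PGM_DATA the data-exception code (DXC) is extracted from bits 8..15 of
 * the FPC register; its flag bits then select the closest SIGFPE si_code
 * (invalid operation, divide, overflow, underflow, inexact). */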
3394 fprintf(stderr, "Unhandled program exception: %#x\n", n);
3395 cpu_dump_state(cs, stderr, fprintf, 0);
3401 addr = env->psw.addr;
3403 info.si_signo = sig;
3406 info._sifields._sigfault._addr = addr;
3407 queue_signal(env, info.si_signo, &info);
3411 fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
3412 cpu_dump_state(cs, stderr, fprintf, 0);
3415 process_pending_signals (env);
3419 #endif /* TARGET_S390X */
3421 THREAD CPUState *thread_cpu;
3423 void task_settid(TaskState *ts)
3425 if (ts->ts_tid == 0) {
3426 ts->ts_tid = (pid_t)syscall(SYS_gettid);
3430 void stop_all_tasks(void)
3433 * We trust that when using NPTL, start_exclusive()
3434 * handles thread stopping correctly.
3439 /* Assumes contents are already zeroed. */
3440 void init_task_state(TaskState *ts)
3445 ts->first_free = ts->sigqueue_table;
3446 for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
3447 ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1];
3449 ts->sigqueue_table[i].next = NULL;
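/* init_task_state() threads the fixed-size sigqueue_table into a singly
 * linked free list: first_free points at entry 0, each entry points at its
 * successor, and the final entry is NULL-terminated so pending signals can
 * later be queued without any allocation. */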
3452 CPUArchState *cpu_copy(CPUArchState *env)
3454 CPUState *cpu = ENV_GET_CPU(env);
3455 CPUArchState *new_env = cpu_init(cpu_model);
3461 fprintf(stderr, "cpu_copy: Failed to create new CPU\n");
3465 new_cpu = ENV_GET_CPU(new_env);
3467 /* Reset non arch specific state */
3470 memcpy(new_env, env, sizeof(CPUArchState));
3472 /* Clone all break/watchpoints.
3473 Note: Once we support ptrace with hw-debug register access, make sure
3474 BP_CPU break/watchpoints are handled correctly on clone. */
3475 QTAILQ_INIT(&cpu->breakpoints);
3476 QTAILQ_INIT(&cpu->watchpoints);
3477 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
3478 cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
3480 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
3481 cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
3487 static void handle_arg_help(const char *arg)
3492 static void handle_arg_log(const char *arg)
3496 mask = qemu_str_to_log_mask(arg);
3498 qemu_print_log_usage(stdout);
3504 static void handle_arg_log_filename(const char *arg)
3506 qemu_set_log_filename(arg);
3509 static void handle_arg_set_env(const char *arg)
3511 char *r, *p, *token;
3512 r = p = strdup(arg);
3513 while ((token = strsep(&p, ",")) != NULL) {
3514 if (envlist_setenv(envlist, token) != 0) {
3521 static void handle_arg_unset_env(const char *arg)
3523 char *r, *p, *token;
3524 r = p = strdup(arg);
3525 while ((token = strsep(&p, ",")) != NULL) {
3526 if (envlist_unsetenv(envlist, token) != 0) {
3533 static void handle_arg_argv0(const char *arg)
3535 argv0 = strdup(arg);
3538 static void handle_arg_stack_size(const char *arg)
3541 guest_stack_size = strtoul(arg, &p, 0);
3542 if (guest_stack_size == 0) {
3547 guest_stack_size *= 1024 * 1024;
3548 } else if (*p == 'k' || *p == 'K') {
3549 guest_stack_size *= 1024;
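/* The size argument accepts a scale suffix: the branch ending in
 * '* 1024 * 1024' handles an 'M' suffix (its condition is on a line elided
 * from this excerpt) and 'k'/'K' scales by 1024, so '-s 16M' or
 * QEMU_STACK_SIZE=16M requests a 16 MiB guest stack. */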
3553 static void handle_arg_ld_prefix(const char *arg)
3555 interp_prefix = strdup(arg);
3558 static void handle_arg_pagesize(const char *arg)
3560 qemu_host_page_size = atoi(arg);
3561 if (qemu_host_page_size == 0 ||
3562 (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
3563 fprintf(stderr, "page size must be a power of two\n");
3568 static void handle_arg_randseed(const char *arg)
3570 unsigned long long seed;
3572 if (parse_uint_full(arg, &seed, 0) != 0 || seed > UINT_MAX) {
3573 fprintf(stderr, "Invalid seed number: %s\n", arg);
3579 static void handle_arg_gdb(const char *arg)
3581 gdbstub_port = atoi(arg);
3584 static void handle_arg_uname(const char *arg)
3586 qemu_uname_release = strdup(arg);
3589 static void handle_arg_cpu(const char *arg)
3591 cpu_model = strdup(arg);
3592 if (cpu_model == NULL || is_help_option(cpu_model)) {
3593 /* XXX: implement xxx_cpu_list for targets that still miss it */
3594 #if defined(cpu_list)
3595 cpu_list(stdout, &fprintf);
3601 #if defined(CONFIG_USE_GUEST_BASE)
3602 static void handle_arg_guest_base(const char *arg)
3604 guest_base = strtol(arg, NULL, 0);
3605 have_guest_base = 1;
3608 static void handle_arg_reserved_va(const char *arg)
3612 reserved_va = strtoul(arg, &p, 0);
3626 unsigned long unshifted = reserved_va;
3628 reserved_va <<= shift;
3629 if (((reserved_va >> shift) != unshifted)
3630 #if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
3631 || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
3634 fprintf(stderr, "Reserved virtual address too big\n");
3639 fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
3645 static void handle_arg_singlestep(const char *arg)
3650 static void handle_arg_strace(const char *arg)
3655 static void handle_arg_version(const char *arg)
3657 printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION
3658 ", Copyright (c) 2003-2008 Fabrice Bellard\n");
3662 struct qemu_argument {
3666 void (*handle_opt)(const char *arg);
3667 const char *example;
3671 static const struct qemu_argument arg_table[] = {
3672 {"h", "", false, handle_arg_help,
3673 "", "print this help"},
3674 {"g", "QEMU_GDB", true, handle_arg_gdb,
3675 "port", "wait gdb connection to 'port'"},
3676 {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix,
3677 "path", "set the elf interpreter prefix to 'path'"},
3678 {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size,
3679 "size", "set the stack size to 'size' bytes"},
3680 {"cpu", "QEMU_CPU", true, handle_arg_cpu,
3681 "model", "select CPU (-cpu help for list)"},
3682 {"E", "QEMU_SET_ENV", true, handle_arg_set_env,
3683 "var=value", "sets targets environment variable (see below)"},
3684 {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env,
3685 "var", "unsets targets environment variable (see below)"},
3686 {"0", "QEMU_ARGV0", true, handle_arg_argv0,
3687 "argv0", "forces target process argv[0] to be 'argv0'"},
3688 {"r", "QEMU_UNAME", true, handle_arg_uname,
3689 "uname", "set qemu uname release string to 'uname'"},
3690 #if defined(CONFIG_USE_GUEST_BASE)
3691 {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base,
3692 "address", "set guest_base address to 'address'"},
3693 {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va,
3694 "size", "reserve 'size' bytes for guest virtual address space"},
3696 {"d", "QEMU_LOG", true, handle_arg_log,
3697 "item[,...]", "enable logging of specified items "
3698 "(use '-d help' for a list of items)"},
3699 {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename,
3700 "logfile", "write logs to 'logfile' (default stderr)"},
3701 {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize,
3702 "pagesize", "set the host page size to 'pagesize'"},
3703 {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep,
3704 "", "run in singlestep mode"},
3705 {"strace", "QEMU_STRACE", false, handle_arg_strace,
3706 "", "log system calls"},
3707 {"seed", "QEMU_RAND_SEED", true, handle_arg_randseed,
3708 "", "Seed for pseudo-random number generator"},
3709 {"version", "QEMU_VERSION", false, handle_arg_version,
3710 "", "display version information and exit"},
3711 {NULL, NULL, false, NULL, NULL, NULL}
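/* Every entry in arg_table can be supplied either as a flag or through its
 * environment variable. For example (paths illustrative only):
 *     qemu-arm -L /usr/arm-linux-gnueabihf -E LD_LIBRARY_PATH=/opt/lib ./a.out
 * behaves the same as
 *     QEMU_LD_PREFIX=/usr/arm-linux-gnueabihf QEMU_SET_ENV=LD_LIBRARY_PATH=/opt/lib qemu-arm ./a.out
 * because parse_args() below applies the environment variables first and the
 * command-line options afterwards. */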
3714 static void usage(void)
3716 const struct qemu_argument *arginfo;
3720 printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n"
3721 "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n"
3723 "Options and associated environment variables:\n"
3726 /* Calculate column widths. We must always have at least enough space
3727 * for the column header.
3729 maxarglen = strlen("Argument");
3730 maxenvlen = strlen("Env-variable");
3732 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
3733 int arglen = strlen(arginfo->argv);
3734 if (arginfo->has_arg) {
3735 arglen += strlen(arginfo->example) + 1;
3737 if (strlen(arginfo->env) > maxenvlen) {
3738 maxenvlen = strlen(arginfo->env);
3740 if (arglen > maxarglen) {
3745 printf("%-*s %-*s Description\n", maxarglen+1, "Argument",
3746 maxenvlen, "Env-variable");
3748 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
3749 if (arginfo->has_arg) {
3750 printf("-%s %-*s %-*s %s\n", arginfo->argv,
3751 (int)(maxarglen - strlen(arginfo->argv) - 1),
3752 arginfo->example, maxenvlen, arginfo->env, arginfo->help);
3754 printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv,
3755 maxenvlen, arginfo->env,
3762 "QEMU_LD_PREFIX = %s\n"
3763 "QEMU_STACK_SIZE = %ld byte\n",
3768 "You can use -E and -U options or the QEMU_SET_ENV and\n"
3769 "QEMU_UNSET_ENV environment variables to set and unset\n"
3770 "environment variables for the target process.\n"
3771 "It is possible to provide several variables by separating them\n"
3772 "by commas in getsubopt(3) style. Additionally it is possible to\n"
3773 "provide the -E and -U options multiple times.\n"
3774 "The following lines are equivalent:\n"
3775 "    -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
3776 "    -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
3777 "    QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
3778 "Note that if you provide several changes to a single variable\n"
3779 "the last change will stay in effect.\n");
3784 static int parse_args(int argc, char **argv)
3788 const struct qemu_argument *arginfo;
3790 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
3791 if (arginfo->env == NULL) {
3795 r = getenv(arginfo->env);
3797 arginfo->handle_opt(r);
3803 if (optind >= argc) {
3812 if (!strcmp(r, "-")) {
3816 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
3817 if (!strcmp(r, arginfo->argv)) {
3818 if (arginfo->has_arg) {
3819 if (optind >= argc) {
3822 arginfo->handle_opt(argv[optind]);
3825 arginfo->handle_opt(NULL);
3831 /* no option matched the current argv */
3832 if (arginfo->handle_opt == NULL) {
3837 if (optind >= argc) {
3841 filename = argv[optind];
3842 exec_path = argv[optind];
3847 int main(int argc, char **argv, char **envp)
3849 struct target_pt_regs regs1, *regs = &regs1;
3850 struct image_info info1, *info = &info1;
3851 struct linux_binprm bprm;
3856 char **target_environ, **wrk;
3863 module_call_init(MODULE_INIT_QOM);
3865 if ((envlist = envlist_create()) == NULL) {
3866 (void) fprintf(stderr, "Unable to allocate envlist\n");
3870 /* add current environment into the list */
3871 for (wrk = environ; *wrk != NULL; wrk++) {
3872 (void) envlist_setenv(envlist, *wrk);
3875 /* Read the stack limit from the kernel. If it's "unlimited",
3876 then we can do little else besides use the default. */
3879 if (getrlimit(RLIMIT_STACK, &lim) == 0
3880 && lim.rlim_cur != RLIM_INFINITY
3881 && lim.rlim_cur == (target_long)lim.rlim_cur) {
3882 guest_stack_size = lim.rlim_cur;
3887 #if defined(cpudef_setup)
3888 cpudef_setup(); /* parse cpu definitions in target config file (TBD) */
3893 optind = parse_args(argc, argv);
3896 memset(regs, 0, sizeof(struct target_pt_regs));
3898 /* Zero out image_info */
3899 memset(info, 0, sizeof(struct image_info));
3901 memset(&bprm, 0, sizeof (bprm));
3903 /* Scan interp_prefix dir for replacement files. */
3904 init_paths(interp_prefix);
3906 init_qemu_uname_release();
3908 if (cpu_model == NULL) {
3909 #if defined(TARGET_I386)
3910 #ifdef TARGET_X86_64
3911 cpu_model = "qemu64";
3913 cpu_model = "qemu32";
3915 #elif defined(TARGET_ARM)
3917 #elif defined(TARGET_UNICORE32)
3919 #elif defined(TARGET_M68K)
3921 #elif defined(TARGET_SPARC)
3922 #ifdef TARGET_SPARC64
3923 cpu_model = "TI UltraSparc II";
3925 cpu_model = "Fujitsu MB86904";
3927 #elif defined(TARGET_MIPS)
3928 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
3933 #elif defined TARGET_OPENRISC
3934 cpu_model = "or1200";
3935 #elif defined(TARGET_PPC)
3936 # ifdef TARGET_PPC64
3937 cpu_model = "POWER7";
3946 cpu_exec_init_all();
3947 /* NOTE: we need to init the CPU at this stage to get
3948 qemu_host_page_size */
3949 env = cpu_init(cpu_model);
3951 fprintf(stderr, "Unable to find CPU definition\n");
3954 cpu = ENV_GET_CPU(env);
3959 if (getenv("QEMU_STRACE")) {
3963 if (getenv("QEMU_RAND_SEED")) {
3964 handle_arg_randseed(getenv("QEMU_RAND_SEED"));
3967 target_environ = envlist_to_environ(envlist, NULL);
3968 envlist_free(envlist);
3970 #if defined(CONFIG_USE_GUEST_BASE)
3972 * Now that page sizes are configured in cpu_init() we can do
3973 * proper page alignment for guest_base.
3975 guest_base = HOST_PAGE_ALIGN(guest_base);
3977 if (reserved_va || have_guest_base) {
3978 guest_base = init_guest_space(guest_base, reserved_va, 0,
3980 if (guest_base == (unsigned long)-1) {
3981 fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
3982 "space for use as guest address space (check your virtual "
3983 "memory ulimit setting or reserve less using -R option)\n",
3989 mmap_next_start = reserved_va;
3992 #endif /* CONFIG_USE_GUEST_BASE */
3995 * Read in mmap_min_addr kernel parameter. This value is used
3996 * when loading the ELF image to determine whether guest_base
3997 * is needed. It is also used in mmap_find_vma.
4002 if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
4004 if (fscanf(fp, "%lu", &tmp) == 1) {
4005 mmap_min_addr = tmp;
4006 qemu_log("host mmap_min_addr=0x%lx\n", mmap_min_addr);
4013 * Prepare copy of argv vector for target.
4015 target_argc = argc - optind;
4016 target_argv = calloc(target_argc + 1, sizeof (char *));
4017 if (target_argv == NULL) {
4018 (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");
4023 * If argv0 is specified (using '-0' switch) we replace
4024 * argv[0] pointer with the given one.
4027 if (argv0 != NULL) {
4028 target_argv[i++] = strdup(argv0);
4030 for (; i < target_argc; i++) {
4031 target_argv[i] = strdup(argv[optind + i]);
4033 target_argv[target_argc] = NULL;
4035 ts = g_malloc0 (sizeof(TaskState));
4036 init_task_state(ts);
4037 /* build Task State */
4043 execfd = qemu_getauxval(AT_EXECFD);
4045 execfd = open(filename, O_RDONLY);
4047 printf("Error while loading %s: %s\n", filename, strerror(errno));
4052 ret = loader_exec(execfd, filename, target_argv, target_environ, regs,
4055 printf("Error while loading %s: %s\n", filename, strerror(-ret));
4059 for (wrk = target_environ; *wrk; wrk++) {
4063 free(target_environ);
4065 if (qemu_log_enabled()) {
4066 #if defined(CONFIG_USE_GUEST_BASE)
4067 qemu_log("guest_base 0x%lx\n", guest_base);
4071 qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
4072 qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code);
4073 qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n",
4075 qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n",
4077 qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data);
4078 qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
4080 qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk);
4081 qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry);
4084 target_set_brk(info->brk);
4088 #if defined(CONFIG_USE_GUEST_BASE)
4089 /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
4090 generating the prologue until now so that the prologue can take
4091 the real value of GUEST_BASE into account. */
4092 tcg_prologue_init(&tcg_ctx);
4095 #if defined(TARGET_I386)
4096 env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
4097 env->hflags |= HF_PE_MASK | HF_CPL_MASK;
4098 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4099 env->cr[4] |= CR4_OSFXSR_MASK;
4100 env->hflags |= HF_OSFXSR_MASK;
4102 #ifndef TARGET_ABI32
4103 /* enable 64 bit mode if possible */
4104 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
4105 fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
4108 env->cr[4] |= CR4_PAE_MASK;
4109 env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
4110 env->hflags |= HF_LMA_MASK;
4113 /* flags setup : we activate the IRQs by default as in user mode */
4114 env->eflags |= IF_MASK;
4116 /* linux register setup */
4117 #ifndef TARGET_ABI32
4118 env->regs[R_EAX] = regs->rax;
4119 env->regs[R_EBX] = regs->rbx;
4120 env->regs[R_ECX] = regs->rcx;
4121 env->regs[R_EDX] = regs->rdx;
4122 env->regs[R_ESI] = regs->rsi;
4123 env->regs[R_EDI] = regs->rdi;
4124 env->regs[R_EBP] = regs->rbp;
4125 env->regs[R_ESP] = regs->rsp;
4126 env->eip = regs->rip;
4128 env->regs[R_EAX] = regs->eax;
4129 env->regs[R_EBX] = regs->ebx;
4130 env->regs[R_ECX] = regs->ecx;
4131 env->regs[R_EDX] = regs->edx;
4132 env->regs[R_ESI] = regs->esi;
4133 env->regs[R_EDI] = regs->edi;
4134 env->regs[R_EBP] = regs->ebp;
4135 env->regs[R_ESP] = regs->esp;
4136 env->eip = regs->eip;
4139 /* linux interrupt setup */
4140 #ifndef TARGET_ABI32
4141 env->idt.limit = 511;
4143 env->idt.limit = 255;
4145 env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
4146 PROT_READ|PROT_WRITE,
4147 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4148 idt_table = g2h(env->idt.base);
4171 /* linux segment setup */
4173 uint64_t *gdt_table;
4174 env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
4175 PROT_READ|PROT_WRITE,
4176 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4177 env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
4178 gdt_table = g2h(env->gdt.base);
4180 write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
4181 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
4182 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
4184 /* 64 bit code segment */
4185 write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
4186 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
4188 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
4190 write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
4191 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
4192 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
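/* The descriptors written above give the guest flat ring-3 segments: base 0
 * and a 20-bit limit of 0xfffff with the granularity bit set so they cover
 * the whole 32-bit address space, present, non-system, DPL 3, and type 0xa
 * (execute/read code) or 0x2 (read/write data). The 64-bit code-segment
 * variant additionally sets the long-mode bit on a line elided from this
 * excerpt. */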
4194 cpu_x86_load_seg(env, R_CS, __USER_CS);
4195 cpu_x86_load_seg(env, R_SS, __USER_DS);
4197 cpu_x86_load_seg(env, R_DS, __USER_DS);
4198 cpu_x86_load_seg(env, R_ES, __USER_DS);
4199 cpu_x86_load_seg(env, R_FS, __USER_DS);
4200 cpu_x86_load_seg(env, R_GS, __USER_DS);
4201 /* This hack makes Wine work... */
4202 env->segs[R_FS].selector = 0;
4204 cpu_x86_load_seg(env, R_DS, 0);
4205 cpu_x86_load_seg(env, R_ES, 0);
4206 cpu_x86_load_seg(env, R_FS, 0);
4207 cpu_x86_load_seg(env, R_GS, 0);
4209 #elif defined(TARGET_AARCH64)
4213 if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
4215 "The selected ARM CPU does not support 64 bit mode\n");
4219 for (i = 0; i < 31; i++) {
4220 env->xregs[i] = regs->regs[i];
4223 env->xregs[31] = regs->sp;
4225 #elif defined(TARGET_ARM)
4228 cpsr_write(env, regs->uregs[16], 0xffffffff);
4229 for(i = 0; i < 16; i++) {
4230 env->regs[i] = regs->uregs[i];
4233 if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
4234 && (info->elf_flags & EF_ARM_BE8)) {
4235 env->bswap_code = 1;
4238 #elif defined(TARGET_UNICORE32)
4241 cpu_asr_write(env, regs->uregs[32], 0xffffffff);
4242 for (i = 0; i < 32; i++) {
4243 env->regs[i] = regs->uregs[i];
4246 #elif defined(TARGET_SPARC)
4250 env->npc = regs->npc;
4252 for(i = 0; i < 8; i++)
4253 env->gregs[i] = regs->u_regs[i];
4254 for(i = 0; i < 8; i++)
4255 env->regwptr[i] = regs->u_regs[i + 8];
4257 #elif defined(TARGET_PPC)
4261 #if defined(TARGET_PPC64)
4262 #if defined(TARGET_ABI32)
4263 env->msr &= ~((target_ulong)1 << MSR_SF);
4265 env->msr |= (target_ulong)1 << MSR_SF;
4268 env->nip = regs->nip;
4269 for(i = 0; i < 32; i++) {
4270 env->gpr[i] = regs->gpr[i];
4273 #elif defined(TARGET_M68K)
4276 env->dregs[0] = regs->d0;
4277 env->dregs[1] = regs->d1;
4278 env->dregs[2] = regs->d2;
4279 env->dregs[3] = regs->d3;
4280 env->dregs[4] = regs->d4;
4281 env->dregs[5] = regs->d5;
4282 env->dregs[6] = regs->d6;
4283 env->dregs[7] = regs->d7;
4284 env->aregs[0] = regs->a0;
4285 env->aregs[1] = regs->a1;
4286 env->aregs[2] = regs->a2;
4287 env->aregs[3] = regs->a3;
4288 env->aregs[4] = regs->a4;
4289 env->aregs[5] = regs->a5;
4290 env->aregs[6] = regs->a6;
4291 env->aregs[7] = regs->usp;
4293 ts->sim_syscalls = 1;
4295 #elif defined(TARGET_MICROBLAZE)
4297 env->regs[0] = regs->r0;
4298 env->regs[1] = regs->r1;
4299 env->regs[2] = regs->r2;
4300 env->regs[3] = regs->r3;
4301 env->regs[4] = regs->r4;
4302 env->regs[5] = regs->r5;
4303 env->regs[6] = regs->r6;
4304 env->regs[7] = regs->r7;
4305 env->regs[8] = regs->r8;
4306 env->regs[9] = regs->r9;
4307 env->regs[10] = regs->r10;
4308 env->regs[11] = regs->r11;
4309 env->regs[12] = regs->r12;
4310 env->regs[13] = regs->r13;
4311 env->regs[14] = regs->r14;
4312 env->regs[15] = regs->r15;
4313 env->regs[16] = regs->r16;
4314 env->regs[17] = regs->r17;
4315 env->regs[18] = regs->r18;
4316 env->regs[19] = regs->r19;
4317 env->regs[20] = regs->r20;
4318 env->regs[21] = regs->r21;
4319 env->regs[22] = regs->r22;
4320 env->regs[23] = regs->r23;
4321 env->regs[24] = regs->r24;
4322 env->regs[25] = regs->r25;
4323 env->regs[26] = regs->r26;
4324 env->regs[27] = regs->r27;
4325 env->regs[28] = regs->r28;
4326 env->regs[29] = regs->r29;
4327 env->regs[30] = regs->r30;
4328 env->regs[31] = regs->r31;
4329 env->sregs[SR_PC] = regs->pc;
4331 #elif defined(TARGET_MIPS)
4335 for(i = 0; i < 32; i++) {
4336 env->active_tc.gpr[i] = regs->regs[i];
4338 env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
4339 if (regs->cp0_epc & 1) {
4340 env->hflags |= MIPS_HFLAG_M16;
4343 #elif defined(TARGET_OPENRISC)
4347 for (i = 0; i < 32; i++) {
4348 env->gpr[i] = regs->gpr[i];
4354 #elif defined(TARGET_SH4)
4358 for(i = 0; i < 16; i++) {
4359 env->gregs[i] = regs->regs[i];
4363 #elif defined(TARGET_ALPHA)
4367 for(i = 0; i < 28; i++) {
4368 env->ir[i] = ((abi_ulong *)regs)[i];
4370 env->ir[IR_SP] = regs->usp;
4373 #elif defined(TARGET_CRIS)
4375 env->regs[0] = regs->r0;
4376 env->regs[1] = regs->r1;
4377 env->regs[2] = regs->r2;
4378 env->regs[3] = regs->r3;
4379 env->regs[4] = regs->r4;
4380 env->regs[5] = regs->r5;
4381 env->regs[6] = regs->r6;
4382 env->regs[7] = regs->r7;
4383 env->regs[8] = regs->r8;
4384 env->regs[9] = regs->r9;
4385 env->regs[10] = regs->r10;
4386 env->regs[11] = regs->r11;
4387 env->regs[12] = regs->r12;
4388 env->regs[13] = regs->r13;
4389 env->regs[14] = info->start_stack;
4390 env->regs[15] = regs->acr;
4391 env->pc = regs->erp;
4393 #elif defined(TARGET_S390X)
4396 for (i = 0; i < 16; i++) {
4397 env->regs[i] = regs->gprs[i];
4399 env->psw.mask = regs->psw.mask;
4400 env->psw.addr = regs->psw.addr;
4403 #error unsupported target CPU
4406 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4407 ts->stack_base = info->start_stack;
4408 ts->heap_base = info->brk;
4409 /* This will be filled in on the first SYS_HEAPINFO call. */
4414 if (gdbserver_start(gdbstub_port) < 0) {
4415 fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
4419 gdb_handlesig(cpu, 0);