4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
26 #include <sys/syscall.h>
27 #include <sys/resource.h>
30 #include "qemu-common.h"
33 #include "qemu/timer.h"
34 #include "qemu/envlist.h"
44 static const char *cpu_model
;
45 unsigned long mmap_min_addr
;
46 #if defined(CONFIG_USE_GUEST_BASE)
47 unsigned long guest_base
;
49 #if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
51 * When running 32-on-64 we should make sure we can fit all of the possible
52 * guest address space into a contiguous chunk of virtual host memory.
54 * This way we will never overlap with our own libraries or binaries or stack
55 * or anything else that QEMU maps.
58 /* MIPS only supports 31 bits of virtual address space for user space */
59 unsigned long reserved_va
= 0x77000000;
61 unsigned long reserved_va
= 0xf7000000;
64 unsigned long reserved_va
;
68 static void usage(void);
70 static const char *interp_prefix
= CONFIG_QEMU_INTERP_PREFIX
;
71 const char *qemu_uname_release
;
73 /* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
74 we allocate a bigger stack. Need a better solution, for example
75 by remapping the process stack directly at the right place */
76 unsigned long guest_stack_size
= 8 * 1024 * 1024UL;
78 void gemu_log(const char *fmt
, ...)
83 vfprintf(stderr
, fmt
, ap
);
87 #if defined(TARGET_I386)
88 int cpu_get_pic_interrupt(CPUX86State
*env
)
94 /***********************************************************/
95 /* Helper routines for implementing atomic operations. */
97 /* To implement exclusive operations we force all cpus to syncronise.
98 We don't require a full sync, only that no cpus are executing guest code.
99 The alternative is to map target atomic ops onto host equivalents,
100 which requires quite a lot of per host/target work. */
101 static pthread_mutex_t cpu_list_mutex
= PTHREAD_MUTEX_INITIALIZER
;
102 static pthread_mutex_t exclusive_lock
= PTHREAD_MUTEX_INITIALIZER
;
103 static pthread_cond_t exclusive_cond
= PTHREAD_COND_INITIALIZER
;
104 static pthread_cond_t exclusive_resume
= PTHREAD_COND_INITIALIZER
;
105 static int pending_cpus
;
107 /* Make sure everything is in a consistent state for calling fork(). */
108 void fork_start(void)
110 pthread_mutex_lock(&tcg_ctx
.tb_ctx
.tb_lock
);
111 pthread_mutex_lock(&exclusive_lock
);
115 void fork_end(int child
)
117 mmap_fork_end(child
);
119 CPUState
*cpu
, *next_cpu
;
120 /* Child processes created by fork() only have a single thread.
121 Discard information about the parent threads. */
122 CPU_FOREACH_SAFE(cpu
, next_cpu
) {
123 if (cpu
!= thread_cpu
) {
124 QTAILQ_REMOVE(&cpus
, thread_cpu
, node
);
128 pthread_mutex_init(&exclusive_lock
, NULL
);
129 pthread_mutex_init(&cpu_list_mutex
, NULL
);
130 pthread_cond_init(&exclusive_cond
, NULL
);
131 pthread_cond_init(&exclusive_resume
, NULL
);
132 pthread_mutex_init(&tcg_ctx
.tb_ctx
.tb_lock
, NULL
);
133 gdbserver_fork(thread_cpu
);
135 pthread_mutex_unlock(&exclusive_lock
);
136 pthread_mutex_unlock(&tcg_ctx
.tb_ctx
.tb_lock
);
140 /* Wait for pending exclusive operations to complete. The exclusive lock
142 static inline void exclusive_idle(void)
144 while (pending_cpus
) {
145 pthread_cond_wait(&exclusive_resume
, &exclusive_lock
);
149 /* Start an exclusive operation.
150 Must only be called from outside cpu_arm_exec. */
151 static inline void start_exclusive(void)
155 pthread_mutex_lock(&exclusive_lock
);
159 /* Make all other cpus stop executing. */
160 CPU_FOREACH(other_cpu
) {
161 if (other_cpu
->running
) {
166 if (pending_cpus
> 1) {
167 pthread_cond_wait(&exclusive_cond
, &exclusive_lock
);
171 /* Finish an exclusive operation. */
172 static inline void __attribute__((unused
)) end_exclusive(void)
175 pthread_cond_broadcast(&exclusive_resume
);
176 pthread_mutex_unlock(&exclusive_lock
);
179 /* Wait for exclusive ops to finish, and begin cpu execution. */
180 static inline void cpu_exec_start(CPUState
*cpu
)
182 pthread_mutex_lock(&exclusive_lock
);
185 pthread_mutex_unlock(&exclusive_lock
);
188 /* Mark cpu as not executing, and release pending exclusive ops. */
189 static inline void cpu_exec_end(CPUState
*cpu
)
191 pthread_mutex_lock(&exclusive_lock
);
192 cpu
->running
= false;
193 if (pending_cpus
> 1) {
195 if (pending_cpus
== 1) {
196 pthread_cond_signal(&exclusive_cond
);
200 pthread_mutex_unlock(&exclusive_lock
);
203 void cpu_list_lock(void)
205 pthread_mutex_lock(&cpu_list_mutex
);
208 void cpu_list_unlock(void)
210 pthread_mutex_unlock(&cpu_list_mutex
);
215 /***********************************************************/
216 /* CPUX86 core interface */
218 uint64_t cpu_get_tsc(CPUX86State
*env
)
220 return cpu_get_real_ticks();
223 static void write_dt(void *ptr
, unsigned long addr
, unsigned long limit
,
228 e1
= (addr
<< 16) | (limit
& 0xffff);
229 e2
= ((addr
>> 16) & 0xff) | (addr
& 0xff000000) | (limit
& 0x000f0000);
236 static uint64_t *idt_table
;
238 static void set_gate64(void *ptr
, unsigned int type
, unsigned int dpl
,
239 uint64_t addr
, unsigned int sel
)
242 e1
= (addr
& 0xffff) | (sel
<< 16);
243 e2
= (addr
& 0xffff0000) | 0x8000 | (dpl
<< 13) | (type
<< 8);
247 p
[2] = tswap32(addr
>> 32);
250 /* only dpl matters as we do only user space emulation */
251 static void set_idt(int n
, unsigned int dpl
)
253 set_gate64(idt_table
+ n
* 2, 0, dpl
, 0, 0);
256 static void set_gate(void *ptr
, unsigned int type
, unsigned int dpl
,
257 uint32_t addr
, unsigned int sel
)
260 e1
= (addr
& 0xffff) | (sel
<< 16);
261 e2
= (addr
& 0xffff0000) | 0x8000 | (dpl
<< 13) | (type
<< 8);
267 /* only dpl matters as we do only user space emulation */
268 static void set_idt(int n
, unsigned int dpl
)
270 set_gate(idt_table
+ n
, 0, dpl
, 0, 0);
274 void cpu_loop(CPUX86State
*env
)
276 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
279 target_siginfo_t info
;
283 trapnr
= cpu_x86_exec(cs
);
287 /* linux syscall from int $0x80 */
288 env
->regs
[R_EAX
] = do_syscall(env
,
300 /* linux syscall from syscall instruction */
301 env
->regs
[R_EAX
] = do_syscall(env
,
314 info
.si_signo
= TARGET_SIGBUS
;
316 info
.si_code
= TARGET_SI_KERNEL
;
317 info
._sifields
._sigfault
._addr
= 0;
318 queue_signal(env
, info
.si_signo
, &info
);
321 /* XXX: potential problem if ABI32 */
322 #ifndef TARGET_X86_64
323 if (env
->eflags
& VM_MASK
) {
324 handle_vm86_fault(env
);
328 info
.si_signo
= TARGET_SIGSEGV
;
330 info
.si_code
= TARGET_SI_KERNEL
;
331 info
._sifields
._sigfault
._addr
= 0;
332 queue_signal(env
, info
.si_signo
, &info
);
336 info
.si_signo
= TARGET_SIGSEGV
;
338 if (!(env
->error_code
& 1))
339 info
.si_code
= TARGET_SEGV_MAPERR
;
341 info
.si_code
= TARGET_SEGV_ACCERR
;
342 info
._sifields
._sigfault
._addr
= env
->cr
[2];
343 queue_signal(env
, info
.si_signo
, &info
);
346 #ifndef TARGET_X86_64
347 if (env
->eflags
& VM_MASK
) {
348 handle_vm86_trap(env
, trapnr
);
352 /* division by zero */
353 info
.si_signo
= TARGET_SIGFPE
;
355 info
.si_code
= TARGET_FPE_INTDIV
;
356 info
._sifields
._sigfault
._addr
= env
->eip
;
357 queue_signal(env
, info
.si_signo
, &info
);
362 #ifndef TARGET_X86_64
363 if (env
->eflags
& VM_MASK
) {
364 handle_vm86_trap(env
, trapnr
);
368 info
.si_signo
= TARGET_SIGTRAP
;
370 if (trapnr
== EXCP01_DB
) {
371 info
.si_code
= TARGET_TRAP_BRKPT
;
372 info
._sifields
._sigfault
._addr
= env
->eip
;
374 info
.si_code
= TARGET_SI_KERNEL
;
375 info
._sifields
._sigfault
._addr
= 0;
377 queue_signal(env
, info
.si_signo
, &info
);
382 #ifndef TARGET_X86_64
383 if (env
->eflags
& VM_MASK
) {
384 handle_vm86_trap(env
, trapnr
);
388 info
.si_signo
= TARGET_SIGSEGV
;
390 info
.si_code
= TARGET_SI_KERNEL
;
391 info
._sifields
._sigfault
._addr
= 0;
392 queue_signal(env
, info
.si_signo
, &info
);
396 info
.si_signo
= TARGET_SIGILL
;
398 info
.si_code
= TARGET_ILL_ILLOPN
;
399 info
._sifields
._sigfault
._addr
= env
->eip
;
400 queue_signal(env
, info
.si_signo
, &info
);
403 /* just indicate that signals should be handled asap */
409 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
414 info
.si_code
= TARGET_TRAP_BRKPT
;
415 queue_signal(env
, info
.si_signo
, &info
);
420 pc
= env
->segs
[R_CS
].base
+ env
->eip
;
421 fprintf(stderr
, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
425 process_pending_signals(env
);
432 #define get_user_code_u32(x, gaddr, doswap) \
433 ({ abi_long __r = get_user_u32((x), (gaddr)); \
434 if (!__r && (doswap)) { \
440 #define get_user_code_u16(x, gaddr, doswap) \
441 ({ abi_long __r = get_user_u16((x), (gaddr)); \
442 if (!__r && (doswap)) { \
449 /* Commpage handling -- there is no commpage for AArch64 */
452 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
454 * r0 = pointer to oldval
455 * r1 = pointer to newval
456 * r2 = pointer to target value
459 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
460 * C set if *ptr was changed, clear if no exchange happened
462 * Note segv's in kernel helpers are a bit tricky, we can set the
463 * data address sensibly but the PC address is just the entry point.
465 static void arm_kernel_cmpxchg64_helper(CPUARMState
*env
)
467 uint64_t oldval
, newval
, val
;
469 target_siginfo_t info
;
471 /* Based on the 32 bit code in do_kernel_trap */
473 /* XXX: This only works between threads, not between processes.
474 It's probably possible to implement this with native host
475 operations. However things like ldrex/strex are much harder so
476 there's not much point trying. */
478 cpsr
= cpsr_read(env
);
481 if (get_user_u64(oldval
, env
->regs
[0])) {
482 env
->exception
.vaddress
= env
->regs
[0];
486 if (get_user_u64(newval
, env
->regs
[1])) {
487 env
->exception
.vaddress
= env
->regs
[1];
491 if (get_user_u64(val
, addr
)) {
492 env
->exception
.vaddress
= addr
;
499 if (put_user_u64(val
, addr
)) {
500 env
->exception
.vaddress
= addr
;
510 cpsr_write(env
, cpsr
, CPSR_C
);
516 /* We get the PC of the entry address - which is as good as anything,
517 on a real kernel what you get depends on which mode it uses. */
518 info
.si_signo
= TARGET_SIGSEGV
;
520 /* XXX: check env->error_code */
521 info
.si_code
= TARGET_SEGV_MAPERR
;
522 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
523 queue_signal(env
, info
.si_signo
, &info
);
526 /* Handle a jump to the kernel code page. */
528 do_kernel_trap(CPUARMState
*env
)
534 switch (env
->regs
[15]) {
535 case 0xffff0fa0: /* __kernel_memory_barrier */
536 /* ??? No-op. Will need to do better for SMP. */
538 case 0xffff0fc0: /* __kernel_cmpxchg */
539 /* XXX: This only works between threads, not between processes.
540 It's probably possible to implement this with native host
541 operations. However things like ldrex/strex are much harder so
542 there's not much point trying. */
544 cpsr
= cpsr_read(env
);
546 /* FIXME: This should SEGV if the access fails. */
547 if (get_user_u32(val
, addr
))
549 if (val
== env
->regs
[0]) {
551 /* FIXME: Check for segfaults. */
552 put_user_u32(val
, addr
);
559 cpsr_write(env
, cpsr
, CPSR_C
);
562 case 0xffff0fe0: /* __kernel_get_tls */
563 env
->regs
[0] = cpu_get_tls(env
);
565 case 0xffff0f60: /* __kernel_cmpxchg64 */
566 arm_kernel_cmpxchg64_helper(env
);
572 /* Jump back to the caller. */
573 addr
= env
->regs
[14];
578 env
->regs
[15] = addr
;
583 /* Store exclusive handling for AArch32 */
584 static int do_strex(CPUARMState
*env
)
592 if (env
->exclusive_addr
!= env
->exclusive_test
) {
595 /* We know we're always AArch32 so the address is in uint32_t range
596 * unless it was the -1 exclusive-monitor-lost value (which won't
597 * match exclusive_test above).
599 assert(extract64(env
->exclusive_addr
, 32, 32) == 0);
600 addr
= env
->exclusive_addr
;
601 size
= env
->exclusive_info
& 0xf;
604 segv
= get_user_u8(val
, addr
);
607 segv
= get_user_u16(val
, addr
);
611 segv
= get_user_u32(val
, addr
);
617 env
->exception
.vaddress
= addr
;
622 segv
= get_user_u32(valhi
, addr
+ 4);
624 env
->exception
.vaddress
= addr
+ 4;
627 val
= deposit64(val
, 32, 32, valhi
);
629 if (val
!= env
->exclusive_val
) {
633 val
= env
->regs
[(env
->exclusive_info
>> 8) & 0xf];
636 segv
= put_user_u8(val
, addr
);
639 segv
= put_user_u16(val
, addr
);
643 segv
= put_user_u32(val
, addr
);
647 env
->exception
.vaddress
= addr
;
651 val
= env
->regs
[(env
->exclusive_info
>> 12) & 0xf];
652 segv
= put_user_u32(val
, addr
+ 4);
654 env
->exception
.vaddress
= addr
+ 4;
661 env
->regs
[(env
->exclusive_info
>> 4) & 0xf] = rc
;
667 void cpu_loop(CPUARMState
*env
)
669 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
671 unsigned int n
, insn
;
672 target_siginfo_t info
;
677 trapnr
= cpu_arm_exec(cs
);
682 TaskState
*ts
= cs
->opaque
;
686 /* we handle the FPU emulation here, as Linux */
687 /* we get the opcode */
688 /* FIXME - what to do if get_user() fails? */
689 get_user_code_u32(opcode
, env
->regs
[15], env
->bswap_code
);
691 rc
= EmulateAll(opcode
, &ts
->fpa
, env
);
692 if (rc
== 0) { /* illegal instruction */
693 info
.si_signo
= TARGET_SIGILL
;
695 info
.si_code
= TARGET_ILL_ILLOPN
;
696 info
._sifields
._sigfault
._addr
= env
->regs
[15];
697 queue_signal(env
, info
.si_signo
, &info
);
698 } else if (rc
< 0) { /* FP exception */
701 /* translate softfloat flags to FPSR flags */
702 if (-rc
& float_flag_invalid
)
704 if (-rc
& float_flag_divbyzero
)
706 if (-rc
& float_flag_overflow
)
708 if (-rc
& float_flag_underflow
)
710 if (-rc
& float_flag_inexact
)
713 FPSR fpsr
= ts
->fpa
.fpsr
;
714 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
716 if (fpsr
& (arm_fpe
<< 16)) { /* exception enabled? */
717 info
.si_signo
= TARGET_SIGFPE
;
720 /* ordered by priority, least first */
721 if (arm_fpe
& BIT_IXC
) info
.si_code
= TARGET_FPE_FLTRES
;
722 if (arm_fpe
& BIT_UFC
) info
.si_code
= TARGET_FPE_FLTUND
;
723 if (arm_fpe
& BIT_OFC
) info
.si_code
= TARGET_FPE_FLTOVF
;
724 if (arm_fpe
& BIT_DZC
) info
.si_code
= TARGET_FPE_FLTDIV
;
725 if (arm_fpe
& BIT_IOC
) info
.si_code
= TARGET_FPE_FLTINV
;
727 info
._sifields
._sigfault
._addr
= env
->regs
[15];
728 queue_signal(env
, info
.si_signo
, &info
);
733 /* accumulate unenabled exceptions */
734 if ((!(fpsr
& BIT_IXE
)) && (arm_fpe
& BIT_IXC
))
736 if ((!(fpsr
& BIT_UFE
)) && (arm_fpe
& BIT_UFC
))
738 if ((!(fpsr
& BIT_OFE
)) && (arm_fpe
& BIT_OFC
))
740 if ((!(fpsr
& BIT_DZE
)) && (arm_fpe
& BIT_DZC
))
742 if ((!(fpsr
& BIT_IOE
)) && (arm_fpe
& BIT_IOC
))
745 } else { /* everything OK */
756 if (trapnr
== EXCP_BKPT
) {
758 /* FIXME - what to do if get_user() fails? */
759 get_user_code_u16(insn
, env
->regs
[15], env
->bswap_code
);
763 /* FIXME - what to do if get_user() fails? */
764 get_user_code_u32(insn
, env
->regs
[15], env
->bswap_code
);
765 n
= (insn
& 0xf) | ((insn
>> 4) & 0xff0);
770 /* FIXME - what to do if get_user() fails? */
771 get_user_code_u16(insn
, env
->regs
[15] - 2,
775 /* FIXME - what to do if get_user() fails? */
776 get_user_code_u32(insn
, env
->regs
[15] - 4,
782 if (n
== ARM_NR_cacheflush
) {
784 } else if (n
== ARM_NR_semihosting
785 || n
== ARM_NR_thumb_semihosting
) {
786 env
->regs
[0] = do_arm_semihosting (env
);
787 } else if (n
== 0 || n
>= ARM_SYSCALL_BASE
|| env
->thumb
) {
789 if (env
->thumb
|| n
== 0) {
792 n
-= ARM_SYSCALL_BASE
;
795 if ( n
> ARM_NR_BASE
) {
797 case ARM_NR_cacheflush
:
801 cpu_set_tls(env
, env
->regs
[0]);
804 case ARM_NR_breakpoint
:
805 env
->regs
[15] -= env
->thumb
? 2 : 4;
808 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
810 env
->regs
[0] = -TARGET_ENOSYS
;
814 env
->regs
[0] = do_syscall(env
,
830 /* just indicate that signals should be handled asap */
833 if (!do_strex(env
)) {
836 /* fall through for segv */
837 case EXCP_PREFETCH_ABORT
:
838 case EXCP_DATA_ABORT
:
839 addr
= env
->exception
.vaddress
;
841 info
.si_signo
= TARGET_SIGSEGV
;
843 /* XXX: check env->error_code */
844 info
.si_code
= TARGET_SEGV_MAPERR
;
845 info
._sifields
._sigfault
._addr
= addr
;
846 queue_signal(env
, info
.si_signo
, &info
);
854 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
859 info
.si_code
= TARGET_TRAP_BRKPT
;
860 queue_signal(env
, info
.si_signo
, &info
);
864 case EXCP_KERNEL_TRAP
:
865 if (do_kernel_trap(env
))
870 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
872 cpu_dump_state(cs
, stderr
, fprintf
, 0);
875 process_pending_signals(env
);
882 * Handle AArch64 store-release exclusive
884 * rs = gets the status result of store exclusive
885 * rt = is the register that is stored
886 * rt2 = is the second register store (in STP)
889 static int do_strex_a64(CPUARMState
*env
)
900 /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
901 size
= extract32(env
->exclusive_info
, 0, 2);
902 is_pair
= extract32(env
->exclusive_info
, 2, 1);
903 rs
= extract32(env
->exclusive_info
, 4, 5);
904 rt
= extract32(env
->exclusive_info
, 9, 5);
905 rt2
= extract32(env
->exclusive_info
, 14, 5);
907 addr
= env
->exclusive_addr
;
909 if (addr
!= env
->exclusive_test
) {
915 segv
= get_user_u8(val
, addr
);
918 segv
= get_user_u16(val
, addr
);
921 segv
= get_user_u32(val
, addr
);
924 segv
= get_user_u64(val
, addr
);
930 env
->exception
.vaddress
= addr
;
933 if (val
!= env
->exclusive_val
) {
938 segv
= get_user_u32(val
, addr
+ 4);
940 segv
= get_user_u64(val
, addr
+ 8);
943 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
946 if (val
!= env
->exclusive_high
) {
950 /* handle the zero register */
951 val
= rt
== 31 ? 0 : env
->xregs
[rt
];
954 segv
= put_user_u8(val
, addr
);
957 segv
= put_user_u16(val
, addr
);
960 segv
= put_user_u32(val
, addr
);
963 segv
= put_user_u64(val
, addr
);
970 /* handle the zero register */
971 val
= rt2
== 31 ? 0 : env
->xregs
[rt2
];
973 segv
= put_user_u32(val
, addr
+ 4);
975 segv
= put_user_u64(val
, addr
+ 8);
978 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
985 /* rs == 31 encodes a write to the ZR, thus throwing away
986 * the status return. This is rather silly but valid.
992 /* instruction faulted, PC does not advance */
993 /* either way a strex releases any exclusive lock we have */
994 env
->exclusive_addr
= -1;
999 /* AArch64 main loop */
1000 void cpu_loop(CPUARMState
*env
)
1002 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
1004 target_siginfo_t info
;
1008 trapnr
= cpu_arm_exec(cs
);
1013 env
->xregs
[0] = do_syscall(env
,
1023 case EXCP_INTERRUPT
:
1024 /* just indicate that signals should be handled asap */
1027 info
.si_signo
= TARGET_SIGILL
;
1029 info
.si_code
= TARGET_ILL_ILLOPN
;
1030 info
._sifields
._sigfault
._addr
= env
->pc
;
1031 queue_signal(env
, info
.si_signo
, &info
);
1034 if (!do_strex_a64(env
)) {
1037 /* fall through for segv */
1038 case EXCP_PREFETCH_ABORT
:
1039 case EXCP_DATA_ABORT
:
1040 info
.si_signo
= TARGET_SIGSEGV
;
1042 /* XXX: check env->error_code */
1043 info
.si_code
= TARGET_SEGV_MAPERR
;
1044 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
1045 queue_signal(env
, info
.si_signo
, &info
);
1049 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1051 info
.si_signo
= sig
;
1053 info
.si_code
= TARGET_TRAP_BRKPT
;
1054 queue_signal(env
, info
.si_signo
, &info
);
1058 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
1060 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1063 process_pending_signals(env
);
1064 /* Exception return on AArch64 always clears the exclusive monitor,
1065 * so any return to running guest code implies this.
1066 * A strex (successful or otherwise) also clears the monitor, so
1067 * we don't need to specialcase EXCP_STREX.
1069 env
->exclusive_addr
= -1;
1072 #endif /* ndef TARGET_ABI32 */
1076 #ifdef TARGET_UNICORE32
1078 void cpu_loop(CPUUniCore32State
*env
)
1080 CPUState
*cs
= CPU(uc32_env_get_cpu(env
));
1082 unsigned int n
, insn
;
1083 target_siginfo_t info
;
1087 trapnr
= uc32_cpu_exec(cs
);
1090 case UC32_EXCP_PRIV
:
1093 get_user_u32(insn
, env
->regs
[31] - 4);
1094 n
= insn
& 0xffffff;
1096 if (n
>= UC32_SYSCALL_BASE
) {
1098 n
-= UC32_SYSCALL_BASE
;
1099 if (n
== UC32_SYSCALL_NR_set_tls
) {
1100 cpu_set_tls(env
, env
->regs
[0]);
1103 env
->regs
[0] = do_syscall(env
,
1118 case UC32_EXCP_DTRAP
:
1119 case UC32_EXCP_ITRAP
:
1120 info
.si_signo
= TARGET_SIGSEGV
;
1122 /* XXX: check env->error_code */
1123 info
.si_code
= TARGET_SEGV_MAPERR
;
1124 info
._sifields
._sigfault
._addr
= env
->cp0
.c4_faultaddr
;
1125 queue_signal(env
, info
.si_signo
, &info
);
1127 case EXCP_INTERRUPT
:
1128 /* just indicate that signals should be handled asap */
1134 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1136 info
.si_signo
= sig
;
1138 info
.si_code
= TARGET_TRAP_BRKPT
;
1139 queue_signal(env
, info
.si_signo
, &info
);
1146 process_pending_signals(env
);
1150 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1151 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1157 #define SPARC64_STACK_BIAS 2047
1161 /* WARNING: dealing with register windows _is_ complicated. More info
1162 can be found at http://www.sics.se/~psm/sparcstack.html */
1163 static inline int get_reg_index(CPUSPARCState
*env
, int cwp
, int index
)
1165 index
= (index
+ cwp
* 16) % (16 * env
->nwindows
);
1166 /* wrap handling : if cwp is on the last window, then we use the
1167 registers 'after' the end */
1168 if (index
< 8 && env
->cwp
== env
->nwindows
- 1)
1169 index
+= 16 * env
->nwindows
;
1173 /* save the register window 'cwp1' */
1174 static inline void save_window_offset(CPUSPARCState
*env
, int cwp1
)
1179 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1180 #ifdef TARGET_SPARC64
1182 sp_ptr
+= SPARC64_STACK_BIAS
;
1184 #if defined(DEBUG_WIN)
1185 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" save_cwp=%d\n",
1188 for(i
= 0; i
< 16; i
++) {
1189 /* FIXME - what to do if put_user() fails? */
1190 put_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1191 sp_ptr
+= sizeof(abi_ulong
);
1195 static void save_window(CPUSPARCState
*env
)
1197 #ifndef TARGET_SPARC64
1198 unsigned int new_wim
;
1199 new_wim
= ((env
->wim
>> 1) | (env
->wim
<< (env
->nwindows
- 1))) &
1200 ((1LL << env
->nwindows
) - 1);
1201 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1204 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1210 static void restore_window(CPUSPARCState
*env
)
1212 #ifndef TARGET_SPARC64
1213 unsigned int new_wim
;
1215 unsigned int i
, cwp1
;
1218 #ifndef TARGET_SPARC64
1219 new_wim
= ((env
->wim
<< 1) | (env
->wim
>> (env
->nwindows
- 1))) &
1220 ((1LL << env
->nwindows
) - 1);
1223 /* restore the invalid window */
1224 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1225 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1226 #ifdef TARGET_SPARC64
1228 sp_ptr
+= SPARC64_STACK_BIAS
;
1230 #if defined(DEBUG_WIN)
1231 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" load_cwp=%d\n",
1234 for(i
= 0; i
< 16; i
++) {
1235 /* FIXME - what to do if get_user() fails? */
1236 get_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1237 sp_ptr
+= sizeof(abi_ulong
);
1239 #ifdef TARGET_SPARC64
1241 if (env
->cleanwin
< env
->nwindows
- 1)
1249 static void flush_windows(CPUSPARCState
*env
)
1255 /* if restore would invoke restore_window(), then we can stop */
1256 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ offset
);
1257 #ifndef TARGET_SPARC64
1258 if (env
->wim
& (1 << cwp1
))
1261 if (env
->canrestore
== 0)
1266 save_window_offset(env
, cwp1
);
1269 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1270 #ifndef TARGET_SPARC64
1271 /* set wim so that restore will reload the registers */
1272 env
->wim
= 1 << cwp1
;
1274 #if defined(DEBUG_WIN)
1275 printf("flush_windows: nb=%d\n", offset
- 1);
1279 void cpu_loop (CPUSPARCState
*env
)
1281 CPUState
*cs
= CPU(sparc_env_get_cpu(env
));
1284 target_siginfo_t info
;
1288 trapnr
= cpu_sparc_exec(cs
);
1291 /* Compute PSR before exposing state. */
1292 if (env
->cc_op
!= CC_OP_FLAGS
) {
1297 #ifndef TARGET_SPARC64
1304 ret
= do_syscall (env
, env
->gregs
[1],
1305 env
->regwptr
[0], env
->regwptr
[1],
1306 env
->regwptr
[2], env
->regwptr
[3],
1307 env
->regwptr
[4], env
->regwptr
[5],
1309 if ((abi_ulong
)ret
>= (abi_ulong
)(-515)) {
1310 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1311 env
->xcc
|= PSR_CARRY
;
1313 env
->psr
|= PSR_CARRY
;
1317 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1318 env
->xcc
&= ~PSR_CARRY
;
1320 env
->psr
&= ~PSR_CARRY
;
1323 env
->regwptr
[0] = ret
;
1324 /* next instruction */
1326 env
->npc
= env
->npc
+ 4;
1328 case 0x83: /* flush windows */
1333 /* next instruction */
1335 env
->npc
= env
->npc
+ 4;
1337 #ifndef TARGET_SPARC64
1338 case TT_WIN_OVF
: /* window overflow */
1341 case TT_WIN_UNF
: /* window underflow */
1342 restore_window(env
);
1347 info
.si_signo
= TARGET_SIGSEGV
;
1349 /* XXX: check env->error_code */
1350 info
.si_code
= TARGET_SEGV_MAPERR
;
1351 info
._sifields
._sigfault
._addr
= env
->mmuregs
[4];
1352 queue_signal(env
, info
.si_signo
, &info
);
1356 case TT_SPILL
: /* window overflow */
1359 case TT_FILL
: /* window underflow */
1360 restore_window(env
);
1365 info
.si_signo
= TARGET_SIGSEGV
;
1367 /* XXX: check env->error_code */
1368 info
.si_code
= TARGET_SEGV_MAPERR
;
1369 if (trapnr
== TT_DFAULT
)
1370 info
._sifields
._sigfault
._addr
= env
->dmmuregs
[4];
1372 info
._sifields
._sigfault
._addr
= cpu_tsptr(env
)->tpc
;
1373 queue_signal(env
, info
.si_signo
, &info
);
1376 #ifndef TARGET_ABI32
1379 sparc64_get_context(env
);
1383 sparc64_set_context(env
);
1387 case EXCP_INTERRUPT
:
1388 /* just indicate that signals should be handled asap */
1392 info
.si_signo
= TARGET_SIGILL
;
1394 info
.si_code
= TARGET_ILL_ILLOPC
;
1395 info
._sifields
._sigfault
._addr
= env
->pc
;
1396 queue_signal(env
, info
.si_signo
, &info
);
1403 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1406 info
.si_signo
= sig
;
1408 info
.si_code
= TARGET_TRAP_BRKPT
;
1409 queue_signal(env
, info
.si_signo
, &info
);
1414 printf ("Unhandled trap: 0x%x\n", trapnr
);
1415 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1418 process_pending_signals (env
);
1425 static inline uint64_t cpu_ppc_get_tb(CPUPPCState
*env
)
1427 return cpu_get_real_ticks();
1430 uint64_t cpu_ppc_load_tbl(CPUPPCState
*env
)
1432 return cpu_ppc_get_tb(env
);
1435 uint32_t cpu_ppc_load_tbu(CPUPPCState
*env
)
1437 return cpu_ppc_get_tb(env
) >> 32;
1440 uint64_t cpu_ppc_load_atbl(CPUPPCState
*env
)
1442 return cpu_ppc_get_tb(env
);
1445 uint32_t cpu_ppc_load_atbu(CPUPPCState
*env
)
1447 return cpu_ppc_get_tb(env
) >> 32;
1450 uint32_t cpu_ppc601_load_rtcu(CPUPPCState
*env
)
1451 __attribute__ (( alias ("cpu_ppc_load_tbu") ));
1453 uint32_t cpu_ppc601_load_rtcl(CPUPPCState
*env
)
1455 return cpu_ppc_load_tbl(env
) & 0x3FFFFF80;
1458 /* XXX: to be fixed */
1459 int ppc_dcr_read (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t *valp
)
1464 int ppc_dcr_write (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t val
)
1469 #define EXCP_DUMP(env, fmt, ...) \
1471 CPUState *cs = ENV_GET_CPU(env); \
1472 fprintf(stderr, fmt , ## __VA_ARGS__); \
1473 cpu_dump_state(cs, stderr, fprintf, 0); \
1474 qemu_log(fmt, ## __VA_ARGS__); \
1475 if (qemu_log_enabled()) { \
1476 log_cpu_state(cs, 0); \
1480 static int do_store_exclusive(CPUPPCState
*env
)
1483 target_ulong page_addr
;
1484 target_ulong val
, val2
__attribute__((unused
)) = 0;
1488 addr
= env
->reserve_ea
;
1489 page_addr
= addr
& TARGET_PAGE_MASK
;
1492 flags
= page_get_flags(page_addr
);
1493 if ((flags
& PAGE_READ
) == 0) {
1496 int reg
= env
->reserve_info
& 0x1f;
1497 int size
= env
->reserve_info
>> 5;
1500 if (addr
== env
->reserve_addr
) {
1502 case 1: segv
= get_user_u8(val
, addr
); break;
1503 case 2: segv
= get_user_u16(val
, addr
); break;
1504 case 4: segv
= get_user_u32(val
, addr
); break;
1505 #if defined(TARGET_PPC64)
1506 case 8: segv
= get_user_u64(val
, addr
); break;
1508 segv
= get_user_u64(val
, addr
);
1510 segv
= get_user_u64(val2
, addr
+ 8);
1517 if (!segv
&& val
== env
->reserve_val
) {
1518 val
= env
->gpr
[reg
];
1520 case 1: segv
= put_user_u8(val
, addr
); break;
1521 case 2: segv
= put_user_u16(val
, addr
); break;
1522 case 4: segv
= put_user_u32(val
, addr
); break;
1523 #if defined(TARGET_PPC64)
1524 case 8: segv
= put_user_u64(val
, addr
); break;
1526 if (val2
== env
->reserve_val2
) {
1529 val
= env
->gpr
[reg
+1];
1531 val2
= env
->gpr
[reg
+1];
1533 segv
= put_user_u64(val
, addr
);
1535 segv
= put_user_u64(val2
, addr
+ 8);
1548 env
->crf
[0] = (stored
<< 1) | xer_so
;
1549 env
->reserve_addr
= (target_ulong
)-1;
1559 void cpu_loop(CPUPPCState
*env
)
1561 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
1562 target_siginfo_t info
;
1568 trapnr
= cpu_ppc_exec(cs
);
1571 case POWERPC_EXCP_NONE
:
1574 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1575 cpu_abort(cs
, "Critical interrupt while in user mode. "
1578 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1579 cpu_abort(cs
, "Machine check exception while in user mode. "
1582 case POWERPC_EXCP_DSI
: /* Data storage exception */
1583 EXCP_DUMP(env
, "Invalid data memory access: 0x" TARGET_FMT_lx
"\n",
1585 /* XXX: check this. Seems bugged */
1586 switch (env
->error_code
& 0xFF000000) {
1588 info
.si_signo
= TARGET_SIGSEGV
;
1590 info
.si_code
= TARGET_SEGV_MAPERR
;
1593 info
.si_signo
= TARGET_SIGILL
;
1595 info
.si_code
= TARGET_ILL_ILLADR
;
1598 info
.si_signo
= TARGET_SIGSEGV
;
1600 info
.si_code
= TARGET_SEGV_ACCERR
;
1603 /* Let's send a regular segfault... */
1604 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1606 info
.si_signo
= TARGET_SIGSEGV
;
1608 info
.si_code
= TARGET_SEGV_MAPERR
;
1611 info
._sifields
._sigfault
._addr
= env
->nip
;
1612 queue_signal(env
, info
.si_signo
, &info
);
1614 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1615 EXCP_DUMP(env
, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1616 "\n", env
->spr
[SPR_SRR0
]);
1617 /* XXX: check this */
1618 switch (env
->error_code
& 0xFF000000) {
1620 info
.si_signo
= TARGET_SIGSEGV
;
1622 info
.si_code
= TARGET_SEGV_MAPERR
;
1626 info
.si_signo
= TARGET_SIGSEGV
;
1628 info
.si_code
= TARGET_SEGV_ACCERR
;
1631 /* Let's send a regular segfault... */
1632 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1634 info
.si_signo
= TARGET_SIGSEGV
;
1636 info
.si_code
= TARGET_SEGV_MAPERR
;
1639 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1640 queue_signal(env
, info
.si_signo
, &info
);
1642 case POWERPC_EXCP_EXTERNAL
: /* External input */
1643 cpu_abort(cs
, "External interrupt while in user mode. "
1646 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1647 EXCP_DUMP(env
, "Unaligned memory access\n");
1648 /* XXX: check this */
1649 info
.si_signo
= TARGET_SIGBUS
;
1651 info
.si_code
= TARGET_BUS_ADRALN
;
1652 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1653 queue_signal(env
, info
.si_signo
, &info
);
1655 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1656 /* XXX: check this */
1657 switch (env
->error_code
& ~0xF) {
1658 case POWERPC_EXCP_FP
:
1659 EXCP_DUMP(env
, "Floating point program exception\n");
1660 info
.si_signo
= TARGET_SIGFPE
;
1662 switch (env
->error_code
& 0xF) {
1663 case POWERPC_EXCP_FP_OX
:
1664 info
.si_code
= TARGET_FPE_FLTOVF
;
1666 case POWERPC_EXCP_FP_UX
:
1667 info
.si_code
= TARGET_FPE_FLTUND
;
1669 case POWERPC_EXCP_FP_ZX
:
1670 case POWERPC_EXCP_FP_VXZDZ
:
1671 info
.si_code
= TARGET_FPE_FLTDIV
;
1673 case POWERPC_EXCP_FP_XX
:
1674 info
.si_code
= TARGET_FPE_FLTRES
;
1676 case POWERPC_EXCP_FP_VXSOFT
:
1677 info
.si_code
= TARGET_FPE_FLTINV
;
1679 case POWERPC_EXCP_FP_VXSNAN
:
1680 case POWERPC_EXCP_FP_VXISI
:
1681 case POWERPC_EXCP_FP_VXIDI
:
1682 case POWERPC_EXCP_FP_VXIMZ
:
1683 case POWERPC_EXCP_FP_VXVC
:
1684 case POWERPC_EXCP_FP_VXSQRT
:
1685 case POWERPC_EXCP_FP_VXCVI
:
1686 info
.si_code
= TARGET_FPE_FLTSUB
;
1689 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1694 case POWERPC_EXCP_INVAL
:
1695 EXCP_DUMP(env
, "Invalid instruction\n");
1696 info
.si_signo
= TARGET_SIGILL
;
1698 switch (env
->error_code
& 0xF) {
1699 case POWERPC_EXCP_INVAL_INVAL
:
1700 info
.si_code
= TARGET_ILL_ILLOPC
;
1702 case POWERPC_EXCP_INVAL_LSWX
:
1703 info
.si_code
= TARGET_ILL_ILLOPN
;
1705 case POWERPC_EXCP_INVAL_SPR
:
1706 info
.si_code
= TARGET_ILL_PRVREG
;
1708 case POWERPC_EXCP_INVAL_FP
:
1709 info
.si_code
= TARGET_ILL_COPROC
;
1712 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1713 env
->error_code
& 0xF);
1714 info
.si_code
= TARGET_ILL_ILLADR
;
1718 case POWERPC_EXCP_PRIV
:
1719 EXCP_DUMP(env
, "Privilege violation\n");
1720 info
.si_signo
= TARGET_SIGILL
;
1722 switch (env
->error_code
& 0xF) {
1723 case POWERPC_EXCP_PRIV_OPC
:
1724 info
.si_code
= TARGET_ILL_PRVOPC
;
1726 case POWERPC_EXCP_PRIV_REG
:
1727 info
.si_code
= TARGET_ILL_PRVREG
;
1730 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1731 env
->error_code
& 0xF);
1732 info
.si_code
= TARGET_ILL_PRVOPC
;
1736 case POWERPC_EXCP_TRAP
:
1737 cpu_abort(cs
, "Tried to call a TRAP\n");
1740 /* Should not happen ! */
1741 cpu_abort(cs
, "Unknown program exception (%02x)\n",
1745 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1746 queue_signal(env
, info
.si_signo
, &info
);
1748 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1749 EXCP_DUMP(env
, "No floating point allowed\n");
1750 info
.si_signo
= TARGET_SIGILL
;
1752 info
.si_code
= TARGET_ILL_COPROC
;
1753 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1754 queue_signal(env
, info
.si_signo
, &info
);
1756 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1757 cpu_abort(cs
, "Syscall exception while in user mode. "
1760 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1761 EXCP_DUMP(env
, "No APU instruction allowed\n");
1762 info
.si_signo
= TARGET_SIGILL
;
1764 info
.si_code
= TARGET_ILL_COPROC
;
1765 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1766 queue_signal(env
, info
.si_signo
, &info
);
1768 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1769 cpu_abort(cs
, "Decrementer interrupt while in user mode. "
1772 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1773 cpu_abort(cs
, "Fix interval timer interrupt while in user mode. "
1776 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1777 cpu_abort(cs
, "Watchdog timer interrupt while in user mode. "
1780 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1781 cpu_abort(cs
, "Data TLB exception while in user mode. "
1784 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1785 cpu_abort(cs
, "Instruction TLB exception while in user mode. "
1788 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1789 EXCP_DUMP(env
, "No SPE/floating-point instruction allowed\n");
1790 info
.si_signo
= TARGET_SIGILL
;
1792 info
.si_code
= TARGET_ILL_COPROC
;
1793 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1794 queue_signal(env
, info
.si_signo
, &info
);
1796 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1797 cpu_abort(cs
, "Embedded floating-point data IRQ not handled\n");
1799 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1800 cpu_abort(cs
, "Embedded floating-point round IRQ not handled\n");
1802 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1803 cpu_abort(cs
, "Performance monitor exception not handled\n");
1805 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1806 cpu_abort(cs
, "Doorbell interrupt while in user mode. "
1809 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1810 cpu_abort(cs
, "Doorbell critical interrupt while in user mode. "
1813 case POWERPC_EXCP_RESET
: /* System reset exception */
1814 cpu_abort(cs
, "Reset interrupt while in user mode. "
1817 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1818 cpu_abort(cs
, "Data segment exception while in user mode. "
1821 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1822 cpu_abort(cs
, "Instruction segment exception "
1823 "while in user mode. Aborting\n");
1825 /* PowerPC 64 with hypervisor mode support */
1826 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1827 cpu_abort(cs
, "Hypervisor decrementer interrupt "
1828 "while in user mode. Aborting\n");
1830 case POWERPC_EXCP_TRACE
: /* Trace exception */
1832 * we use this exception to emulate step-by-step execution mode.
1835 /* PowerPC 64 with hypervisor mode support */
1836 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1837 cpu_abort(cs
, "Hypervisor data storage exception "
1838 "while in user mode. Aborting\n");
1840 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1841 cpu_abort(cs
, "Hypervisor instruction storage exception "
1842 "while in user mode. Aborting\n");
1844 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1845 cpu_abort(cs
, "Hypervisor data segment exception "
1846 "while in user mode. Aborting\n");
1848 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1849 cpu_abort(cs
, "Hypervisor instruction segment exception "
1850 "while in user mode. Aborting\n");
1852 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1853 EXCP_DUMP(env
, "No Altivec instructions allowed\n");
1854 info
.si_signo
= TARGET_SIGILL
;
1856 info
.si_code
= TARGET_ILL_COPROC
;
1857 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1858 queue_signal(env
, info
.si_signo
, &info
);
1860 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1861 cpu_abort(cs
, "Programmable interval timer interrupt "
1862 "while in user mode. Aborting\n");
1864 case POWERPC_EXCP_IO
: /* IO error exception */
1865 cpu_abort(cs
, "IO error exception while in user mode. "
1868 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1869 cpu_abort(cs
, "Run mode exception while in user mode. "
1872 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1873 cpu_abort(cs
, "Emulation trap exception not handled\n");
1875 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1876 cpu_abort(cs
, "Instruction fetch TLB exception "
1877 "while in user-mode. Aborting");
1879 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1880 cpu_abort(cs
, "Data load TLB exception while in user-mode. "
1883 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
1884 cpu_abort(cs
, "Data store TLB exception while in user-mode. "
1887 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
1888 cpu_abort(cs
, "Floating-point assist exception not handled\n");
1890 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1891 cpu_abort(cs
, "Instruction address breakpoint exception "
1894 case POWERPC_EXCP_SMI
: /* System management interrupt */
1895 cpu_abort(cs
, "System management interrupt while in user mode. "
1898 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1899 cpu_abort(cs
, "Thermal interrupt interrupt while in user mode. "
1902 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
1903 cpu_abort(cs
, "Performance monitor exception not handled\n");
1905 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1906 cpu_abort(cs
, "Vector assist exception not handled\n");
1908 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
1909 cpu_abort(cs
, "Soft patch exception not handled\n");
1911 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1912 cpu_abort(cs
, "Maintenance exception while in user mode. "
1915 case POWERPC_EXCP_STOP
: /* stop translation */
1916 /* We did invalidate the instruction cache. Go on */
1918 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
1919 /* We just stopped because of a branch. Go on */
1921 case POWERPC_EXCP_SYSCALL_USER
:
1922 /* system call in user-mode emulation */
1924 * PPC ABI uses overflow flag in cr0 to signal an error
1927 env
->crf
[0] &= ~0x1;
1928 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
1929 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
1931 if (ret
== (target_ulong
)(-TARGET_QEMU_ESIGRETURN
)) {
1932 /* Returning from a successful sigreturn syscall.
1933 Avoid corrupting register state. */
1936 if (ret
> (target_ulong
)(-515)) {
1942 case POWERPC_EXCP_STCX
:
1943 if (do_store_exclusive(env
)) {
1944 info
.si_signo
= TARGET_SIGSEGV
;
1946 info
.si_code
= TARGET_SEGV_MAPERR
;
1947 info
._sifields
._sigfault
._addr
= env
->nip
;
1948 queue_signal(env
, info
.si_signo
, &info
);
1955 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1957 info
.si_signo
= sig
;
1959 info
.si_code
= TARGET_TRAP_BRKPT
;
1960 queue_signal(env
, info
.si_signo
, &info
);
1964 case EXCP_INTERRUPT
:
1965 /* just indicate that signals should be handled asap */
1968 cpu_abort(cs
, "Unknown exception 0x%d. Aborting\n", trapnr
);
1971 process_pending_signals(env
);
1978 # ifdef TARGET_ABI_MIPSO32
1979 # define MIPS_SYS(name, args) args,
1980 static const uint8_t mips_syscall_args
[] = {
1981 MIPS_SYS(sys_syscall
, 8) /* 4000 */
1982 MIPS_SYS(sys_exit
, 1)
1983 MIPS_SYS(sys_fork
, 0)
1984 MIPS_SYS(sys_read
, 3)
1985 MIPS_SYS(sys_write
, 3)
1986 MIPS_SYS(sys_open
, 3) /* 4005 */
1987 MIPS_SYS(sys_close
, 1)
1988 MIPS_SYS(sys_waitpid
, 3)
1989 MIPS_SYS(sys_creat
, 2)
1990 MIPS_SYS(sys_link
, 2)
1991 MIPS_SYS(sys_unlink
, 1) /* 4010 */
1992 MIPS_SYS(sys_execve
, 0)
1993 MIPS_SYS(sys_chdir
, 1)
1994 MIPS_SYS(sys_time
, 1)
1995 MIPS_SYS(sys_mknod
, 3)
1996 MIPS_SYS(sys_chmod
, 2) /* 4015 */
1997 MIPS_SYS(sys_lchown
, 3)
1998 MIPS_SYS(sys_ni_syscall
, 0)
1999 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_stat */
2000 MIPS_SYS(sys_lseek
, 3)
2001 MIPS_SYS(sys_getpid
, 0) /* 4020 */
2002 MIPS_SYS(sys_mount
, 5)
2003 MIPS_SYS(sys_umount
, 1)
2004 MIPS_SYS(sys_setuid
, 1)
2005 MIPS_SYS(sys_getuid
, 0)
2006 MIPS_SYS(sys_stime
, 1) /* 4025 */
2007 MIPS_SYS(sys_ptrace
, 4)
2008 MIPS_SYS(sys_alarm
, 1)
2009 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_fstat */
2010 MIPS_SYS(sys_pause
, 0)
2011 MIPS_SYS(sys_utime
, 2) /* 4030 */
2012 MIPS_SYS(sys_ni_syscall
, 0)
2013 MIPS_SYS(sys_ni_syscall
, 0)
2014 MIPS_SYS(sys_access
, 2)
2015 MIPS_SYS(sys_nice
, 1)
2016 MIPS_SYS(sys_ni_syscall
, 0) /* 4035 */
2017 MIPS_SYS(sys_sync
, 0)
2018 MIPS_SYS(sys_kill
, 2)
2019 MIPS_SYS(sys_rename
, 2)
2020 MIPS_SYS(sys_mkdir
, 2)
2021 MIPS_SYS(sys_rmdir
, 1) /* 4040 */
2022 MIPS_SYS(sys_dup
, 1)
2023 MIPS_SYS(sys_pipe
, 0)
2024 MIPS_SYS(sys_times
, 1)
2025 MIPS_SYS(sys_ni_syscall
, 0)
2026 MIPS_SYS(sys_brk
, 1) /* 4045 */
2027 MIPS_SYS(sys_setgid
, 1)
2028 MIPS_SYS(sys_getgid
, 0)
2029 MIPS_SYS(sys_ni_syscall
, 0) /* was signal(2) */
2030 MIPS_SYS(sys_geteuid
, 0)
2031 MIPS_SYS(sys_getegid
, 0) /* 4050 */
2032 MIPS_SYS(sys_acct
, 0)
2033 MIPS_SYS(sys_umount2
, 2)
2034 MIPS_SYS(sys_ni_syscall
, 0)
2035 MIPS_SYS(sys_ioctl
, 3)
2036 MIPS_SYS(sys_fcntl
, 3) /* 4055 */
2037 MIPS_SYS(sys_ni_syscall
, 2)
2038 MIPS_SYS(sys_setpgid
, 2)
2039 MIPS_SYS(sys_ni_syscall
, 0)
2040 MIPS_SYS(sys_olduname
, 1)
2041 MIPS_SYS(sys_umask
, 1) /* 4060 */
2042 MIPS_SYS(sys_chroot
, 1)
2043 MIPS_SYS(sys_ustat
, 2)
2044 MIPS_SYS(sys_dup2
, 2)
2045 MIPS_SYS(sys_getppid
, 0)
2046 MIPS_SYS(sys_getpgrp
, 0) /* 4065 */
2047 MIPS_SYS(sys_setsid
, 0)
2048 MIPS_SYS(sys_sigaction
, 3)
2049 MIPS_SYS(sys_sgetmask
, 0)
2050 MIPS_SYS(sys_ssetmask
, 1)
2051 MIPS_SYS(sys_setreuid
, 2) /* 4070 */
2052 MIPS_SYS(sys_setregid
, 2)
2053 MIPS_SYS(sys_sigsuspend
, 0)
2054 MIPS_SYS(sys_sigpending
, 1)
2055 MIPS_SYS(sys_sethostname
, 2)
2056 MIPS_SYS(sys_setrlimit
, 2) /* 4075 */
2057 MIPS_SYS(sys_getrlimit
, 2)
2058 MIPS_SYS(sys_getrusage
, 2)
2059 MIPS_SYS(sys_gettimeofday
, 2)
2060 MIPS_SYS(sys_settimeofday
, 2)
2061 MIPS_SYS(sys_getgroups
, 2) /* 4080 */
2062 MIPS_SYS(sys_setgroups
, 2)
2063 MIPS_SYS(sys_ni_syscall
, 0) /* old_select */
2064 MIPS_SYS(sys_symlink
, 2)
2065 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_lstat */
2066 MIPS_SYS(sys_readlink
, 3) /* 4085 */
2067 MIPS_SYS(sys_uselib
, 1)
2068 MIPS_SYS(sys_swapon
, 2)
2069 MIPS_SYS(sys_reboot
, 3)
2070 MIPS_SYS(old_readdir
, 3)
2071 MIPS_SYS(old_mmap
, 6) /* 4090 */
2072 MIPS_SYS(sys_munmap
, 2)
2073 MIPS_SYS(sys_truncate
, 2)
2074 MIPS_SYS(sys_ftruncate
, 2)
2075 MIPS_SYS(sys_fchmod
, 2)
2076 MIPS_SYS(sys_fchown
, 3) /* 4095 */
2077 MIPS_SYS(sys_getpriority
, 2)
2078 MIPS_SYS(sys_setpriority
, 3)
2079 MIPS_SYS(sys_ni_syscall
, 0)
2080 MIPS_SYS(sys_statfs
, 2)
2081 MIPS_SYS(sys_fstatfs
, 2) /* 4100 */
2082 MIPS_SYS(sys_ni_syscall
, 0) /* was ioperm(2) */
2083 MIPS_SYS(sys_socketcall
, 2)
2084 MIPS_SYS(sys_syslog
, 3)
2085 MIPS_SYS(sys_setitimer
, 3)
2086 MIPS_SYS(sys_getitimer
, 2) /* 4105 */
2087 MIPS_SYS(sys_newstat
, 2)
2088 MIPS_SYS(sys_newlstat
, 2)
2089 MIPS_SYS(sys_newfstat
, 2)
2090 MIPS_SYS(sys_uname
, 1)
2091 MIPS_SYS(sys_ni_syscall
, 0) /* 4110 was iopl(2) */
2092 MIPS_SYS(sys_vhangup
, 0)
2093 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_idle() */
2094 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_vm86 */
2095 MIPS_SYS(sys_wait4
, 4)
2096 MIPS_SYS(sys_swapoff
, 1) /* 4115 */
2097 MIPS_SYS(sys_sysinfo
, 1)
2098 MIPS_SYS(sys_ipc
, 6)
2099 MIPS_SYS(sys_fsync
, 1)
2100 MIPS_SYS(sys_sigreturn
, 0)
2101 MIPS_SYS(sys_clone
, 6) /* 4120 */
2102 MIPS_SYS(sys_setdomainname
, 2)
2103 MIPS_SYS(sys_newuname
, 1)
2104 MIPS_SYS(sys_ni_syscall
, 0) /* sys_modify_ldt */
2105 MIPS_SYS(sys_adjtimex
, 1)
2106 MIPS_SYS(sys_mprotect
, 3) /* 4125 */
2107 MIPS_SYS(sys_sigprocmask
, 3)
2108 MIPS_SYS(sys_ni_syscall
, 0) /* was create_module */
2109 MIPS_SYS(sys_init_module
, 5)
2110 MIPS_SYS(sys_delete_module
, 1)
2111 MIPS_SYS(sys_ni_syscall
, 0) /* 4130 was get_kernel_syms */
2112 MIPS_SYS(sys_quotactl
, 0)
2113 MIPS_SYS(sys_getpgid
, 1)
2114 MIPS_SYS(sys_fchdir
, 1)
2115 MIPS_SYS(sys_bdflush
, 2)
2116 MIPS_SYS(sys_sysfs
, 3) /* 4135 */
2117 MIPS_SYS(sys_personality
, 1)
2118 MIPS_SYS(sys_ni_syscall
, 0) /* for afs_syscall */
2119 MIPS_SYS(sys_setfsuid
, 1)
2120 MIPS_SYS(sys_setfsgid
, 1)
2121 MIPS_SYS(sys_llseek
, 5) /* 4140 */
2122 MIPS_SYS(sys_getdents
, 3)
2123 MIPS_SYS(sys_select
, 5)
2124 MIPS_SYS(sys_flock
, 2)
2125 MIPS_SYS(sys_msync
, 3)
2126 MIPS_SYS(sys_readv
, 3) /* 4145 */
2127 MIPS_SYS(sys_writev
, 3)
2128 MIPS_SYS(sys_cacheflush
, 3)
2129 MIPS_SYS(sys_cachectl
, 3)
2130 MIPS_SYS(sys_sysmips
, 4)
2131 MIPS_SYS(sys_ni_syscall
, 0) /* 4150 */
2132 MIPS_SYS(sys_getsid
, 1)
2133 MIPS_SYS(sys_fdatasync
, 0)
2134 MIPS_SYS(sys_sysctl
, 1)
2135 MIPS_SYS(sys_mlock
, 2)
2136 MIPS_SYS(sys_munlock
, 2) /* 4155 */
2137 MIPS_SYS(sys_mlockall
, 1)
2138 MIPS_SYS(sys_munlockall
, 0)
2139 MIPS_SYS(sys_sched_setparam
, 2)
2140 MIPS_SYS(sys_sched_getparam
, 2)
2141 MIPS_SYS(sys_sched_setscheduler
, 3) /* 4160 */
2142 MIPS_SYS(sys_sched_getscheduler
, 1)
2143 MIPS_SYS(sys_sched_yield
, 0)
2144 MIPS_SYS(sys_sched_get_priority_max
, 1)
2145 MIPS_SYS(sys_sched_get_priority_min
, 1)
2146 MIPS_SYS(sys_sched_rr_get_interval
, 2) /* 4165 */
2147 MIPS_SYS(sys_nanosleep
, 2)
2148 MIPS_SYS(sys_mremap
, 5)
2149 MIPS_SYS(sys_accept
, 3)
2150 MIPS_SYS(sys_bind
, 3)
2151 MIPS_SYS(sys_connect
, 3) /* 4170 */
2152 MIPS_SYS(sys_getpeername
, 3)
2153 MIPS_SYS(sys_getsockname
, 3)
2154 MIPS_SYS(sys_getsockopt
, 5)
2155 MIPS_SYS(sys_listen
, 2)
2156 MIPS_SYS(sys_recv
, 4) /* 4175 */
2157 MIPS_SYS(sys_recvfrom
, 6)
2158 MIPS_SYS(sys_recvmsg
, 3)
2159 MIPS_SYS(sys_send
, 4)
2160 MIPS_SYS(sys_sendmsg
, 3)
2161 MIPS_SYS(sys_sendto
, 6) /* 4180 */
2162 MIPS_SYS(sys_setsockopt
, 5)
2163 MIPS_SYS(sys_shutdown
, 2)
2164 MIPS_SYS(sys_socket
, 3)
2165 MIPS_SYS(sys_socketpair
, 4)
2166 MIPS_SYS(sys_setresuid
, 3) /* 4185 */
2167 MIPS_SYS(sys_getresuid
, 3)
2168 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_query_module */
2169 MIPS_SYS(sys_poll
, 3)
2170 MIPS_SYS(sys_nfsservctl
, 3)
2171 MIPS_SYS(sys_setresgid
, 3) /* 4190 */
2172 MIPS_SYS(sys_getresgid
, 3)
2173 MIPS_SYS(sys_prctl
, 5)
2174 MIPS_SYS(sys_rt_sigreturn
, 0)
2175 MIPS_SYS(sys_rt_sigaction
, 4)
2176 MIPS_SYS(sys_rt_sigprocmask
, 4) /* 4195 */
2177 MIPS_SYS(sys_rt_sigpending
, 2)
2178 MIPS_SYS(sys_rt_sigtimedwait
, 4)
2179 MIPS_SYS(sys_rt_sigqueueinfo
, 3)
2180 MIPS_SYS(sys_rt_sigsuspend
, 0)
2181 MIPS_SYS(sys_pread64
, 6) /* 4200 */
2182 MIPS_SYS(sys_pwrite64
, 6)
2183 MIPS_SYS(sys_chown
, 3)
2184 MIPS_SYS(sys_getcwd
, 2)
2185 MIPS_SYS(sys_capget
, 2)
2186 MIPS_SYS(sys_capset
, 2) /* 4205 */
2187 MIPS_SYS(sys_sigaltstack
, 2)
2188 MIPS_SYS(sys_sendfile
, 4)
2189 MIPS_SYS(sys_ni_syscall
, 0)
2190 MIPS_SYS(sys_ni_syscall
, 0)
2191 MIPS_SYS(sys_mmap2
, 6) /* 4210 */
2192 MIPS_SYS(sys_truncate64
, 4)
2193 MIPS_SYS(sys_ftruncate64
, 4)
2194 MIPS_SYS(sys_stat64
, 2)
2195 MIPS_SYS(sys_lstat64
, 2)
2196 MIPS_SYS(sys_fstat64
, 2) /* 4215 */
2197 MIPS_SYS(sys_pivot_root
, 2)
2198 MIPS_SYS(sys_mincore
, 3)
2199 MIPS_SYS(sys_madvise
, 3)
2200 MIPS_SYS(sys_getdents64
, 3)
2201 MIPS_SYS(sys_fcntl64
, 3) /* 4220 */
2202 MIPS_SYS(sys_ni_syscall
, 0)
2203 MIPS_SYS(sys_gettid
, 0)
2204 MIPS_SYS(sys_readahead
, 5)
2205 MIPS_SYS(sys_setxattr
, 5)
2206 MIPS_SYS(sys_lsetxattr
, 5) /* 4225 */
2207 MIPS_SYS(sys_fsetxattr
, 5)
2208 MIPS_SYS(sys_getxattr
, 4)
2209 MIPS_SYS(sys_lgetxattr
, 4)
2210 MIPS_SYS(sys_fgetxattr
, 4)
2211 MIPS_SYS(sys_listxattr
, 3) /* 4230 */
2212 MIPS_SYS(sys_llistxattr
, 3)
2213 MIPS_SYS(sys_flistxattr
, 3)
2214 MIPS_SYS(sys_removexattr
, 2)
2215 MIPS_SYS(sys_lremovexattr
, 2)
2216 MIPS_SYS(sys_fremovexattr
, 2) /* 4235 */
2217 MIPS_SYS(sys_tkill
, 2)
2218 MIPS_SYS(sys_sendfile64
, 5)
2219 MIPS_SYS(sys_futex
, 6)
2220 MIPS_SYS(sys_sched_setaffinity
, 3)
2221 MIPS_SYS(sys_sched_getaffinity
, 3) /* 4240 */
2222 MIPS_SYS(sys_io_setup
, 2)
2223 MIPS_SYS(sys_io_destroy
, 1)
2224 MIPS_SYS(sys_io_getevents
, 5)
2225 MIPS_SYS(sys_io_submit
, 3)
2226 MIPS_SYS(sys_io_cancel
, 3) /* 4245 */
2227 MIPS_SYS(sys_exit_group
, 1)
2228 MIPS_SYS(sys_lookup_dcookie
, 3)
2229 MIPS_SYS(sys_epoll_create
, 1)
2230 MIPS_SYS(sys_epoll_ctl
, 4)
2231 MIPS_SYS(sys_epoll_wait
, 3) /* 4250 */
2232 MIPS_SYS(sys_remap_file_pages
, 5)
2233 MIPS_SYS(sys_set_tid_address
, 1)
2234 MIPS_SYS(sys_restart_syscall
, 0)
2235 MIPS_SYS(sys_fadvise64_64
, 7)
2236 MIPS_SYS(sys_statfs64
, 3) /* 4255 */
2237 MIPS_SYS(sys_fstatfs64
, 2)
2238 MIPS_SYS(sys_timer_create
, 3)
2239 MIPS_SYS(sys_timer_settime
, 4)
2240 MIPS_SYS(sys_timer_gettime
, 2)
2241 MIPS_SYS(sys_timer_getoverrun
, 1) /* 4260 */
2242 MIPS_SYS(sys_timer_delete
, 1)
2243 MIPS_SYS(sys_clock_settime
, 2)
2244 MIPS_SYS(sys_clock_gettime
, 2)
2245 MIPS_SYS(sys_clock_getres
, 2)
2246 MIPS_SYS(sys_clock_nanosleep
, 4) /* 4265 */
2247 MIPS_SYS(sys_tgkill
, 3)
2248 MIPS_SYS(sys_utimes
, 2)
2249 MIPS_SYS(sys_mbind
, 4)
2250 MIPS_SYS(sys_ni_syscall
, 0) /* sys_get_mempolicy */
2251 MIPS_SYS(sys_ni_syscall
, 0) /* 4270 sys_set_mempolicy */
2252 MIPS_SYS(sys_mq_open
, 4)
2253 MIPS_SYS(sys_mq_unlink
, 1)
2254 MIPS_SYS(sys_mq_timedsend
, 5)
2255 MIPS_SYS(sys_mq_timedreceive
, 5)
2256 MIPS_SYS(sys_mq_notify
, 2) /* 4275 */
2257 MIPS_SYS(sys_mq_getsetattr
, 3)
2258 MIPS_SYS(sys_ni_syscall
, 0) /* sys_vserver */
2259 MIPS_SYS(sys_waitid
, 4)
2260 MIPS_SYS(sys_ni_syscall
, 0) /* available, was setaltroot */
2261 MIPS_SYS(sys_add_key
, 5)
2262 MIPS_SYS(sys_request_key
, 4)
2263 MIPS_SYS(sys_keyctl
, 5)
2264 MIPS_SYS(sys_set_thread_area
, 1)
2265 MIPS_SYS(sys_inotify_init
, 0)
2266 MIPS_SYS(sys_inotify_add_watch
, 3) /* 4285 */
2267 MIPS_SYS(sys_inotify_rm_watch
, 2)
2268 MIPS_SYS(sys_migrate_pages
, 4)
2269 MIPS_SYS(sys_openat
, 4)
2270 MIPS_SYS(sys_mkdirat
, 3)
2271 MIPS_SYS(sys_mknodat
, 4) /* 4290 */
2272 MIPS_SYS(sys_fchownat
, 5)
2273 MIPS_SYS(sys_futimesat
, 3)
2274 MIPS_SYS(sys_fstatat64
, 4)
2275 MIPS_SYS(sys_unlinkat
, 3)
2276 MIPS_SYS(sys_renameat
, 4) /* 4295 */
2277 MIPS_SYS(sys_linkat
, 5)
2278 MIPS_SYS(sys_symlinkat
, 3)
2279 MIPS_SYS(sys_readlinkat
, 4)
2280 MIPS_SYS(sys_fchmodat
, 3)
2281 MIPS_SYS(sys_faccessat
, 3) /* 4300 */
2282 MIPS_SYS(sys_pselect6
, 6)
2283 MIPS_SYS(sys_ppoll
, 5)
2284 MIPS_SYS(sys_unshare
, 1)
2285 MIPS_SYS(sys_splice
, 6)
2286 MIPS_SYS(sys_sync_file_range
, 7) /* 4305 */
2287 MIPS_SYS(sys_tee
, 4)
2288 MIPS_SYS(sys_vmsplice
, 4)
2289 MIPS_SYS(sys_move_pages
, 6)
2290 MIPS_SYS(sys_set_robust_list
, 2)
2291 MIPS_SYS(sys_get_robust_list
, 3) /* 4310 */
2292 MIPS_SYS(sys_kexec_load
, 4)
2293 MIPS_SYS(sys_getcpu
, 3)
2294 MIPS_SYS(sys_epoll_pwait
, 6)
2295 MIPS_SYS(sys_ioprio_set
, 3)
2296 MIPS_SYS(sys_ioprio_get
, 2)
2297 MIPS_SYS(sys_utimensat
, 4)
2298 MIPS_SYS(sys_signalfd
, 3)
2299 MIPS_SYS(sys_ni_syscall
, 0) /* was timerfd */
2300 MIPS_SYS(sys_eventfd
, 1)
2301 MIPS_SYS(sys_fallocate
, 6) /* 4320 */
2302 MIPS_SYS(sys_timerfd_create
, 2)
2303 MIPS_SYS(sys_timerfd_gettime
, 2)
2304 MIPS_SYS(sys_timerfd_settime
, 4)
2305 MIPS_SYS(sys_signalfd4
, 4)
2306 MIPS_SYS(sys_eventfd2
, 2) /* 4325 */
2307 MIPS_SYS(sys_epoll_create1
, 1)
2308 MIPS_SYS(sys_dup3
, 3)
2309 MIPS_SYS(sys_pipe2
, 2)
2310 MIPS_SYS(sys_inotify_init1
, 1)
2311 MIPS_SYS(sys_preadv
, 6) /* 4330 */
2312 MIPS_SYS(sys_pwritev
, 6)
2313 MIPS_SYS(sys_rt_tgsigqueueinfo
, 4)
2314 MIPS_SYS(sys_perf_event_open
, 5)
2315 MIPS_SYS(sys_accept4
, 4)
2316 MIPS_SYS(sys_recvmmsg
, 5) /* 4335 */
2317 MIPS_SYS(sys_fanotify_init
, 2)
2318 MIPS_SYS(sys_fanotify_mark
, 6)
2319 MIPS_SYS(sys_prlimit64
, 4)
2320 MIPS_SYS(sys_name_to_handle_at
, 5)
2321 MIPS_SYS(sys_open_by_handle_at
, 3) /* 4340 */
2322 MIPS_SYS(sys_clock_adjtime
, 2)
2323 MIPS_SYS(sys_syncfs
, 1)
2328 static int do_store_exclusive(CPUMIPSState
*env
)
2331 target_ulong page_addr
;
2339 page_addr
= addr
& TARGET_PAGE_MASK
;
2342 flags
= page_get_flags(page_addr
);
2343 if ((flags
& PAGE_READ
) == 0) {
2346 reg
= env
->llreg
& 0x1f;
2347 d
= (env
->llreg
& 0x20) != 0;
2349 segv
= get_user_s64(val
, addr
);
2351 segv
= get_user_s32(val
, addr
);
2354 if (val
!= env
->llval
) {
2355 env
->active_tc
.gpr
[reg
] = 0;
2358 segv
= put_user_u64(env
->llnewval
, addr
);
2360 segv
= put_user_u32(env
->llnewval
, addr
);
2363 env
->active_tc
.gpr
[reg
] = 1;
2370 env
->active_tc
.PC
+= 4;
2383 static int do_break(CPUMIPSState
*env
, target_siginfo_t
*info
,
2391 info
->si_signo
= TARGET_SIGFPE
;
2393 info
->si_code
= (code
== BRK_OVERFLOW
) ? FPE_INTOVF
: FPE_INTDIV
;
2394 queue_signal(env
, info
->si_signo
, &*info
);
2398 info
->si_signo
= TARGET_SIGTRAP
;
2400 queue_signal(env
, info
->si_signo
, &*info
);
void cpu_loop(CPUMIPSState *env)
    CPUState *cs = CPU(mips_env_get_cpu(env));
    target_siginfo_t info;
# ifdef TARGET_ABI_MIPSO32
    unsigned int syscall_num;

        trapnr = cpu_mips_exec(cs);

            env->active_tc.PC += 4;
# ifdef TARGET_ABI_MIPSO32
            syscall_num = env->active_tc.gpr[2] - 4000;
            if (syscall_num >= sizeof(mips_syscall_args)) {
                ret = -TARGET_ENOSYS;
                abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;

                nb_args = mips_syscall_args[syscall_num];
                sp_reg = env->active_tc.gpr[29];
                /* these arguments are taken from the stack */
                    if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) {
                    if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) {
                    if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) {
                    if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) {
                ret = do_syscall(env, env->active_tc.gpr[2],
                                 env->active_tc.gpr[4],
                                 env->active_tc.gpr[5],
                                 env->active_tc.gpr[6],
                                 env->active_tc.gpr[7],
                                 arg5, arg6, arg7, arg8);
            ret = do_syscall(env, env->active_tc.gpr[2],
                             env->active_tc.gpr[4], env->active_tc.gpr[5],
                             env->active_tc.gpr[6], env->active_tc.gpr[7],
                             env->active_tc.gpr[8], env->active_tc.gpr[9],
                             env->active_tc.gpr[10], env->active_tc.gpr[11]);
            if (ret == -TARGET_QEMU_ESIGRETURN) {
                /* Returning from a successful sigreturn syscall.
                   Avoid clobbering register state. */
            if ((abi_ulong)ret >= (abi_ulong)-1133) {
                env->active_tc.gpr[7] = 1; /* error flag */
                env->active_tc.gpr[7] = 0; /* error flag */
            env->active_tc.gpr[2] = ret;
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->CP0_BadVAddr;
            queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGILL;
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            if (do_store_exclusive(env)) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->active_tc.PC;
                queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            queue_signal(env, info.si_signo, &info);
            /* The code below was inspired by the MIPS Linux kernel trap
             * handling code in arch/mips/kernel/traps.c.
             */
                abi_ulong trap_instr;
                if (env->hflags & MIPS_HFLAG_M16) {
                    if (env->insn_flags & ASE_MICROMIPS) {
                        /* microMIPS mode */
                        ret = get_user_u16(trap_instr, env->active_tc.PC);
                        if ((trap_instr >> 10) == 0x11) {
                            /* 16-bit instruction */
                            code = trap_instr & 0xf;
                            /* 32-bit instruction */
                            ret = get_user_u16(instr_lo,
                                               env->active_tc.PC + 2);
                            trap_instr = (trap_instr << 16) | instr_lo;
                            code = ((trap_instr >> 6) & ((1 << 20) - 1));
                            /* Unfortunately, microMIPS also suffers from
                               the old assembler bug...  */
                            if (code >= (1 << 10)) {
                    ret = get_user_u16(trap_instr, env->active_tc.PC);
                    code = (trap_instr >> 6) & 0x3f;
                    ret = get_user_u32(trap_instr, env->active_tc.PC);
                /* As described in the original Linux kernel code, the
                 * below checks on 'code' are to work around an old
                 * assembly bug.
                 */
                code = ((trap_instr >> 6) & ((1 << 20) - 1));
                if (code >= (1 << 10)) {
            if (do_break(env, &info, code) != 0) {
                abi_ulong trap_instr;
                unsigned int code = 0;

                if (env->hflags & MIPS_HFLAG_M16) {
                    /* microMIPS mode */
                    ret = get_user_u16(instr[0], env->active_tc.PC) ||
                          get_user_u16(instr[1], env->active_tc.PC + 2);
                    trap_instr = (instr[0] << 16) | instr[1];
                    ret = get_user_u32(trap_instr, env->active_tc.PC);
                /* The immediate versions don't provide a code.  */
                if (!(trap_instr & 0xFC000000)) {
                    if (env->hflags & MIPS_HFLAG_M16) {
                        /* microMIPS mode */
                        code = ((trap_instr >> 12) & ((1 << 4) - 1));
                        code = ((trap_instr >> 6) & ((1 << 10) - 1));
            if (do_break(env, &info, code) != 0) {
            fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
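
/*
 * Illustrative sketch, not part of the original file: each per-target
 * cpu_loop() in this file follows the same shape -- run translated code,
 * map the returned trap number either onto do_syscall() or onto a queued
 * signal, then let process_pending_signals() deliver whatever is pending.
 * 'CPUFooState', 'cpu_foo_exec' and 'EXCP_FOO_SYSCALL' below are
 * placeholders for the target-specific names.
 */
#if 0
void cpu_loop_skeleton(CPUFooState *env)
{
    CPUState *cs = ENV_GET_CPU(env);
    target_siginfo_t info;
    int trapnr;

    for (;;) {
        trapnr = cpu_foo_exec(cs);
        switch (trapnr) {
        case EXCP_FOO_SYSCALL:
            /* marshal guest registers into do_syscall() */
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        default:
            /* describe the fault in 'info' and queue_signal() it */
            break;
        }
        process_pending_signals(env);
    }
}
#endif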
#ifdef TARGET_OPENRISC

void cpu_loop(CPUOpenRISCState *env)
    CPUState *cs = CPU(openrisc_env_get_cpu(env));

        trapnr = cpu_openrisc_exec(cs);

            qemu_log("\nReset request, exit, pc is %#x\n", env->pc);
            qemu_log("\nBus error, exit, pc is %#x\n", env->pc);
            gdbsig = TARGET_SIGBUS;
            cpu_dump_state(cs, stderr, fprintf, 0);
            gdbsig = TARGET_SIGSEGV;
            qemu_log("\nTick time interrupt pc is %#x\n", env->pc);
            qemu_log("\nAlignment pc is %#x\n", env->pc);
            gdbsig = TARGET_SIGBUS;
            qemu_log("\nIllegal instruction pc is %#x\n", env->pc);
            gdbsig = TARGET_SIGILL;
            qemu_log("\nExternal interrupt pc is %#x\n", env->pc);
            qemu_log("\nTLB miss\n");
            qemu_log("\nRange\n");
            gdbsig = TARGET_SIGSEGV;
            env->pc += 4; /* 0xc00; */
            env->gpr[11] = do_syscall(env,
                                      env->gpr[11], /* return value */
                                      env->gpr[3],  /* r3 - r7 are params */
            qemu_log("\nFloating point error\n");
            qemu_log("\nTrap\n");
            gdbsig = TARGET_SIGTRAP;
            qemu_log("\nqemu: unhandled CPU exception %#x - aborting\n",
            cpu_dump_state(cs, stderr, fprintf, 0);
            gdbsig = TARGET_SIGILL;

            gdb_handlesig(cs, gdbsig);
            if (gdbsig != TARGET_SIGTRAP) {

        process_pending_signals(env);

#endif /* TARGET_OPENRISC */
void cpu_loop(CPUSH4State *env)
    CPUState *cs = CPU(sh_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_sh4_exec(cs);

            ret = do_syscall(env,
            env->gregs[0] = ret;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->tea;
            queue_signal(env, info.si_signo, &info);
            printf ("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals (env);
void cpu_loop(CPUCRISState *env)
    CPUState *cs = CPU(cris_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_cris_exec(cs);

            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->pregs[PR_EDA];
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            ret = do_syscall(env,
            env->regs[10] = ret;
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            printf ("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals (env);
#ifdef TARGET_MICROBLAZE
void cpu_loop(CPUMBState *env)
    CPUState *cs = CPU(mb_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_mb_exec(cs);

            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            /* Return address is 4 bytes after the call. */
            env->sregs[SR_PC] = env->regs[14];
            ret = do_syscall(env,
            env->regs[17] = env->sregs[SR_PC] + 4;
            if (env->iflags & D_FLAG) {
                env->sregs[SR_ESR] |= 1 << 12;
                env->sregs[SR_PC] -= 4;
                /* FIXME: if branch was immed, replay the imm as well. */
            env->iflags &= ~(IMM_FLAG | D_FLAG);
            switch (env->sregs[SR_ESR] & 31) {
            case ESR_EC_DIVZERO:
                info.si_signo = TARGET_SIGFPE;
                info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, &info);
                info.si_signo = TARGET_SIGFPE;
                if (env->sregs[SR_FSR] & FSR_IO) {
                    info.si_code = TARGET_FPE_FLTINV;
                if (env->sregs[SR_FSR] & FSR_DZ) {
                    info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, &info);
                printf ("Unhandled hw-exception: 0x%x\n",
                        env->sregs[SR_ESR] & ESR_EC_MASK);
                cpu_dump_state(cs, stderr, fprintf, 0);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            printf ("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals (env);
void cpu_loop(CPUM68KState *env)
    CPUState *cs = CPU(m68k_env_get_cpu(env));
    target_siginfo_t info;
    TaskState *ts = cs->opaque;

        trapnr = cpu_m68k_exec(cs);

            if (ts->sim_syscalls) {
                get_user_u16(nr, env->pc + 2);
                do_m68k_simcall(env, nr);
        case EXCP_HALT_INSN:
            /* Semihosting syscall.  */
            do_m68k_semihosting(env, env->dregs[0]);
        case EXCP_UNSUPPORTED:
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            ts->sim_syscalls = 0;
            env->dregs[0] = do_syscall(env,
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->mmu.ar;
            queue_signal(env, info.si_signo, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);

#endif /* TARGET_M68K */
static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)
    target_ulong addr, val, tmp;
    target_siginfo_t info;

    addr = env->lock_addr;
    tmp = env->lock_st_addr;
    env->lock_addr = -1;
    env->lock_st_addr = 0;

    if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
    if (val == env->lock_value) {
        if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {

    info.si_signo = TARGET_SIGSEGV;
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, TARGET_SIGSEGV, &info);
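
/*
 * Illustrative sketch, not part of the original file: the Alpha loop below
 * resets env->lock_addr on every trap so that an LDx_L/STx_C pair that was
 * interrupted by a signal or fault is forced to fail and retry.  The helper
 * name is invented for this example.
 */
#if 0
static void sketch_invalidate_lock(CPUAlphaState *env)
{
    env->lock_addr = -1;    /* any subsequent STx_C will now fail */
}
#endif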
void cpu_loop(CPUAlphaState *env)
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_alpha_exec(cs);

        /* All of the traps imply a transition through PALcode, which
           implies an REI instruction has been executed.  Which means
           that the intr_flag should be cleared.  */

            fprintf(stderr, "Reset requested. Exit\n");
            fprintf(stderr, "Machine check exception. Exit\n");
        case EXCP_SMP_INTERRUPT:
        case EXCP_CLK_INTERRUPT:
        case EXCP_DEV_INTERRUPT:
            fprintf(stderr, "External interrupt. Exit\n");
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
                            ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_FLTINV;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            /* No-op.  Linux simply re-enables the FPU.  */
            env->lock_addr = -1;
            switch (env->error_code) {
                info.si_signo = TARGET_SIGTRAP;
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
                info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
                trapnr = env->ir[IR_V0];
                sysret = do_syscall(env, trapnr,
                                    env->ir[IR_A0], env->ir[IR_A1],
                                    env->ir[IR_A2], env->ir[IR_A3],
                                    env->ir[IR_A4], env->ir[IR_A5],
                if (trapnr == TARGET_NR_sigreturn
                    || trapnr == TARGET_NR_rt_sigreturn) {
                /* Syscall writes 0 to V0 to bypass error check, similar
                   to how this is handled internal to Linux kernel.
                   (Ab)use trapnr temporarily as boolean indicating error.  */
                trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
                env->ir[IR_V0] = (trapnr ? -sysret : sysret);
                env->ir[IR_A3] = trapnr;
                /* ??? We can probably elide the code using page_unprotect
                   that is checking for self-modifying code.  Instead we
                   could simply call tb_flush here.  Until we work out the
                   changes required to turn off the extra write protection,
                   this can be a no-op.  */
                /* Handled in the translator for usermode.  */
                /* Handled in the translator for usermode.  */
                info.si_signo = TARGET_SIGFPE;
                switch (env->ir[IR_A0]) {
                case TARGET_GEN_INTOVF:
                    info.si_code = TARGET_FPE_INTOVF;
                case TARGET_GEN_INTDIV:
                    info.si_code = TARGET_FPE_INTDIV;
                case TARGET_GEN_FLTOVF:
                    info.si_code = TARGET_FPE_FLTOVF;
                case TARGET_GEN_FLTUND:
                    info.si_code = TARGET_FPE_FLTUND;
                case TARGET_GEN_FLTINV:
                    info.si_code = TARGET_FPE_FLTINV;
                case TARGET_GEN_FLTINE:
                    info.si_code = TARGET_FPE_FLTRES;
                case TARGET_GEN_ROPRAND:
                    info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
            info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
            if (info.si_signo) {
                env->lock_addr = -1;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            printf ("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals (env);

#endif /* TARGET_ALPHA */
void cpu_loop(CPUS390XState *env)
    CPUState *cs = CPU(s390_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_s390x_exec(cs);

        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            n = env->int_svc_code;
                /* syscalls > 255 */
            env->psw.addr += env->int_svc_ilen;
            env->regs[2] = do_syscall(env, n, env->regs[2], env->regs[3],
                                      env->regs[4], env->regs[5],
                                      env->regs[6], env->regs[7], 0, 0);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            n = TARGET_TRAP_BRKPT;
            n = env->int_pgm_code;
            case PGM_PRIVILEGED:
                sig = TARGET_SIGILL;
                n = TARGET_ILL_ILLOPC;
            case PGM_PROTECTION:
            case PGM_ADDRESSING:
                sig = TARGET_SIGSEGV;
                /* XXX: check env->error_code */
                n = TARGET_SEGV_MAPERR;
                addr = env->__excp_addr;
            case PGM_SPECIFICATION:
            case PGM_SPECIAL_OP:
                sig = TARGET_SIGILL;
                n = TARGET_ILL_ILLOPN;
            case PGM_FIXPT_OVERFLOW:
                sig = TARGET_SIGFPE;
                n = TARGET_FPE_INTOVF;
            case PGM_FIXPT_DIVIDE:
                sig = TARGET_SIGFPE;
                n = TARGET_FPE_INTDIV;
                n = (env->fpc >> 8) & 0xff;
                    /* compare-and-trap */
                    /* An IEEE exception, simulated or otherwise.  */
                        n = TARGET_FPE_FLTINV;
                    } else if (n & 0x40) {
                        n = TARGET_FPE_FLTDIV;
                    } else if (n & 0x20) {
                        n = TARGET_FPE_FLTOVF;
                    } else if (n & 0x10) {
                        n = TARGET_FPE_FLTUND;
                    } else if (n & 0x08) {
                        n = TARGET_FPE_FLTRES;
                        /* ??? Quantum exception; BFP, DFP error.  */
                    sig = TARGET_SIGFPE;
                fprintf(stderr, "Unhandled program exception: %#x\n", n);
                cpu_dump_state(cs, stderr, fprintf, 0);
                addr = env->psw.addr;
            info.si_signo = sig;
            info._sifields._sigfault._addr = addr;
            queue_signal(env, info.si_signo, &info);
            fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals (env);

#endif /* TARGET_S390X */
THREAD CPUState *thread_cpu;

void task_settid(TaskState *ts)
{
    if (ts->ts_tid == 0) {
        ts->ts_tid = (pid_t)syscall(SYS_gettid);
    }
}
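
/*
 * Illustrative sketch, not part of the original file: glibc of this era does
 * not export a gettid() wrapper, hence the raw syscall in task_settid()
 * above.  The helper name is invented for this example.
 */
#if 0
static pid_t sketch_gettid(void)
{
    return (pid_t)syscall(SYS_gettid);  /* kernel thread id, not getpid() */
}
#endif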
void stop_all_tasks(void)
{
    /*
     * We trust that when using NPTL, start_exclusive()
     * handles thread stopping correctly.
     */
    start_exclusive();
}

/* Assumes contents are already zeroed.  */
void init_task_state(TaskState *ts)
{
    int i;

    ts->used = 1;
    ts->first_free = ts->sigqueue_table;
    for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
        ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1];
    }
    ts->sigqueue_table[i].next = NULL;
}
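
/*
 * Illustrative sketch, not part of the original file: the singly linked free
 * list built by init_task_state() is meant to be consumed like a stack,
 * roughly as below (assuming the element type is 'struct sigqueue'); the
 * helper name is invented for this example.
 */
#if 0
static struct sigqueue *sketch_alloc_sigqueue(TaskState *ts)
{
    struct sigqueue *q = ts->first_free;

    if (q) {
        ts->first_free = q->next;   /* pop one entry off the free list */
    }
    return q;
}
#endif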
CPUArchState *cpu_copy(CPUArchState *env)
    CPUState *cpu = ENV_GET_CPU(env);
    CPUState *new_cpu = cpu_init(cpu_model);
    CPUArchState *new_env = new_cpu->env_ptr;

    /* Reset non arch specific state */

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_cpu->breakpoints);
    QTAILQ_INIT(&new_cpu->watchpoints);
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
static void handle_arg_help(const char *arg)

static void handle_arg_log(const char *arg)
    mask = qemu_str_to_log_mask(arg);
        qemu_print_log_usage(stdout);

static void handle_arg_log_filename(const char *arg)
    qemu_set_log_filename(arg);

static void handle_arg_set_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_setenv(envlist, token) != 0) {

static void handle_arg_unset_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_unsetenv(envlist, token) != 0) {

static void handle_arg_argv0(const char *arg)
    argv0 = strdup(arg);

static void handle_arg_stack_size(const char *arg)
    guest_stack_size = strtoul(arg, &p, 0);
    if (guest_stack_size == 0) {

        guest_stack_size *= 1024 * 1024;
    } else if (*p == 'k' || *p == 'K') {
        guest_stack_size *= 1024;
static void handle_arg_ld_prefix(const char *arg)
    interp_prefix = strdup(arg);

static void handle_arg_pagesize(const char *arg)
    qemu_host_page_size = atoi(arg);
    if (qemu_host_page_size == 0 ||
        (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
        fprintf(stderr, "page size must be a power of two\n");

static void handle_arg_randseed(const char *arg)
    unsigned long long seed;

    if (parse_uint_full(arg, &seed, 0) != 0 || seed > UINT_MAX) {
        fprintf(stderr, "Invalid seed number: %s\n", arg);

static void handle_arg_gdb(const char *arg)
    gdbstub_port = atoi(arg);

static void handle_arg_uname(const char *arg)
    qemu_uname_release = strdup(arg);

static void handle_arg_cpu(const char *arg)
    cpu_model = strdup(arg);
    if (cpu_model == NULL || is_help_option(cpu_model)) {
        /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
        cpu_list(stdout, &fprintf);

#if defined(CONFIG_USE_GUEST_BASE)
static void handle_arg_guest_base(const char *arg)
    guest_base = strtol(arg, NULL, 0);
    have_guest_base = 1;

static void handle_arg_reserved_va(const char *arg)
    reserved_va = strtoul(arg, &p, 0);
        unsigned long unshifted = reserved_va;

        reserved_va <<= shift;
        if (((reserved_va >> shift) != unshifted)
#if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
            || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
            fprintf(stderr, "Reserved virtual address too big\n");
        fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
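
/*
 * Illustrative sketch, not part of the original file: handle_arg_reserved_va()
 * above accepts an optional k/K, M or G suffix by shifting the parsed value,
 * so "-R 1G" and "-R 0x40000000" request the same reservation.  The helper
 * below mirrors that convention; its name is invented for this example.
 */
#if 0
static unsigned long sketch_apply_size_suffix(unsigned long val, char suffix)
{
    switch (suffix) {
    case 'k':
    case 'K':
        return val << 10;
    case 'M':
        return val << 20;
    case 'G':
        return val << 30;
    default:
        return val;             /* no suffix: the value is in bytes */
    }
}
#endif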
static void handle_arg_singlestep(const char *arg)

static void handle_arg_strace(const char *arg)

static void handle_arg_version(const char *arg)
    printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION
           ", Copyright (c) 2003-2008 Fabrice Bellard\n");

struct qemu_argument {
    void (*handle_opt)(const char *arg);
    const char *example;

static const struct qemu_argument arg_table[] = {
    {"h",          "",                 false, handle_arg_help,
     "",           "print this help"},
    {"g",          "QEMU_GDB",         true,  handle_arg_gdb,
     "port",       "wait gdb connection to 'port'"},
    {"L",          "QEMU_LD_PREFIX",   true,  handle_arg_ld_prefix,
     "path",       "set the elf interpreter prefix to 'path'"},
    {"s",          "QEMU_STACK_SIZE",  true,  handle_arg_stack_size,
     "size",       "set the stack size to 'size' bytes"},
    {"cpu",        "QEMU_CPU",         true,  handle_arg_cpu,
     "model",      "select CPU (-cpu help for list)"},
    {"E",          "QEMU_SET_ENV",     true,  handle_arg_set_env,
     "var=value",  "sets targets environment variable (see below)"},
    {"U",          "QEMU_UNSET_ENV",   true,  handle_arg_unset_env,
     "var",        "unsets targets environment variable (see below)"},
    {"0",          "QEMU_ARGV0",       true,  handle_arg_argv0,
     "argv0",      "forces target process argv[0] to be 'argv0'"},
    {"r",          "QEMU_UNAME",       true,  handle_arg_uname,
     "uname",      "set qemu uname release string to 'uname'"},
#if defined(CONFIG_USE_GUEST_BASE)
    {"B",          "QEMU_GUEST_BASE",  true,  handle_arg_guest_base,
     "address",    "set guest_base address to 'address'"},
    {"R",          "QEMU_RESERVED_VA", true,  handle_arg_reserved_va,
     "size",       "reserve 'size' bytes for guest virtual address space"},
#endif
    {"d",          "QEMU_LOG",         true,  handle_arg_log,
     "item[,...]", "enable logging of specified items "
                   "(use '-d help' for a list of items)"},
    {"D",          "QEMU_LOG_FILENAME", true, handle_arg_log_filename,
     "logfile",    "write logs to 'logfile' (default stderr)"},
    {"p",          "QEMU_PAGESIZE",    true,  handle_arg_pagesize,
     "pagesize",   "set the host page size to 'pagesize'"},
    {"singlestep", "QEMU_SINGLESTEP",  false, handle_arg_singlestep,
     "",           "run in singlestep mode"},
    {"strace",     "QEMU_STRACE",      false, handle_arg_strace,
     "",           "log system calls"},
    {"seed",       "QEMU_RAND_SEED",   true,  handle_arg_randseed,
     "",           "Seed for pseudo-random number generator"},
    {"version",    "QEMU_VERSION",     false, handle_arg_version,
     "",           "display version information and exit"},
    {NULL, NULL, false, NULL, NULL, NULL}
};
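
/*
 * Illustrative sketch, not part of the original file: a new command line
 * switch is added by writing a handler with this signature and appending one
 * row to arg_table[] before the NULL terminator.  The "-trace-foo" option,
 * QEMU_TRACE_FOO variable and handle_arg_trace_foo() are invented for this
 * example.
 */
#if 0
static void handle_arg_trace_foo(const char *arg)
{
    /* a real handler would record 'arg' in a global, as the ones above do */
}

/* row to splice into arg_table[]:
 *  {"trace-foo",  "QEMU_TRACE_FOO",   true,  handle_arg_trace_foo,
 *   "item",       "example of a new option"},
 */
#endif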
static void usage(void)
    const struct qemu_argument *arginfo;

    printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n"
           "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n"
           "Options and associated environment variables:\n"

    /* Calculate column widths. We must always have at least enough space
     * for the column header.
     */
    maxarglen = strlen("Argument");
    maxenvlen = strlen("Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        int arglen = strlen(arginfo->argv);
        if (arginfo->has_arg) {
            arglen += strlen(arginfo->example) + 1;
        if (strlen(arginfo->env) > maxenvlen) {
            maxenvlen = strlen(arginfo->env);
        if (arglen > maxarglen) {

    printf("%-*s %-*s Description\n", maxarglen + 1, "Argument",
           maxenvlen, "Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->has_arg) {
            printf("-%s %-*s %-*s %s\n", arginfo->argv,
                   (int)(maxarglen - strlen(arginfo->argv) - 1),
                   arginfo->example, maxenvlen, arginfo->env, arginfo->help);
            printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv,
                   maxenvlen, arginfo->env,

           "QEMU_LD_PREFIX = %s\n"
           "QEMU_STACK_SIZE = %ld byte\n",

           "You can use -E and -U options or the QEMU_SET_ENV and\n"
           "QEMU_UNSET_ENV environment variables to set and unset\n"
           "environment variables for the target process.\n"
           "It is possible to provide several variables by separating them\n"
           "by commas in getsubopt(3) style. Additionally it is possible to\n"
           "provide the -E and -U options multiple times.\n"
           "The following lines are equivalent:\n"
           "    -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
           "    -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
           "    QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
           "Note that if you provide several changes to a single variable\n"
           "the last change will stay in effect.\n");
static int parse_args(int argc, char **argv)
    const struct qemu_argument *arginfo;

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->env == NULL) {
        r = getenv(arginfo->env);
            arginfo->handle_opt(r);

        if (optind >= argc) {
        if (!strcmp(r, "-")) {
        for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
            if (!strcmp(r, arginfo->argv)) {
                if (arginfo->has_arg) {
                    if (optind >= argc) {
                    arginfo->handle_opt(argv[optind]);
                    arginfo->handle_opt(NULL);

        /* no option matched the current argv */
        if (arginfo->handle_opt == NULL) {

    if (optind >= argc) {

    filename = argv[optind];
    exec_path = argv[optind];
int main(int argc, char **argv, char **envp)
    struct target_pt_regs regs1, *regs = &regs1;
    struct image_info info1, *info = &info1;
    struct linux_binprm bprm;
    char **target_environ, **wrk;

    module_call_init(MODULE_INIT_QOM);

    if ((envlist = envlist_create()) == NULL) {
        (void) fprintf(stderr, "Unable to allocate envlist\n");

    /* add current environment into the list */
    for (wrk = environ; *wrk != NULL; wrk++) {
        (void) envlist_setenv(envlist, *wrk);

    /* Read the stack limit from the kernel.  If it's "unlimited",
       then we can do little else besides use the default.  */
        if (getrlimit(RLIMIT_STACK, &lim) == 0
            && lim.rlim_cur != RLIM_INFINITY
            && lim.rlim_cur == (target_long)lim.rlim_cur) {
            guest_stack_size = lim.rlim_cur;

#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file (TBD) */

    optind = parse_args(argc, argv);

    memset(regs, 0, sizeof(struct target_pt_regs));

    /* Zero out image_info */
    memset(info, 0, sizeof(struct image_info));

    memset(&bprm, 0, sizeof (bprm));

    /* Scan interp_prefix dir for replacement files. */
    init_paths(interp_prefix);

    init_qemu_uname_release();

    if (cpu_model == NULL) {
#if defined(TARGET_I386)
#ifdef TARGET_X86_64
        cpu_model = "qemu64";
        cpu_model = "qemu32";
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_M68K)
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
        cpu_model = "TI UltraSparc II";
        cpu_model = "Fujitsu MB86904";
#elif defined(TARGET_MIPS)
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
#elif defined TARGET_OPENRISC
        cpu_model = "or1200";
#elif defined(TARGET_PPC)
# ifdef TARGET_PPC64
        cpu_model = "POWER7";
#elif defined TARGET_SH4
        cpu_model = TYPE_SH7785_CPU;

    /* NOTE: we need to init the CPU at this stage to get
       qemu_host_page_size */
    cpu = cpu_init(cpu_model);
        fprintf(stderr, "Unable to find CPU definition\n");

    if (getenv("QEMU_STRACE")) {

    if (getenv("QEMU_RAND_SEED")) {
        handle_arg_randseed(getenv("QEMU_RAND_SEED"));

    target_environ = envlist_to_environ(envlist, NULL);
    envlist_free(envlist);

#if defined(CONFIG_USE_GUEST_BASE)
    /*
     * Now that page sizes are configured in cpu_init() we can do
     * proper page alignment for guest_base.
     */
    guest_base = HOST_PAGE_ALIGN(guest_base);

    if (reserved_va || have_guest_base) {
        guest_base = init_guest_space(guest_base, reserved_va, 0,
        if (guest_base == (unsigned long)-1) {
            fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
                    "space for use as guest address space (check your virtual "
                    "memory ulimit setting or reserve less using -R option)\n",
            mmap_next_start = reserved_va;
#endif /* CONFIG_USE_GUEST_BASE */

    /*
     * Read in mmap_min_addr kernel parameter.  This value is used
     * when loading the ELF image to determine whether guest_base
     * is needed.  It is also used in mmap_find_vma.
     */
        if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
            if (fscanf(fp, "%lu", &tmp) == 1) {
                mmap_min_addr = tmp;
                qemu_log("host mmap_min_addr=0x%lx\n", mmap_min_addr);
    /*
     * Prepare copy of argv vector for target.
     */
    target_argc = argc - optind;
    target_argv = calloc(target_argc + 1, sizeof (char *));
    if (target_argv == NULL) {
        (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");

    /*
     * If argv0 is specified (using '-0' switch) we replace
     * argv[0] pointer with the given one.
     */
    if (argv0 != NULL) {
        target_argv[i++] = strdup(argv0);
    for (; i < target_argc; i++) {
        target_argv[i] = strdup(argv[optind + i]);
    target_argv[target_argc] = NULL;

    ts = g_malloc0 (sizeof(TaskState));
    init_task_state(ts);
    /* build Task State */

    execfd = qemu_getauxval(AT_EXECFD);
        execfd = open(filename, O_RDONLY);
            printf("Error while loading %s: %s\n", filename, strerror(errno));

    ret = loader_exec(execfd, filename, target_argv, target_environ, regs,
        printf("Error while loading %s: %s\n", filename, strerror(-ret));

    for (wrk = target_environ; *wrk; wrk++) {

    free(target_environ);

    if (qemu_log_enabled()) {
#if defined(CONFIG_USE_GUEST_BASE)
        qemu_log("guest_base 0x%lx\n", guest_base);
        qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
        qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code);
        qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data);
        qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk);
        qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry);

    target_set_brk(info->brk);

#if defined(CONFIG_USE_GUEST_BASE)
    /* Now that we've loaded the binary, GUEST_BASE is fixed.  Delay
       generating the prologue until now so that the prologue can take
       the real value of GUEST_BASE into account.  */
    tcg_prologue_init(&tcg_ctx);
#if defined(TARGET_I386)
    env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
    env->hflags |= HF_PE_MASK | HF_CPL_MASK;
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        env->cr[4] |= CR4_OSFXSR_MASK;
        env->hflags |= HF_OSFXSR_MASK;
#ifndef TARGET_ABI32
    /* enable 64 bit mode if possible */
    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
    env->cr[4] |= CR4_PAE_MASK;
    env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
    env->hflags |= HF_LMA_MASK;

    /* flags setup : we activate the IRQs by default as in user mode */
    env->eflags |= IF_MASK;

    /* linux register setup */
#ifndef TARGET_ABI32
    env->regs[R_EAX] = regs->rax;
    env->regs[R_EBX] = regs->rbx;
    env->regs[R_ECX] = regs->rcx;
    env->regs[R_EDX] = regs->rdx;
    env->regs[R_ESI] = regs->rsi;
    env->regs[R_EDI] = regs->rdi;
    env->regs[R_EBP] = regs->rbp;
    env->regs[R_ESP] = regs->rsp;
    env->eip = regs->rip;
    env->regs[R_EAX] = regs->eax;
    env->regs[R_EBX] = regs->ebx;
    env->regs[R_ECX] = regs->ecx;
    env->regs[R_EDX] = regs->edx;
    env->regs[R_ESI] = regs->esi;
    env->regs[R_EDI] = regs->edi;
    env->regs[R_EBP] = regs->ebp;
    env->regs[R_ESP] = regs->esp;
    env->eip = regs->eip;

    /* linux interrupt setup */
#ifndef TARGET_ABI32
    env->idt.limit = 511;
    env->idt.limit = 255;
    env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
                                PROT_READ|PROT_WRITE,
                                MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    idt_table = g2h(env->idt.base);

    /* linux segment setup */
        uint64_t *gdt_table;
        env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
        gdt_table = g2h(env->gdt.base);

        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));

        /* 64 bit code segment */
        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));

        write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));

    cpu_x86_load_seg(env, R_CS, __USER_CS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_FS, __USER_DS);
    cpu_x86_load_seg(env, R_GS, __USER_DS);
    /* This hack makes Wine work... */
    env->segs[R_FS].selector = 0;
    cpu_x86_load_seg(env, R_DS, 0);
    cpu_x86_load_seg(env, R_ES, 0);
    cpu_x86_load_seg(env, R_FS, 0);
    cpu_x86_load_seg(env, R_GS, 0);
#elif defined(TARGET_AARCH64)
    if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
                "The selected ARM CPU does not support 64 bit mode\n");
    for (i = 0; i < 31; i++) {
        env->xregs[i] = regs->regs[i];
    env->xregs[31] = regs->sp;
#elif defined(TARGET_ARM)
    cpsr_write(env, regs->uregs[16], 0xffffffff);
    for(i = 0; i < 16; i++) {
        env->regs[i] = regs->uregs[i];
    if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
        && (info->elf_flags & EF_ARM_BE8)) {
        env->bswap_code = 1;
#elif defined(TARGET_UNICORE32)
    cpu_asr_write(env, regs->uregs[32], 0xffffffff);
    for (i = 0; i < 32; i++) {
        env->regs[i] = regs->uregs[i];
#elif defined(TARGET_SPARC)
    env->npc = regs->npc;
    for(i = 0; i < 8; i++)
        env->gregs[i] = regs->u_regs[i];
    for(i = 0; i < 8; i++)
        env->regwptr[i] = regs->u_regs[i + 8];
#elif defined(TARGET_PPC)
#if defined(TARGET_PPC64)
#if defined(TARGET_ABI32)
    env->msr &= ~((target_ulong)1 << MSR_SF);
    env->msr |= (target_ulong)1 << MSR_SF;
    env->nip = regs->nip;
    for(i = 0; i < 32; i++) {
        env->gpr[i] = regs->gpr[i];
4263 env
->dregs
[0] = regs
->d0
;
4264 env
->dregs
[1] = regs
->d1
;
4265 env
->dregs
[2] = regs
->d2
;
4266 env
->dregs
[3] = regs
->d3
;
4267 env
->dregs
[4] = regs
->d4
;
4268 env
->dregs
[5] = regs
->d5
;
4269 env
->dregs
[6] = regs
->d6
;
4270 env
->dregs
[7] = regs
->d7
;
4271 env
->aregs
[0] = regs
->a0
;
4272 env
->aregs
[1] = regs
->a1
;
4273 env
->aregs
[2] = regs
->a2
;
4274 env
->aregs
[3] = regs
->a3
;
4275 env
->aregs
[4] = regs
->a4
;
4276 env
->aregs
[5] = regs
->a5
;
4277 env
->aregs
[6] = regs
->a6
;
4278 env
->aregs
[7] = regs
->usp
;
4280 ts
->sim_syscalls
= 1;
4282 #elif defined(TARGET_MICROBLAZE)
4284 env
->regs
[0] = regs
->r0
;
4285 env
->regs
[1] = regs
->r1
;
4286 env
->regs
[2] = regs
->r2
;
4287 env
->regs
[3] = regs
->r3
;
4288 env
->regs
[4] = regs
->r4
;
4289 env
->regs
[5] = regs
->r5
;
4290 env
->regs
[6] = regs
->r6
;
4291 env
->regs
[7] = regs
->r7
;
4292 env
->regs
[8] = regs
->r8
;
4293 env
->regs
[9] = regs
->r9
;
4294 env
->regs
[10] = regs
->r10
;
4295 env
->regs
[11] = regs
->r11
;
4296 env
->regs
[12] = regs
->r12
;
4297 env
->regs
[13] = regs
->r13
;
4298 env
->regs
[14] = regs
->r14
;
4299 env
->regs
[15] = regs
->r15
;
4300 env
->regs
[16] = regs
->r16
;
4301 env
->regs
[17] = regs
->r17
;
4302 env
->regs
[18] = regs
->r18
;
4303 env
->regs
[19] = regs
->r19
;
4304 env
->regs
[20] = regs
->r20
;
4305 env
->regs
[21] = regs
->r21
;
4306 env
->regs
[22] = regs
->r22
;
4307 env
->regs
[23] = regs
->r23
;
4308 env
->regs
[24] = regs
->r24
;
4309 env
->regs
[25] = regs
->r25
;
4310 env
->regs
[26] = regs
->r26
;
4311 env
->regs
[27] = regs
->r27
;
4312 env
->regs
[28] = regs
->r28
;
4313 env
->regs
[29] = regs
->r29
;
4314 env
->regs
[30] = regs
->r30
;
4315 env
->regs
[31] = regs
->r31
;
4316 env
->sregs
[SR_PC
] = regs
->pc
;
#elif defined(TARGET_MIPS)
    for(i = 0; i < 32; i++) {
        env->active_tc.gpr[i] = regs->regs[i];
    env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
    if (regs->cp0_epc & 1) {
        env->hflags |= MIPS_HFLAG_M16;
#elif defined(TARGET_OPENRISC)
    for (i = 0; i < 32; i++) {
        env->gpr[i] = regs->gpr[i];
#elif defined(TARGET_SH4)
    for(i = 0; i < 16; i++) {
        env->gregs[i] = regs->regs[i];
#elif defined(TARGET_ALPHA)
    for(i = 0; i < 28; i++) {
        env->ir[i] = ((abi_ulong *)regs)[i];
    env->ir[IR_SP] = regs->usp;
#elif defined(TARGET_CRIS)
    env->regs[0] = regs->r0;
    env->regs[1] = regs->r1;
    env->regs[2] = regs->r2;
    env->regs[3] = regs->r3;
    env->regs[4] = regs->r4;
    env->regs[5] = regs->r5;
    env->regs[6] = regs->r6;
    env->regs[7] = regs->r7;
    env->regs[8] = regs->r8;
    env->regs[9] = regs->r9;
    env->regs[10] = regs->r10;
    env->regs[11] = regs->r11;
    env->regs[12] = regs->r12;
    env->regs[13] = regs->r13;
    env->regs[14] = info->start_stack;
    env->regs[15] = regs->acr;
    env->pc = regs->erp;
#elif defined(TARGET_S390X)
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->gprs[i];
    env->psw.mask = regs->psw.mask;
    env->psw.addr = regs->psw.addr;
#error unsupported target CPU

#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call.  */

    if (gdbserver_start(gdbstub_port) < 0) {
        fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
    gdb_handlesig(cpu, 0);