/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/syscall.h>
#include <sys/resource.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
static const char *cpu_model;
unsigned long mmap_min_addr;
#if defined(CONFIG_USE_GUEST_BASE)
unsigned long guest_base;

#if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
/*
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
 *
 * This way we will never overlap with our own libraries or binaries or stack
 * or anything else that QEMU maps.
 */
# if defined(TARGET_MIPS)
/* MIPS only supports 31 bits of virtual address space for user space */
unsigned long reserved_va = 0x77000000;
# else
unsigned long reserved_va = 0xf7000000;
# endif
#else
unsigned long reserved_va;
#endif
#endif /* CONFIG_USE_GUEST_BASE */

static void usage(void);

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;

/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
   we allocate a bigger stack. Need a better solution, for example
   by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;
void gemu_log(const char *fmt, ...)
    vfprintf(stderr, fmt, ap);

#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)
/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work. */
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
static int pending_cpus;
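/*
 * Rough sketch of how the primitives below fit together (summary of the
 * code that follows): pending_cpus is non-zero while an exclusive region
 * is being set up or is in progress.  A thread that needs to emulate a
 * guest atomic operation brackets it with start_exclusive()/end_exclusive(),
 * while every cpu brackets guest execution with cpu_exec_start()/cpu_exec_end():
 *
 *     start_exclusive();
 *     // ... perform the operation while no cpu executes guest code ...
 *     end_exclusive();
 *
 * cpu_exec_end() signals exclusive_cond so the waiter in start_exclusive()
 * can proceed once it is the only cpu left, and end_exclusive() broadcasts
 * exclusive_resume to wake anything parked in exclusive_idle().
 */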
/* Make sure everything is in a consistent state for calling fork().  */
void fork_start(void)
    pthread_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    pthread_mutex_lock(&exclusive_lock);

void fork_end(int child)
    mmap_fork_end(child);
        CPUState *cpu, *next_cpu;
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads.  */
        CPU_FOREACH_SAFE(cpu, next_cpu) {
            if (cpu != thread_cpu) {
                QTAILQ_REMOVE(&cpus, cpu, node);
        pthread_mutex_init(&exclusive_lock, NULL);
        pthread_mutex_init(&cpu_list_mutex, NULL);
        pthread_cond_init(&exclusive_cond, NULL);
        pthread_cond_init(&exclusive_resume, NULL);
        pthread_mutex_init(&tcg_ctx.tb_ctx.tb_lock, NULL);
        gdbserver_fork((CPUArchState *)thread_cpu->env_ptr);
        pthread_mutex_unlock(&exclusive_lock);
        pthread_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
/* Wait for pending exclusive operations to complete.  The exclusive lock
   must be held.  */
static inline void exclusive_idle(void)
    while (pending_cpus) {
        pthread_cond_wait(&exclusive_resume, &exclusive_lock);

/* Start an exclusive operation.
   Must only be called from outside cpu_arm_exec.  */
static inline void start_exclusive(void)
    pthread_mutex_lock(&exclusive_lock);

    /* Make all other cpus stop executing.  */
    CPU_FOREACH(other_cpu) {
        if (other_cpu->running) {
    if (pending_cpus > 1) {
        pthread_cond_wait(&exclusive_cond, &exclusive_lock);

/* Finish an exclusive operation.  */
static inline void __attribute__((unused)) end_exclusive(void)
    pthread_cond_broadcast(&exclusive_resume);
    pthread_mutex_unlock(&exclusive_lock);

/* Wait for exclusive ops to finish, and begin cpu execution.  */
static inline void cpu_exec_start(CPUState *cpu)
    pthread_mutex_lock(&exclusive_lock);
    pthread_mutex_unlock(&exclusive_lock);

/* Mark cpu as not executing, and release pending exclusive ops.  */
static inline void cpu_exec_end(CPUState *cpu)
    pthread_mutex_lock(&exclusive_lock);
    cpu->running = false;
    if (pending_cpus > 1) {
        if (pending_cpus == 1) {
            pthread_cond_signal(&exclusive_cond);
    pthread_mutex_unlock(&exclusive_lock);

void cpu_list_lock(void)
    pthread_mutex_lock(&cpu_list_mutex);

void cpu_list_unlock(void)
    pthread_mutex_unlock(&cpu_list_mutex);
/***********************************************************/
/* CPUX86 core interface */

void cpu_smm_update(CPUX86State *env)

uint64_t cpu_get_tsc(CPUX86State *env)
    return cpu_get_real_ticks();

static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);

static uint64_t *idt_table;

static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p[2] = tswap32(addr >> 32);

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);

static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
    set_gate(idt_table + n, 0, dpl, 0, 0);
void cpu_loop(CPUX86State *env)
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_x86_exec(env);
        /* linux syscall from int $0x80 */
            env->regs[R_EAX] = do_syscall(env,
        /* linux syscall from syscall instruction */
            env->regs[R_EAX] = do_syscall(env,
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
        /* XXX: potential problem if ABI32 */
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_fault(env);
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGSEGV;
            if (!(env->error_code & 1))
                info.si_code = TARGET_SEGV_MAPERR;
                info.si_code = TARGET_SEGV_ACCERR;
            info._sifields._sigfault._addr = env->cr[2];
            queue_signal(env, info.si_signo, &info);
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            /* division by zero */
            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_INTDIV;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, &info);
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            info.si_signo = TARGET_SIGTRAP;
            if (trapnr == EXCP01_DB) {
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->eip;
                info.si_code = TARGET_SI_KERNEL;
                info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, &info);
            /* just indicate that signals should be handled asap */
                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
            pc = env->segs[R_CS].base + env->eip;
            fprintf(stderr, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
        process_pending_signals(env);
#define get_user_code_u32(x, gaddr, doswap)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));          \
        if (!__r && (doswap)) {                            \

#define get_user_code_u16(x, gaddr, doswap)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));          \
        if (!__r && (doswap)) {                            \
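/*
 * Usage sketch (mirrors the call sites in the ARM cpu_loop below): the
 * third argument says whether the instruction word that was just fetched
 * still needs a byte swap, e.g.
 *
 *     uint32_t opcode;
 *     get_user_code_u32(opcode, env->regs[15], env->bswap_code);
 */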
/* Commpage handling -- there is no commpage for AArch64 */

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 * Input:
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * Output:
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
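/*
 * Illustrative guest-side view (an assumption for clarity, not taken from
 * this file): the guest reaches these helpers through their fixed commpage
 * addresses, e.g. for the 64-bit compare-and-swap handled below at
 * 0xffff0f60:
 *
 *     typedef int (*kuser_cmpxchg64_t)(const uint64_t *oldval,
 *                                      const uint64_t *newval,
 *                                      volatile uint64_t *ptr);
 *     #define __kuser_cmpxchg64 ((kuser_cmpxchg64_t)0xffff0f60)
 *
 *     int failed = __kuser_cmpxchg64(&expected, &desired, &var);
 *
 * which is why env->regs[0..2] are treated as the three pointers in
 * arm_kernel_cmpxchg64_helper() and r0/C report the result as described
 * above.
 */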
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
    uint64_t oldval, newval, val;
    target_siginfo_t info;

    /* Based on the 32 bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations. However things like ldrex/strex are much harder so
       there's not much point trying.  */

    cpsr = cpsr_read(env);

    if (get_user_u64(oldval, env->regs[0])) {
        env->exception.vaddress = env->regs[0];

    if (get_user_u64(newval, env->regs[1])) {
        env->exception.vaddress = env->regs[1];

    if (get_user_u64(val, addr)) {
        env->exception.vaddress = addr;

    if (put_user_u64(val, addr)) {
        env->exception.vaddress = addr;

    cpsr_write(env, cpsr, CPSR_C);

    /* We get the PC of the entry address - which is as good as anything,
       on a real kernel what you get depends on which mode it uses. */
    info.si_signo = TARGET_SIGSEGV;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->exception.vaddress;
    queue_signal(env, info.si_signo, &info);
/* Handle a jump to the kernel code page.  */
do_kernel_trap(CPUARMState *env)
    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        /* ??? No-op. Will need to do better for SMP.  */
    case 0xffff0fc0: /* __kernel_cmpxchg */
        /* XXX: This only works between threads, not between processes.
           It's probably possible to implement this with native host
           operations. However things like ldrex/strex are much harder so
           there's not much point trying.  */
        cpsr = cpsr_read(env);
        /* FIXME: This should SEGV if the access fails.  */
        if (get_user_u32(val, addr))
        if (val == env->regs[0]) {
            /* FIXME: Check for segfaults.  */
            put_user_u32(val, addr);
        cpsr_write(env, cpsr, CPSR_C);
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = cpu_get_tls(env);
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);

    /* Jump back to the caller.  */
    addr = env->regs[14];
    env->regs[15] = addr;
/* Store exclusive handling for AArch32 */
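/*
 * env->exclusive_info packs the decoded operands of the store-exclusive
 * (compare with the AArch64 layout comment in do_strex_a64() below):
 * bits [3:0] hold the access size, bits [7:4] the register that receives
 * the status result, bits [11:8] the register holding the value to store,
 * and bits [15:12] the second source register used by the 64-bit case.
 */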
static int do_strex(CPUARMState *env)
    if (env->exclusive_addr != env->exclusive_test) {
    /* We know we're always AArch32 so the address is in uint32_t range
     * unless it was the -1 exclusive-monitor-lost value (which won't
     * match exclusive_test above).
     */
    assert(extract64(env->exclusive_addr, 32, 32) == 0);
    addr = env->exclusive_addr;
    size = env->exclusive_info & 0xf;
        segv = get_user_u8(val, addr);
        segv = get_user_u16(val, addr);
        segv = get_user_u32(val, addr);
        env->exception.vaddress = addr;
        segv = get_user_u32(valhi, addr + 4);
            env->exception.vaddress = addr + 4;
        val = deposit64(val, 32, 32, valhi);
    if (val != env->exclusive_val) {
    val = env->regs[(env->exclusive_info >> 8) & 0xf];
        segv = put_user_u8(val, addr);
        segv = put_user_u16(val, addr);
        segv = put_user_u32(val, addr);
        env->exception.vaddress = addr;
        val = env->regs[(env->exclusive_info >> 12) & 0xf];
        segv = put_user_u32(val, addr + 4);
            env->exception.vaddress = addr + 4;
    env->regs[(env->exclusive_info >> 4) & 0xf] = rc;
void cpu_loop(CPUARMState *env)
    CPUState *cs = CPU(arm_env_get_cpu(env));
    unsigned int n, insn;
    target_siginfo_t info;

        trapnr = cpu_arm_exec(env);
            TaskState *ts = cs->opaque;
            /* we handle the FPU emulation here, as Linux */
            /* we get the opcode */
            /* FIXME - what to do if get_user() fails? */
            get_user_code_u32(opcode, env->regs[15], env->bswap_code);

            rc = EmulateAll(opcode, &ts->fpa, env);
            if (rc == 0) { /* illegal instruction */
                info.si_signo = TARGET_SIGILL;
                info.si_code = TARGET_ILL_ILLOPN;
                info._sifields._sigfault._addr = env->regs[15];
                queue_signal(env, info.si_signo, &info);
            } else if (rc < 0) { /* FP exception */
                /* translate softfloat flags to FPSR flags */
                if (-rc & float_flag_invalid)
                if (-rc & float_flag_divbyzero)
                if (-rc & float_flag_overflow)
                if (-rc & float_flag_underflow)
                if (-rc & float_flag_inexact)

                FPSR fpsr = ts->fpa.fpsr;
                //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);

                if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
                    info.si_signo = TARGET_SIGFPE;

                    /* ordered by priority, least first */
                    if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES;
                    if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND;
                    if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF;
                    if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV;
                    if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV;

                    info._sifields._sigfault._addr = env->regs[15];
                    queue_signal(env, info.si_signo, &info);

                    /* accumulate unenabled exceptions */
                    if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC))
                    if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC))
                    if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC))
                    if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC))
                    if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC))
            } else { /* everything OK */

            if (trapnr == EXCP_BKPT) {
                    /* FIXME - what to do if get_user() fails? */
                    get_user_code_u16(insn, env->regs[15], env->bswap_code);
                    /* FIXME - what to do if get_user() fails? */
                    get_user_code_u32(insn, env->regs[15], env->bswap_code);
                    n = (insn & 0xf) | ((insn >> 4) & 0xff0);
                    /* FIXME - what to do if get_user() fails? */
                    get_user_code_u16(insn, env->regs[15] - 2,
                    /* FIXME - what to do if get_user() fails? */
                    get_user_code_u32(insn, env->regs[15] - 4,

            if (n == ARM_NR_cacheflush) {
            } else if (n == ARM_NR_semihosting
                       || n == ARM_NR_thumb_semihosting) {
                env->regs[0] = do_arm_semihosting(env);
            } else if (n == 0 || n >= ARM_SYSCALL_BASE || env->thumb) {
                if (env->thumb || n == 0) {
                    n -= ARM_SYSCALL_BASE;
                if (n > ARM_NR_BASE) {
                    case ARM_NR_cacheflush:
                        cpu_set_tls(env, env->regs[0]);
                    case ARM_NR_breakpoint:
                        env->regs[15] -= env->thumb ? 2 : 4;
                        gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
                        env->regs[0] = -TARGET_ENOSYS;
                    env->regs[0] = do_syscall(env,
            /* just indicate that signals should be handled asap */
            if (!do_strex(env)) {
            /* fall through for segv */
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            addr = env->exception.vaddress;
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = addr;
            queue_signal(env, info.si_signo, &info);
                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, &info);
        case EXCP_KERNEL_TRAP:
            if (do_kernel_trap(env))
            fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
/*
 * Handle AArch64 store-release exclusive
 *
 * rs = gets the status result of store exclusive
 * rt = is the register that is stored
 * rt2 = is the second register store (in STP)
 */
static int do_strex_a64(CPUARMState *env)
    /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
    size = extract32(env->exclusive_info, 0, 2);
    is_pair = extract32(env->exclusive_info, 2, 1);
    rs = extract32(env->exclusive_info, 4, 5);
    rt = extract32(env->exclusive_info, 9, 5);
    rt2 = extract32(env->exclusive_info, 14, 5);

    addr = env->exclusive_addr;

    if (addr != env->exclusive_test) {
        segv = get_user_u8(val, addr);
        segv = get_user_u16(val, addr);
        segv = get_user_u32(val, addr);
        segv = get_user_u64(val, addr);
        env->exception.vaddress = addr;
    if (val != env->exclusive_val) {
            segv = get_user_u32(val, addr + 4);
            segv = get_user_u64(val, addr + 8);
            env->exception.vaddress = addr + (size == 2 ? 4 : 8);
        if (val != env->exclusive_high) {
    /* handle the zero register */
    val = rt == 31 ? 0 : env->xregs[rt];
        segv = put_user_u8(val, addr);
        segv = put_user_u16(val, addr);
        segv = put_user_u32(val, addr);
        segv = put_user_u64(val, addr);
        /* handle the zero register */
        val = rt2 == 31 ? 0 : env->xregs[rt2];
            segv = put_user_u32(val, addr + 4);
            segv = put_user_u64(val, addr + 8);
            env->exception.vaddress = addr + (size == 2 ? 4 : 8);
    /* rs == 31 encodes a write to the ZR, thus throwing away
     * the status return. This is rather silly but valid.
     */
    /* instruction faulted, PC does not advance */
    /* either way a strex releases any exclusive lock we have */
    env->exclusive_addr = -1;
/* AArch64 main loop */
void cpu_loop(CPUARMState *env)
    CPUState *cs = CPU(arm_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_arm_exec(env);
            env->xregs[0] = do_syscall(env,
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            if (!do_strex_a64(env)) {
            /* fall through for segv */
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->exception.vaddress;
            queue_signal(env, info.si_signo, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
        /* Exception return on AArch64 always clears the exclusive monitor,
         * so any return to running guest code implies this.
         * A strex (successful or otherwise) also clears the monitor, so
         * we don't need to specialcase EXCP_STREX.
         */
        env->exclusive_addr = -1;
#endif /* ndef TARGET_ABI32 */
#ifdef TARGET_UNICORE32

void cpu_loop(CPUUniCore32State *env)
    CPUState *cs = CPU(uc32_env_get_cpu(env));
    unsigned int n, insn;
    target_siginfo_t info;

        trapnr = uc32_cpu_exec(env);
        case UC32_EXCP_PRIV:
            get_user_u32(insn, env->regs[31] - 4);
            n = insn & 0xffffff;
            if (n >= UC32_SYSCALL_BASE) {
                n -= UC32_SYSCALL_BASE;
                if (n == UC32_SYSCALL_NR_set_tls) {
                    cpu_set_tls(env, env->regs[0]);
                    env->regs[0] = do_syscall(env,
        case UC32_EXCP_DTRAP:
        case UC32_EXCP_ITRAP:
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->cp0.c4_faultaddr;
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
        process_pending_signals(env);

    fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
    cpu_dump_state(cs, stderr, fprintf, 0);
#define SPARC64_STACK_BIAS 2047

/* WARNING: dealing with register windows _is_ complicated. More info
   can be found at http://www.sics.se/~psm/sparcstack.html */
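/*
 * Layout note for the helpers below: spilling or filling a window moves its
 * 16 local and in registers (indices 8..23 within the window, see the
 * put_user/get_user loops) to or from consecutive abi_ulong slots starting
 * at that window's stack pointer, which is fetched with
 * get_reg_index(env, cwp1, 6) (%o6 == %sp).
 */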
static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
    index = (index + cwp * 16) % (16 * env->nwindows);
    /* wrap handling : if cwp is on the last window, then we use the
       registers 'after' the end */
    if (index < 8 && env->cwp == env->nwindows - 1)
        index += 16 * env->nwindows;

/* save the register window 'cwp1' */
static inline void save_window_offset(CPUSPARCState *env, int cwp1)
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
        sp_ptr += SPARC64_STACK_BIAS;
#if defined(DEBUG_WIN)
    printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",
    for (i = 0; i < 16; i++) {
        /* FIXME - what to do if put_user() fails? */
        put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);

static void save_window(CPUSPARCState *env)
#ifndef TARGET_SPARC64
    unsigned int new_wim;
    new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));

static void restore_window(CPUSPARCState *env)
#ifndef TARGET_SPARC64
    unsigned int new_wim;
    unsigned int i, cwp1;

#ifndef TARGET_SPARC64
    new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);

    /* restore the invalid window */
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
        sp_ptr += SPARC64_STACK_BIAS;
#if defined(DEBUG_WIN)
    printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n",
    for (i = 0; i < 16; i++) {
        /* FIXME - what to do if get_user() fails? */
        get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);
#ifdef TARGET_SPARC64
    if (env->cleanwin < env->nwindows - 1)

static void flush_windows(CPUSPARCState *env)
        /* if restore would invoke restore_window(), then we can stop */
        cwp1 = cpu_cwp_inc(env, env->cwp + offset);
#ifndef TARGET_SPARC64
        if (env->wim & (1 << cwp1))
        if (env->canrestore == 0)
        save_window_offset(env, cwp1);
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
#ifndef TARGET_SPARC64
    /* set wim so that restore will reload the registers */
    env->wim = 1 << cwp1;
#if defined(DEBUG_WIN)
    printf("flush_windows: nb=%d\n", offset - 1);
void cpu_loop(CPUSPARCState *env)
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_sparc_exec(env);

        /* Compute PSR before exposing state.  */
        if (env->cc_op != CC_OP_FLAGS) {
#ifndef TARGET_SPARC64
            ret = do_syscall(env, env->gregs[1],
                             env->regwptr[0], env->regwptr[1],
                             env->regwptr[2], env->regwptr[3],
                             env->regwptr[4], env->regwptr[5],
            if ((abi_ulong)ret >= (abi_ulong)(-515)) {
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
                env->xcc |= PSR_CARRY;
                env->psr |= PSR_CARRY;
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
                env->xcc &= ~PSR_CARRY;
                env->psr &= ~PSR_CARRY;
            env->regwptr[0] = ret;
            /* next instruction */
            env->npc = env->npc + 4;
        case 0x83: /* flush windows */
            /* next instruction */
            env->npc = env->npc + 4;
#ifndef TARGET_SPARC64
        case TT_WIN_OVF: /* window overflow */
        case TT_WIN_UNF: /* window underflow */
            restore_window(env);
                info.si_signo = TARGET_SIGSEGV;
                /* XXX: check env->error_code */
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->mmuregs[4];
                queue_signal(env, info.si_signo, &info);
        case TT_SPILL: /* window overflow */
        case TT_FILL: /* window underflow */
            restore_window(env);
                info.si_signo = TARGET_SIGSEGV;
                /* XXX: check env->error_code */
                info.si_code = TARGET_SEGV_MAPERR;
                if (trapnr == TT_DFAULT)
                    info._sifields._sigfault._addr = env->dmmuregs[4];
                    info._sifields._sigfault._addr = cpu_tsptr(env)->tpc;
                queue_signal(env, info.si_signo, &info);
#ifndef TARGET_ABI32
            sparc64_get_context(env);
            sparc64_set_context(env);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env)

uint64_t cpu_ppc_load_tbl(CPUPPCState *env)
    return cpu_ppc_get_tb(env);

uint32_t cpu_ppc_load_tbu(CPUPPCState *env)
    return cpu_ppc_get_tb(env) >> 32;

uint64_t cpu_ppc_load_atbl(CPUPPCState *env)
    return cpu_ppc_get_tb(env);

uint32_t cpu_ppc_load_atbu(CPUPPCState *env)
    return cpu_ppc_get_tb(env) >> 32;

uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env)
    __attribute__ (( alias ("cpu_ppc_load_tbu") ));

uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env)
    return cpu_ppc_load_tbl(env) & 0x3FFFFF80;

/* XXX: to be fixed */
int ppc_dcr_read(ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)

int ppc_dcr_write(ppc_dcr_t *dcr_env, int dcrn, uint32_t val)

#define EXCP_DUMP(env, fmt, ...)                                        \
    CPUState *cs = ENV_GET_CPU(env);                                    \
    fprintf(stderr, fmt , ## __VA_ARGS__);                              \
    cpu_dump_state(cs, stderr, fprintf, 0);                             \
    qemu_log(fmt, ## __VA_ARGS__);                                      \
    if (qemu_log_enabled()) {                                           \
        log_cpu_state(cs, 0);                                           \
static int do_store_exclusive(CPUPPCState *env)
    target_ulong page_addr;
    target_ulong val, val2 __attribute__((unused)) = 0;

    addr = env->reserve_ea;
    page_addr = addr & TARGET_PAGE_MASK;
    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {
        int reg = env->reserve_info & 0x1f;
        int size = env->reserve_info >> 5;
        if (addr == env->reserve_addr) {
            case 1: segv = get_user_u8(val, addr); break;
            case 2: segv = get_user_u16(val, addr); break;
            case 4: segv = get_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
            case 8: segv = get_user_u64(val, addr); break;
                segv = get_user_u64(val, addr);
                    segv = get_user_u64(val2, addr + 8);
            if (!segv && val == env->reserve_val) {
                val = env->gpr[reg];
                case 1: segv = put_user_u8(val, addr); break;
                case 2: segv = put_user_u16(val, addr); break;
                case 4: segv = put_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
                case 8: segv = put_user_u64(val, addr); break;
                    if (val2 == env->reserve_val2) {
                            val = env->gpr[reg + 1];
                            val2 = env->gpr[reg + 1];
                        segv = put_user_u64(val, addr);
                            segv = put_user_u64(val2, addr + 8);
    env->crf[0] = (stored << 1) | xer_so;
    env->reserve_addr = (target_ulong)-1;
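/*
 * Result convention used above: bit 1 (EQ) of cr0 reports whether the
 * conditional store actually happened, the SO bit is copied from XER, and
 * the reservation is always dropped by setting reserve_addr to -1 whether
 * or not the store succeeded.
 */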
1564 void cpu_loop(CPUPPCState
*env
)
1566 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
1567 target_siginfo_t info
;
1573 trapnr
= cpu_ppc_exec(env
);
1576 case POWERPC_EXCP_NONE
:
1579 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1580 cpu_abort(cs
, "Critical interrupt while in user mode. "
1583 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1584 cpu_abort(cs
, "Machine check exception while in user mode. "
1587 case POWERPC_EXCP_DSI
: /* Data storage exception */
1588 EXCP_DUMP(env
, "Invalid data memory access: 0x" TARGET_FMT_lx
"\n",
1590 /* XXX: check this. Seems bugged */
1591 switch (env
->error_code
& 0xFF000000) {
1593 info
.si_signo
= TARGET_SIGSEGV
;
1595 info
.si_code
= TARGET_SEGV_MAPERR
;
1598 info
.si_signo
= TARGET_SIGILL
;
1600 info
.si_code
= TARGET_ILL_ILLADR
;
1603 info
.si_signo
= TARGET_SIGSEGV
;
1605 info
.si_code
= TARGET_SEGV_ACCERR
;
1608 /* Let's send a regular segfault... */
1609 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1611 info
.si_signo
= TARGET_SIGSEGV
;
1613 info
.si_code
= TARGET_SEGV_MAPERR
;
1616 info
._sifields
._sigfault
._addr
= env
->nip
;
1617 queue_signal(env
, info
.si_signo
, &info
);
1619 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1620 EXCP_DUMP(env
, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1621 "\n", env
->spr
[SPR_SRR0
]);
1622 /* XXX: check this */
1623 switch (env
->error_code
& 0xFF000000) {
1625 info
.si_signo
= TARGET_SIGSEGV
;
1627 info
.si_code
= TARGET_SEGV_MAPERR
;
1631 info
.si_signo
= TARGET_SIGSEGV
;
1633 info
.si_code
= TARGET_SEGV_ACCERR
;
1636 /* Let's send a regular segfault... */
1637 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1639 info
.si_signo
= TARGET_SIGSEGV
;
1641 info
.si_code
= TARGET_SEGV_MAPERR
;
1644 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1645 queue_signal(env
, info
.si_signo
, &info
);
1647 case POWERPC_EXCP_EXTERNAL
: /* External input */
1648 cpu_abort(cs
, "External interrupt while in user mode. "
1651 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1652 EXCP_DUMP(env
, "Unaligned memory access\n");
1653 /* XXX: check this */
1654 info
.si_signo
= TARGET_SIGBUS
;
1656 info
.si_code
= TARGET_BUS_ADRALN
;
1657 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1658 queue_signal(env
, info
.si_signo
, &info
);
1660 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1661 /* XXX: check this */
1662 switch (env
->error_code
& ~0xF) {
1663 case POWERPC_EXCP_FP
:
1664 EXCP_DUMP(env
, "Floating point program exception\n");
1665 info
.si_signo
= TARGET_SIGFPE
;
1667 switch (env
->error_code
& 0xF) {
1668 case POWERPC_EXCP_FP_OX
:
1669 info
.si_code
= TARGET_FPE_FLTOVF
;
1671 case POWERPC_EXCP_FP_UX
:
1672 info
.si_code
= TARGET_FPE_FLTUND
;
1674 case POWERPC_EXCP_FP_ZX
:
1675 case POWERPC_EXCP_FP_VXZDZ
:
1676 info
.si_code
= TARGET_FPE_FLTDIV
;
1678 case POWERPC_EXCP_FP_XX
:
1679 info
.si_code
= TARGET_FPE_FLTRES
;
1681 case POWERPC_EXCP_FP_VXSOFT
:
1682 info
.si_code
= TARGET_FPE_FLTINV
;
1684 case POWERPC_EXCP_FP_VXSNAN
:
1685 case POWERPC_EXCP_FP_VXISI
:
1686 case POWERPC_EXCP_FP_VXIDI
:
1687 case POWERPC_EXCP_FP_VXIMZ
:
1688 case POWERPC_EXCP_FP_VXVC
:
1689 case POWERPC_EXCP_FP_VXSQRT
:
1690 case POWERPC_EXCP_FP_VXCVI
:
1691 info
.si_code
= TARGET_FPE_FLTSUB
;
1694 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1699 case POWERPC_EXCP_INVAL
:
1700 EXCP_DUMP(env
, "Invalid instruction\n");
1701 info
.si_signo
= TARGET_SIGILL
;
1703 switch (env
->error_code
& 0xF) {
1704 case POWERPC_EXCP_INVAL_INVAL
:
1705 info
.si_code
= TARGET_ILL_ILLOPC
;
1707 case POWERPC_EXCP_INVAL_LSWX
:
1708 info
.si_code
= TARGET_ILL_ILLOPN
;
1710 case POWERPC_EXCP_INVAL_SPR
:
1711 info
.si_code
= TARGET_ILL_PRVREG
;
1713 case POWERPC_EXCP_INVAL_FP
:
1714 info
.si_code
= TARGET_ILL_COPROC
;
1717 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1718 env
->error_code
& 0xF);
1719 info
.si_code
= TARGET_ILL_ILLADR
;
1723 case POWERPC_EXCP_PRIV
:
1724 EXCP_DUMP(env
, "Privilege violation\n");
1725 info
.si_signo
= TARGET_SIGILL
;
1727 switch (env
->error_code
& 0xF) {
1728 case POWERPC_EXCP_PRIV_OPC
:
1729 info
.si_code
= TARGET_ILL_PRVOPC
;
1731 case POWERPC_EXCP_PRIV_REG
:
1732 info
.si_code
= TARGET_ILL_PRVREG
;
1735 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1736 env
->error_code
& 0xF);
1737 info
.si_code
= TARGET_ILL_PRVOPC
;
1741 case POWERPC_EXCP_TRAP
:
1742 cpu_abort(cs
, "Tried to call a TRAP\n");
1745 /* Should not happen ! */
1746 cpu_abort(cs
, "Unknown program exception (%02x)\n",
1750 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1751 queue_signal(env
, info
.si_signo
, &info
);
1753 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1754 EXCP_DUMP(env
, "No floating point allowed\n");
1755 info
.si_signo
= TARGET_SIGILL
;
1757 info
.si_code
= TARGET_ILL_COPROC
;
1758 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1759 queue_signal(env
, info
.si_signo
, &info
);
1761 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1762 cpu_abort(cs
, "Syscall exception while in user mode. "
1765 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1766 EXCP_DUMP(env
, "No APU instruction allowed\n");
1767 info
.si_signo
= TARGET_SIGILL
;
1769 info
.si_code
= TARGET_ILL_COPROC
;
1770 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1771 queue_signal(env
, info
.si_signo
, &info
);
1773 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1774 cpu_abort(cs
, "Decrementer interrupt while in user mode. "
1777 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1778 cpu_abort(cs
, "Fix interval timer interrupt while in user mode. "
1781 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1782 cpu_abort(cs
, "Watchdog timer interrupt while in user mode. "
1785 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1786 cpu_abort(cs
, "Data TLB exception while in user mode. "
1789 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1790 cpu_abort(cs
, "Instruction TLB exception while in user mode. "
1793 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1794 EXCP_DUMP(env
, "No SPE/floating-point instruction allowed\n");
1795 info
.si_signo
= TARGET_SIGILL
;
1797 info
.si_code
= TARGET_ILL_COPROC
;
1798 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1799 queue_signal(env
, info
.si_signo
, &info
);
1801 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1802 cpu_abort(cs
, "Embedded floating-point data IRQ not handled\n");
1804 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1805 cpu_abort(cs
, "Embedded floating-point round IRQ not handled\n");
1807 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1808 cpu_abort(cs
, "Performance monitor exception not handled\n");
1810 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1811 cpu_abort(cs
, "Doorbell interrupt while in user mode. "
1814 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1815 cpu_abort(cs
, "Doorbell critical interrupt while in user mode. "
1818 case POWERPC_EXCP_RESET
: /* System reset exception */
1819 cpu_abort(cs
, "Reset interrupt while in user mode. "
1822 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1823 cpu_abort(cs
, "Data segment exception while in user mode. "
1826 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1827 cpu_abort(cs
, "Instruction segment exception "
1828 "while in user mode. Aborting\n");
1830 /* PowerPC 64 with hypervisor mode support */
1831 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1832 cpu_abort(cs
, "Hypervisor decrementer interrupt "
1833 "while in user mode. Aborting\n");
1835 case POWERPC_EXCP_TRACE
: /* Trace exception */
1837 * we use this exception to emulate step-by-step execution mode.
1840 /* PowerPC 64 with hypervisor mode support */
1841 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1842 cpu_abort(cs
, "Hypervisor data storage exception "
1843 "while in user mode. Aborting\n");
1845 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1846 cpu_abort(cs
, "Hypervisor instruction storage exception "
1847 "while in user mode. Aborting\n");
1849 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1850 cpu_abort(cs
, "Hypervisor data segment exception "
1851 "while in user mode. Aborting\n");
1853 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1854 cpu_abort(cs
, "Hypervisor instruction segment exception "
1855 "while in user mode. Aborting\n");
1857 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1858 EXCP_DUMP(env
, "No Altivec instructions allowed\n");
1859 info
.si_signo
= TARGET_SIGILL
;
1861 info
.si_code
= TARGET_ILL_COPROC
;
1862 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1863 queue_signal(env
, info
.si_signo
, &info
);
1865 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1866 cpu_abort(cs
, "Programmable interval timer interrupt "
1867 "while in user mode. Aborting\n");
1869 case POWERPC_EXCP_IO
: /* IO error exception */
1870 cpu_abort(cs
, "IO error exception while in user mode. "
1873 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1874 cpu_abort(cs
, "Run mode exception while in user mode. "
1877 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1878 cpu_abort(cs
, "Emulation trap exception not handled\n");
1880 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1881 cpu_abort(cs
, "Instruction fetch TLB exception "
1882 "while in user-mode. Aborting");
1884 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1885 cpu_abort(cs
, "Data load TLB exception while in user-mode. "
1888 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
1889 cpu_abort(cs
, "Data store TLB exception while in user-mode. "
1892 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
1893 cpu_abort(cs
, "Floating-point assist exception not handled\n");
1895 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1896 cpu_abort(cs
, "Instruction address breakpoint exception "
1899 case POWERPC_EXCP_SMI
: /* System management interrupt */
1900 cpu_abort(cs
, "System management interrupt while in user mode. "
1903 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1904 cpu_abort(cs
, "Thermal interrupt interrupt while in user mode. "
1907 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
1908 cpu_abort(cs
, "Performance monitor exception not handled\n");
1910 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1911 cpu_abort(cs
, "Vector assist exception not handled\n");
1913 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
1914 cpu_abort(cs
, "Soft patch exception not handled\n");
1916 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1917 cpu_abort(cs
, "Maintenance exception while in user mode. "
1920 case POWERPC_EXCP_STOP
: /* stop translation */
1921 /* We did invalidate the instruction cache. Go on */
1923 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
1924 /* We just stopped because of a branch. Go on */
1926 case POWERPC_EXCP_SYSCALL_USER
:
1927 /* system call in user-mode emulation */
1929 * PPC ABI uses overflow flag in cr0 to signal an error
1932 env
->crf
[0] &= ~0x1;
1933 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
1934 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
1936 if (ret
== (target_ulong
)(-TARGET_QEMU_ESIGRETURN
)) {
1937 /* Returning from a successful sigreturn syscall.
1938 Avoid corrupting register state. */
1941 if (ret
> (target_ulong
)(-515)) {
1947 case POWERPC_EXCP_STCX
:
1948 if (do_store_exclusive(env
)) {
1949 info
.si_signo
= TARGET_SIGSEGV
;
1951 info
.si_code
= TARGET_SEGV_MAPERR
;
1952 info
._sifields
._sigfault
._addr
= env
->nip
;
1953 queue_signal(env
, info
.si_signo
, &info
);
1960 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1962 info
.si_signo
= sig
;
1964 info
.si_code
= TARGET_TRAP_BRKPT
;
1965 queue_signal(env
, info
.si_signo
, &info
);
1969 case EXCP_INTERRUPT
:
1970 /* just indicate that signals should be handled asap */
1973 cpu_abort(cs
, "Unknown exception 0x%d. Aborting\n", trapnr
);
1976 process_pending_signals(env
);
1983 # ifdef TARGET_ABI_MIPSO32
1984 # define MIPS_SYS(name, args) args,
1985 static const uint8_t mips_syscall_args
[] = {
1986 MIPS_SYS(sys_syscall
, 8) /* 4000 */
1987 MIPS_SYS(sys_exit
, 1)
1988 MIPS_SYS(sys_fork
, 0)
1989 MIPS_SYS(sys_read
, 3)
1990 MIPS_SYS(sys_write
, 3)
1991 MIPS_SYS(sys_open
, 3) /* 4005 */
1992 MIPS_SYS(sys_close
, 1)
1993 MIPS_SYS(sys_waitpid
, 3)
1994 MIPS_SYS(sys_creat
, 2)
1995 MIPS_SYS(sys_link
, 2)
1996 MIPS_SYS(sys_unlink
, 1) /* 4010 */
1997 MIPS_SYS(sys_execve
, 0)
1998 MIPS_SYS(sys_chdir
, 1)
1999 MIPS_SYS(sys_time
, 1)
2000 MIPS_SYS(sys_mknod
, 3)
2001 MIPS_SYS(sys_chmod
, 2) /* 4015 */
2002 MIPS_SYS(sys_lchown
, 3)
2003 MIPS_SYS(sys_ni_syscall
, 0)
2004 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_stat */
2005 MIPS_SYS(sys_lseek
, 3)
2006 MIPS_SYS(sys_getpid
, 0) /* 4020 */
2007 MIPS_SYS(sys_mount
, 5)
2008 MIPS_SYS(sys_umount
, 1)
2009 MIPS_SYS(sys_setuid
, 1)
2010 MIPS_SYS(sys_getuid
, 0)
2011 MIPS_SYS(sys_stime
, 1) /* 4025 */
2012 MIPS_SYS(sys_ptrace
, 4)
2013 MIPS_SYS(sys_alarm
, 1)
2014 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_fstat */
2015 MIPS_SYS(sys_pause
, 0)
2016 MIPS_SYS(sys_utime
, 2) /* 4030 */
2017 MIPS_SYS(sys_ni_syscall
, 0)
2018 MIPS_SYS(sys_ni_syscall
, 0)
2019 MIPS_SYS(sys_access
, 2)
2020 MIPS_SYS(sys_nice
, 1)
2021 MIPS_SYS(sys_ni_syscall
, 0) /* 4035 */
2022 MIPS_SYS(sys_sync
, 0)
2023 MIPS_SYS(sys_kill
, 2)
2024 MIPS_SYS(sys_rename
, 2)
2025 MIPS_SYS(sys_mkdir
, 2)
2026 MIPS_SYS(sys_rmdir
, 1) /* 4040 */
2027 MIPS_SYS(sys_dup
, 1)
2028 MIPS_SYS(sys_pipe
, 0)
2029 MIPS_SYS(sys_times
, 1)
2030 MIPS_SYS(sys_ni_syscall
, 0)
2031 MIPS_SYS(sys_brk
, 1) /* 4045 */
2032 MIPS_SYS(sys_setgid
, 1)
2033 MIPS_SYS(sys_getgid
, 0)
2034 MIPS_SYS(sys_ni_syscall
, 0) /* was signal(2) */
2035 MIPS_SYS(sys_geteuid
, 0)
2036 MIPS_SYS(sys_getegid
, 0) /* 4050 */
2037 MIPS_SYS(sys_acct
, 0)
2038 MIPS_SYS(sys_umount2
, 2)
2039 MIPS_SYS(sys_ni_syscall
, 0)
2040 MIPS_SYS(sys_ioctl
, 3)
2041 MIPS_SYS(sys_fcntl
, 3) /* 4055 */
2042 MIPS_SYS(sys_ni_syscall
, 2)
2043 MIPS_SYS(sys_setpgid
, 2)
2044 MIPS_SYS(sys_ni_syscall
, 0)
2045 MIPS_SYS(sys_olduname
, 1)
2046 MIPS_SYS(sys_umask
, 1) /* 4060 */
2047 MIPS_SYS(sys_chroot
, 1)
2048 MIPS_SYS(sys_ustat
, 2)
2049 MIPS_SYS(sys_dup2
, 2)
2050 MIPS_SYS(sys_getppid
, 0)
2051 MIPS_SYS(sys_getpgrp
, 0) /* 4065 */
2052 MIPS_SYS(sys_setsid
, 0)
2053 MIPS_SYS(sys_sigaction
, 3)
2054 MIPS_SYS(sys_sgetmask
, 0)
2055 MIPS_SYS(sys_ssetmask
, 1)
2056 MIPS_SYS(sys_setreuid
, 2) /* 4070 */
2057 MIPS_SYS(sys_setregid
, 2)
2058 MIPS_SYS(sys_sigsuspend
, 0)
2059 MIPS_SYS(sys_sigpending
, 1)
2060 MIPS_SYS(sys_sethostname
, 2)
2061 MIPS_SYS(sys_setrlimit
, 2) /* 4075 */
2062 MIPS_SYS(sys_getrlimit
, 2)
2063 MIPS_SYS(sys_getrusage
, 2)
2064 MIPS_SYS(sys_gettimeofday
, 2)
2065 MIPS_SYS(sys_settimeofday
, 2)
2066 MIPS_SYS(sys_getgroups
, 2) /* 4080 */
2067 MIPS_SYS(sys_setgroups
, 2)
2068 MIPS_SYS(sys_ni_syscall
, 0) /* old_select */
2069 MIPS_SYS(sys_symlink
, 2)
2070 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_lstat */
2071 MIPS_SYS(sys_readlink
, 3) /* 4085 */
2072 MIPS_SYS(sys_uselib
, 1)
2073 MIPS_SYS(sys_swapon
, 2)
2074 MIPS_SYS(sys_reboot
, 3)
2075 MIPS_SYS(old_readdir
, 3)
2076 MIPS_SYS(old_mmap
, 6) /* 4090 */
2077 MIPS_SYS(sys_munmap
, 2)
2078 MIPS_SYS(sys_truncate
, 2)
2079 MIPS_SYS(sys_ftruncate
, 2)
2080 MIPS_SYS(sys_fchmod
, 2)
2081 MIPS_SYS(sys_fchown
, 3) /* 4095 */
2082 MIPS_SYS(sys_getpriority
, 2)
2083 MIPS_SYS(sys_setpriority
, 3)
2084 MIPS_SYS(sys_ni_syscall
, 0)
2085 MIPS_SYS(sys_statfs
, 2)
2086 MIPS_SYS(sys_fstatfs
, 2) /* 4100 */
2087 MIPS_SYS(sys_ni_syscall
, 0) /* was ioperm(2) */
2088 MIPS_SYS(sys_socketcall
, 2)
2089 MIPS_SYS(sys_syslog
, 3)
2090 MIPS_SYS(sys_setitimer
, 3)
2091 MIPS_SYS(sys_getitimer
, 2) /* 4105 */
2092 MIPS_SYS(sys_newstat
, 2)
2093 MIPS_SYS(sys_newlstat
, 2)
2094 MIPS_SYS(sys_newfstat
, 2)
2095 MIPS_SYS(sys_uname
, 1)
2096 MIPS_SYS(sys_ni_syscall
, 0) /* 4110 was iopl(2) */
2097 MIPS_SYS(sys_vhangup
, 0)
2098 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_idle() */
2099 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_vm86 */
2100 MIPS_SYS(sys_wait4
, 4)
2101 MIPS_SYS(sys_swapoff
, 1) /* 4115 */
2102 MIPS_SYS(sys_sysinfo
, 1)
2103 MIPS_SYS(sys_ipc
, 6)
2104 MIPS_SYS(sys_fsync
, 1)
2105 MIPS_SYS(sys_sigreturn
, 0)
2106 MIPS_SYS(sys_clone
, 6) /* 4120 */
2107 MIPS_SYS(sys_setdomainname
, 2)
2108 MIPS_SYS(sys_newuname
, 1)
2109 MIPS_SYS(sys_ni_syscall
, 0) /* sys_modify_ldt */
2110 MIPS_SYS(sys_adjtimex
, 1)
2111 MIPS_SYS(sys_mprotect
, 3) /* 4125 */
2112 MIPS_SYS(sys_sigprocmask
, 3)
2113 MIPS_SYS(sys_ni_syscall
, 0) /* was create_module */
2114 MIPS_SYS(sys_init_module
, 5)
2115 MIPS_SYS(sys_delete_module
, 1)
2116 MIPS_SYS(sys_ni_syscall
, 0) /* 4130 was get_kernel_syms */
2117 MIPS_SYS(sys_quotactl
, 0)
2118 MIPS_SYS(sys_getpgid
, 1)
2119 MIPS_SYS(sys_fchdir
, 1)
2120 MIPS_SYS(sys_bdflush
, 2)
2121 MIPS_SYS(sys_sysfs
, 3) /* 4135 */
2122 MIPS_SYS(sys_personality
, 1)
2123 MIPS_SYS(sys_ni_syscall
, 0) /* for afs_syscall */
2124 MIPS_SYS(sys_setfsuid
, 1)
2125 MIPS_SYS(sys_setfsgid
, 1)
2126 MIPS_SYS(sys_llseek
, 5) /* 4140 */
2127 MIPS_SYS(sys_getdents
, 3)
2128 MIPS_SYS(sys_select
, 5)
2129 MIPS_SYS(sys_flock
, 2)
2130 MIPS_SYS(sys_msync
, 3)
2131 MIPS_SYS(sys_readv
, 3) /* 4145 */
2132 MIPS_SYS(sys_writev
, 3)
2133 MIPS_SYS(sys_cacheflush
, 3)
2134 MIPS_SYS(sys_cachectl
, 3)
2135 MIPS_SYS(sys_sysmips
, 4)
2136 MIPS_SYS(sys_ni_syscall
, 0) /* 4150 */
2137 MIPS_SYS(sys_getsid
, 1)
2138 MIPS_SYS(sys_fdatasync
, 0)
2139 MIPS_SYS(sys_sysctl
, 1)
2140 MIPS_SYS(sys_mlock
, 2)
2141 MIPS_SYS(sys_munlock
, 2) /* 4155 */
2142 MIPS_SYS(sys_mlockall
, 1)
2143 MIPS_SYS(sys_munlockall
, 0)
2144 MIPS_SYS(sys_sched_setparam
, 2)
2145 MIPS_SYS(sys_sched_getparam
, 2)
2146 MIPS_SYS(sys_sched_setscheduler
, 3) /* 4160 */
2147 MIPS_SYS(sys_sched_getscheduler
, 1)
2148 MIPS_SYS(sys_sched_yield
, 0)
2149 MIPS_SYS(sys_sched_get_priority_max
, 1)
2150 MIPS_SYS(sys_sched_get_priority_min
, 1)
2151 MIPS_SYS(sys_sched_rr_get_interval
, 2) /* 4165 */
2152 MIPS_SYS(sys_nanosleep
, 2)
2153 MIPS_SYS(sys_mremap
, 5)
2154 MIPS_SYS(sys_accept
, 3)
2155 MIPS_SYS(sys_bind
, 3)
2156 MIPS_SYS(sys_connect
, 3) /* 4170 */
2157 MIPS_SYS(sys_getpeername
, 3)
2158 MIPS_SYS(sys_getsockname
, 3)
2159 MIPS_SYS(sys_getsockopt
, 5)
2160 MIPS_SYS(sys_listen
, 2)
2161 MIPS_SYS(sys_recv
, 4) /* 4175 */
2162 MIPS_SYS(sys_recvfrom
, 6)
2163 MIPS_SYS(sys_recvmsg
, 3)
2164 MIPS_SYS(sys_send
, 4)
2165 MIPS_SYS(sys_sendmsg
, 3)
2166 MIPS_SYS(sys_sendto
, 6) /* 4180 */
2167 MIPS_SYS(sys_setsockopt
, 5)
2168 MIPS_SYS(sys_shutdown
, 2)
2169 MIPS_SYS(sys_socket
, 3)
2170 MIPS_SYS(sys_socketpair
, 4)
2171 MIPS_SYS(sys_setresuid
, 3) /* 4185 */
2172 MIPS_SYS(sys_getresuid
, 3)
2173 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_query_module */
2174 MIPS_SYS(sys_poll
, 3)
2175 MIPS_SYS(sys_nfsservctl
, 3)
2176 MIPS_SYS(sys_setresgid
, 3) /* 4190 */
2177 MIPS_SYS(sys_getresgid
, 3)
2178 MIPS_SYS(sys_prctl
, 5)
2179 MIPS_SYS(sys_rt_sigreturn
, 0)
2180 MIPS_SYS(sys_rt_sigaction
, 4)
2181 MIPS_SYS(sys_rt_sigprocmask
, 4) /* 4195 */
2182 MIPS_SYS(sys_rt_sigpending
, 2)
2183 MIPS_SYS(sys_rt_sigtimedwait
, 4)
2184 MIPS_SYS(sys_rt_sigqueueinfo
, 3)
2185 MIPS_SYS(sys_rt_sigsuspend
, 0)
2186 MIPS_SYS(sys_pread64
, 6) /* 4200 */
2187 MIPS_SYS(sys_pwrite64
, 6)
2188 MIPS_SYS(sys_chown
, 3)
2189 MIPS_SYS(sys_getcwd
, 2)
2190 MIPS_SYS(sys_capget
, 2)
2191 MIPS_SYS(sys_capset
, 2) /* 4205 */
2192 MIPS_SYS(sys_sigaltstack
, 2)
2193 MIPS_SYS(sys_sendfile
, 4)
2194 MIPS_SYS(sys_ni_syscall
, 0)
2195 MIPS_SYS(sys_ni_syscall
, 0)
2196 MIPS_SYS(sys_mmap2
, 6) /* 4210 */
2197 MIPS_SYS(sys_truncate64
, 4)
2198 MIPS_SYS(sys_ftruncate64
, 4)
2199 MIPS_SYS(sys_stat64
, 2)
2200 MIPS_SYS(sys_lstat64
, 2)
2201 MIPS_SYS(sys_fstat64
, 2) /* 4215 */
2202 MIPS_SYS(sys_pivot_root
, 2)
2203 MIPS_SYS(sys_mincore
, 3)
2204 MIPS_SYS(sys_madvise
, 3)
2205 MIPS_SYS(sys_getdents64
, 3)
2206 MIPS_SYS(sys_fcntl64
, 3) /* 4220 */
2207 MIPS_SYS(sys_ni_syscall
, 0)
2208 MIPS_SYS(sys_gettid
, 0)
2209 MIPS_SYS(sys_readahead
, 5)
2210 MIPS_SYS(sys_setxattr
, 5)
2211 MIPS_SYS(sys_lsetxattr
, 5) /* 4225 */
2212 MIPS_SYS(sys_fsetxattr
, 5)
2213 MIPS_SYS(sys_getxattr
, 4)
2214 MIPS_SYS(sys_lgetxattr
, 4)
2215 MIPS_SYS(sys_fgetxattr
, 4)
2216 MIPS_SYS(sys_listxattr
, 3) /* 4230 */
2217 MIPS_SYS(sys_llistxattr
, 3)
2218 MIPS_SYS(sys_flistxattr
, 3)
2219 MIPS_SYS(sys_removexattr
, 2)
2220 MIPS_SYS(sys_lremovexattr
, 2)
2221 MIPS_SYS(sys_fremovexattr
, 2) /* 4235 */
2222 MIPS_SYS(sys_tkill
, 2)
2223 MIPS_SYS(sys_sendfile64
, 5)
2224 MIPS_SYS(sys_futex
, 6)
2225 MIPS_SYS(sys_sched_setaffinity
, 3)
2226 MIPS_SYS(sys_sched_getaffinity
, 3) /* 4240 */
2227 MIPS_SYS(sys_io_setup
, 2)
2228 MIPS_SYS(sys_io_destroy
, 1)
2229 MIPS_SYS(sys_io_getevents
, 5)
2230 MIPS_SYS(sys_io_submit
, 3)
2231 MIPS_SYS(sys_io_cancel
, 3) /* 4245 */
2232 MIPS_SYS(sys_exit_group
, 1)
2233 MIPS_SYS(sys_lookup_dcookie
, 3)
2234 MIPS_SYS(sys_epoll_create
, 1)
2235 MIPS_SYS(sys_epoll_ctl
, 4)
2236 MIPS_SYS(sys_epoll_wait
, 3) /* 4250 */
2237 MIPS_SYS(sys_remap_file_pages
, 5)
2238 MIPS_SYS(sys_set_tid_address
, 1)
2239 MIPS_SYS(sys_restart_syscall
, 0)
2240 MIPS_SYS(sys_fadvise64_64
, 7)
2241 MIPS_SYS(sys_statfs64
, 3) /* 4255 */
2242 MIPS_SYS(sys_fstatfs64
, 2)
2243 MIPS_SYS(sys_timer_create
, 3)
2244 MIPS_SYS(sys_timer_settime
, 4)
2245 MIPS_SYS(sys_timer_gettime
, 2)
2246 MIPS_SYS(sys_timer_getoverrun
, 1) /* 4260 */
2247 MIPS_SYS(sys_timer_delete
, 1)
2248 MIPS_SYS(sys_clock_settime
, 2)
2249 MIPS_SYS(sys_clock_gettime
, 2)
2250 MIPS_SYS(sys_clock_getres
, 2)
2251 MIPS_SYS(sys_clock_nanosleep
, 4) /* 4265 */
2252 MIPS_SYS(sys_tgkill
, 3)
2253 MIPS_SYS(sys_utimes
, 2)
2254 MIPS_SYS(sys_mbind
, 4)
2255 MIPS_SYS(sys_ni_syscall
, 0) /* sys_get_mempolicy */
2256 MIPS_SYS(sys_ni_syscall
, 0) /* 4270 sys_set_mempolicy */
2257 MIPS_SYS(sys_mq_open
, 4)
2258 MIPS_SYS(sys_mq_unlink
, 1)
2259 MIPS_SYS(sys_mq_timedsend
, 5)
2260 MIPS_SYS(sys_mq_timedreceive
, 5)
2261 MIPS_SYS(sys_mq_notify
, 2) /* 4275 */
2262 MIPS_SYS(sys_mq_getsetattr
, 3)
2263 MIPS_SYS(sys_ni_syscall
, 0) /* sys_vserver */
2264 MIPS_SYS(sys_waitid
, 4)
2265 MIPS_SYS(sys_ni_syscall
, 0) /* available, was setaltroot */
2266 MIPS_SYS(sys_add_key
, 5)
2267 MIPS_SYS(sys_request_key
, 4)
2268 MIPS_SYS(sys_keyctl
, 5)
2269 MIPS_SYS(sys_set_thread_area
, 1)
2270 MIPS_SYS(sys_inotify_init
, 0)
2271 MIPS_SYS(sys_inotify_add_watch
, 3) /* 4285 */
2272 MIPS_SYS(sys_inotify_rm_watch
, 2)
2273 MIPS_SYS(sys_migrate_pages
, 4)
2274 MIPS_SYS(sys_openat
, 4)
2275 MIPS_SYS(sys_mkdirat
, 3)
2276 MIPS_SYS(sys_mknodat
, 4) /* 4290 */
2277 MIPS_SYS(sys_fchownat
, 5)
2278 MIPS_SYS(sys_futimesat
, 3)
2279 MIPS_SYS(sys_fstatat64
, 4)
2280 MIPS_SYS(sys_unlinkat
, 3)
2281 MIPS_SYS(sys_renameat
, 4) /* 4295 */
2282 MIPS_SYS(sys_linkat
, 5)
2283 MIPS_SYS(sys_symlinkat
, 3)
2284 MIPS_SYS(sys_readlinkat
, 4)
2285 MIPS_SYS(sys_fchmodat
, 3)
2286 MIPS_SYS(sys_faccessat
, 3) /* 4300 */
2287 MIPS_SYS(sys_pselect6
, 6)
2288 MIPS_SYS(sys_ppoll
, 5)
2289 MIPS_SYS(sys_unshare
, 1)
2290 MIPS_SYS(sys_splice
, 6)
2291 MIPS_SYS(sys_sync_file_range
, 7) /* 4305 */
2292 MIPS_SYS(sys_tee
, 4)
2293 MIPS_SYS(sys_vmsplice
, 4)
2294 MIPS_SYS(sys_move_pages
, 6)
2295 MIPS_SYS(sys_set_robust_list
, 2)
2296 MIPS_SYS(sys_get_robust_list
, 3) /* 4310 */
2297 MIPS_SYS(sys_kexec_load
, 4)
2298 MIPS_SYS(sys_getcpu
, 3)
2299 MIPS_SYS(sys_epoll_pwait
, 6)
2300 MIPS_SYS(sys_ioprio_set
, 3)
2301 MIPS_SYS(sys_ioprio_get
, 2)
2302 MIPS_SYS(sys_utimensat
, 4)
2303 MIPS_SYS(sys_signalfd
, 3)
2304 MIPS_SYS(sys_ni_syscall
, 0) /* was timerfd */
2305 MIPS_SYS(sys_eventfd
, 1)
2306 MIPS_SYS(sys_fallocate
, 6) /* 4320 */
2307 MIPS_SYS(sys_timerfd_create
, 2)
2308 MIPS_SYS(sys_timerfd_gettime
, 2)
2309 MIPS_SYS(sys_timerfd_settime
, 4)
2310 MIPS_SYS(sys_signalfd4
, 4)
2311 MIPS_SYS(sys_eventfd2
, 2) /* 4325 */
2312 MIPS_SYS(sys_epoll_create1
, 1)
2313 MIPS_SYS(sys_dup3
, 3)
2314 MIPS_SYS(sys_pipe2
, 2)
2315 MIPS_SYS(sys_inotify_init1
, 1)
2316 MIPS_SYS(sys_preadv
, 6) /* 4330 */
2317 MIPS_SYS(sys_pwritev
, 6)
2318 MIPS_SYS(sys_rt_tgsigqueueinfo
, 4)
2319 MIPS_SYS(sys_perf_event_open
, 5)
2320 MIPS_SYS(sys_accept4
, 4)
2321 MIPS_SYS(sys_recvmmsg
, 5) /* 4335 */
2322 MIPS_SYS(sys_fanotify_init
, 2)
2323 MIPS_SYS(sys_fanotify_mark
, 6)
2324 MIPS_SYS(sys_prlimit64
, 4)
2325 MIPS_SYS(sys_name_to_handle_at
, 5)
2326 MIPS_SYS(sys_open_by_handle_at
, 3) /* 4340 */
2327 MIPS_SYS(sys_clock_adjtime
, 2)
2328 MIPS_SYS(sys_syncfs
, 1)
static int do_store_exclusive(CPUMIPSState *env)
    target_ulong page_addr;

    page_addr = addr & TARGET_PAGE_MASK;
    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {
        reg = env->llreg & 0x1f;
        d = (env->llreg & 0x20) != 0;
            segv = get_user_s64(val, addr);
            segv = get_user_s32(val, addr);
        if (val != env->llval) {
            env->active_tc.gpr[reg] = 0;
                segv = put_user_u64(env->llnewval, addr);
                segv = put_user_u32(env->llnewval, addr);
                env->active_tc.gpr[reg] = 1;
    env->active_tc.PC += 4;
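/*
 * Result convention mirrored from the code above: the destination register
 * of the sc/scd (env->llreg & 0x1f) is written with 0 when the memory word
 * no longer matches llval and with 1 when the conditional store went
 * through, and active_tc.PC is advanced by 4 so execution resumes after
 * the store-conditional instruction.
 */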
static int do_break(CPUMIPSState *env, target_siginfo_t *info,
    info->si_signo = TARGET_SIGFPE;
    info->si_code = (code == BRK_OVERFLOW) ? TARGET_FPE_INTOVF : TARGET_FPE_INTDIV;
    queue_signal(env, info->si_signo, info);

    info->si_signo = TARGET_SIGTRAP;
    queue_signal(env, info->si_signo, info);
2413 void cpu_loop(CPUMIPSState *env)
2415 CPUState *cs = CPU(mips_env_get_cpu(env));
2416 target_siginfo_t info;
2419 # ifdef TARGET_ABI_MIPSO32
2420 unsigned int syscall_num;
2425 trapnr = cpu_mips_exec(env);
2429 env->active_tc.PC += 4;
2430 # ifdef TARGET_ABI_MIPSO32
2431 syscall_num = env->active_tc.gpr[2] - 4000;
2432 if (syscall_num >= sizeof(mips_syscall_args)) {
2433 ret = -TARGET_ENOSYS;
2437 abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;
2439 nb_args = mips_syscall_args[syscall_num];
2440 sp_reg = env->active_tc.gpr[29];
2442 /* these arguments are taken from the stack */
2444 if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) {
2448 if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) {
2452 if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) {
2456 if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) {
2462 ret = do_syscall(env, env->active_tc.gpr[2],
2463 env->active_tc.gpr[4],
2464 env->active_tc.gpr[5],
2465 env->active_tc.gpr[6],
2466 env->active_tc.gpr[7],
2467 arg5, arg6, arg7, arg8);
2471 ret = do_syscall(env, env->active_tc.gpr[2],
2472 env->active_tc.gpr[4], env->active_tc.gpr[5],
2473 env->active_tc.gpr[6], env->active_tc.gpr[7],
2474 env->active_tc.gpr[8], env->active_tc.gpr[9],
2475 env->active_tc.gpr[10], env->active_tc.gpr[11]);
2477 if (ret == -TARGET_QEMU_ESIGRETURN) {
2478 /* Returning from a successful sigreturn syscall.
2479 Avoid clobbering register state. */
2482 if ((abi_ulong)ret >= (abi_ulong)-1133) {
2483 env->active_tc.gpr[7] = 1; /* error flag */
2486 env->active_tc.gpr[7] = 0; /* error flag */
2488 env->active_tc.gpr[2] = ret;
2494 info.si_signo = TARGET_SIGSEGV;
2496 /* XXX: check env->error_code */
2497 info.si_code = TARGET_SEGV_MAPERR;
2498 info._sifields._sigfault._addr = env->CP0_BadVAddr;
2499 queue_signal(env, info.si_signo, &info);
2503 info.si_signo = TARGET_SIGILL;
2506 queue_signal(env, info.si_signo, &info);
2508 case EXCP_INTERRUPT:
2509 /* just indicate that signals should be handled asap */
2515 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2518 info.si_signo = sig;
2520 info.si_code = TARGET_TRAP_BRKPT;
2521 queue_signal(env, info.si_signo, &info);
2526 if (do_store_exclusive(env)) {
2527 info.si_signo = TARGET_SIGSEGV;
2529 info.si_code = TARGET_SEGV_MAPERR;
2530 info._sifields._sigfault._addr = env->active_tc.PC;
2531 queue_signal(env, info.si_signo, &info);
2535 info.si_signo = TARGET_SIGILL;
2537 info.si_code = TARGET_ILL_ILLOPC;
2538 queue_signal(env, info.si_signo, &info);
2540 /* The code below was inspired by the MIPS Linux kernel trap
2541 * handling code in arch/mips/kernel/traps.c.
2545 abi_ulong trap_instr;
2548 if (env->hflags & MIPS_HFLAG_M16) {
2549 if (env->insn_flags & ASE_MICROMIPS) {
2550 /* microMIPS mode */
2551 ret = get_user_u16(trap_instr, env->active_tc.PC);
2556 if ((trap_instr >> 10) == 0x11) {
2557 /* 16-bit instruction */
2558 code = trap_instr & 0xf;
2560 /* 32-bit instruction */
2563 ret = get_user_u16(instr_lo,
2564 env->active_tc.PC + 2);
2568 trap_instr = (trap_instr << 16) | instr_lo;
2569 code = ((trap_instr >> 6) & ((1 << 20) - 1));
2570 /* Unfortunately, microMIPS also suffers from
2571 the old assembler bug... */
2572 if (code >= (1 << 10)) {
2578 ret = get_user_u16(trap_instr, env->active_tc.PC);
2582 code = (trap_instr >> 6) & 0x3f;
2585 ret = get_user_ual(trap_instr, env->active_tc.PC);
2590 /* As described in the original Linux kernel code, the
2591 * below checks on 'code' are to work around an old
2594 code = ((trap_instr >> 6) & ((1 << 20) - 1));
2595 if (code >= (1 << 10)) {
2600 if (do_break(env, &info, code) != 0) {
2607 abi_ulong trap_instr;
2608 unsigned int code = 0;
2610 if (env->hflags & MIPS_HFLAG_M16) {
2611 /* microMIPS mode */
2614 ret = get_user_u16(instr[0], env->active_tc.PC) ||
2615 get_user_u16(instr[1], env->active_tc.PC + 2);
2617 trap_instr = (instr[0] << 16) | instr[1];
2619 ret = get_user_ual(trap_instr, env->active_tc.PC);
2626 /* The immediate versions don't provide a code. */
2627 if (!(trap_instr & 0xFC000000)) {
2628 if (env->hflags & MIPS_HFLAG_M16) {
2629 /* microMIPS mode */
2630 code = ((trap_instr >> 12) & ((1 << 4) - 1));
2632 code = ((trap_instr >> 6) & ((1 << 10) - 1));
2636 if (do_break(env, &info, code) != 0) {
2643 fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
2645 cpu_dump_state(cs, stderr, fprintf, 0);
2648 process_pending_signals(env);
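/* Note on the o32 syscall convention handled above: the syscall number
 * arrives in $v0 (gpr[2]) biased by 4000, the first four arguments in
 * $a0-$a3 (gpr[4]-gpr[7]), and arguments five through eight are loaded from
 * the user stack at $sp+16..$sp+28.  On return, gpr[7] acts as the error
 * flag (set when the result falls in the errno range, i.e. is >= -1133 as
 * an unsigned value) and gpr[2] carries the result. */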
2653 #ifdef TARGET_OPENRISC
2655 void cpu_loop(CPUOpenRISCState *env)
2657 CPUState *cs = CPU(openrisc_env_get_cpu(env));
2662 trapnr = cpu_exec(env);
2668 qemu_log("\nReset request, exit, pc is %#x\n", env->pc);
2672 qemu_log("\nBus error, exit, pc is %#x\n", env->pc);
2673 gdbsig = TARGET_SIGBUS;
2677 cpu_dump_state(cs, stderr, fprintf, 0);
2678 gdbsig = TARGET_SIGSEGV;
2681 qemu_log("\nTick time interrupt pc is %#x\n", env->pc);
2684 qemu_log("\nAlignment pc is %#x\n", env->pc);
2685 gdbsig = TARGET_SIGBUS;
2688 qemu_log("\nIllegal instruction pc is %#x\n", env->pc);
2689 gdbsig = TARGET_SIGILL;
2692 qemu_log("\nExternal interrupt pc is %#x\n", env->pc);
2696 qemu_log("\nTLB miss\n");
2699 qemu_log("\nRange\n");
2700 gdbsig = TARGET_SIGSEGV;
2703 env->pc += 4; /* 0xc00; */
2704 env->gpr[11] = do_syscall(env,
2705 env->gpr[11], /* return value */
2706 env->gpr[3], /* r3 - r7 are params */
2714 qemu_log("\nFloating point error\n");
2717 qemu_log("\nTrap\n");
2718 gdbsig = TARGET_SIGTRAP;
2724 qemu_log("\nqemu: unhandled CPU exception %#x - aborting\n",
2726 cpu_dump_state(cs, stderr, fprintf, 0);
2727 gdbsig = TARGET_SIGILL;
2731 gdb_handlesig(cs, gdbsig);
2732 if (gdbsig != TARGET_SIGTRAP) {
2737 process_pending_signals(env);
2741 #endif /* TARGET_OPENRISC */
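/* The OpenRISC loop above logs each exception with qemu_log() and, for most
 * of them, records a signal number in gdbsig that is then handed to
 * gdb_handlesig(); the system-call path instead advances env->pc by 4 and
 * dispatches do_syscall() with the number and return value in gpr[11] and
 * the parameters starting at gpr[3], as the inline comments note. */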
2744 void cpu_loop(CPUSH4State *env)
2746 CPUState *cs = CPU(sh_env_get_cpu(env));
2748 target_siginfo_t info;
2752 trapnr = cpu_sh4_exec (env);
2758 ret = do_syscall(env,
2767 env->gregs[0] = ret;
2769 case EXCP_INTERRUPT:
2770 /* just indicate that signals should be handled asap */
2776 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2779 info.si_signo = sig;
2781 info.si_code = TARGET_TRAP_BRKPT;
2782 queue_signal(env, info.si_signo, &info);
2788 info.si_signo = TARGET_SIGSEGV;
2790 info.si_code = TARGET_SEGV_MAPERR;
2791 info._sifields._sigfault._addr = env->tea;
2792 queue_signal(env, info.si_signo, &info);
2796 printf ("Unhandled trap: 0x%x\n", trapnr);
2797 cpu_dump_state(cs, stderr, fprintf, 0);
2800 process_pending_signals (env);
2806 void cpu_loop(CPUCRISState *env)
2808 CPUState *cs = CPU(cris_env_get_cpu(env));
2810 target_siginfo_t info;
2814 trapnr = cpu_cris_exec (env);
2819 info.si_signo = TARGET_SIGSEGV;
2821 /* XXX: check env->error_code */
2822 info.si_code = TARGET_SEGV_MAPERR;
2823 info._sifields._sigfault._addr = env->pregs[PR_EDA];
2824 queue_signal(env, info.si_signo, &info);
2827 case EXCP_INTERRUPT:
2828 /* just indicate that signals should be handled asap */
2831 ret = do_syscall(env,
2840 env->regs[10] = ret;
2846 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2849 info.si_signo = sig;
2851 info.si_code = TARGET_TRAP_BRKPT;
2852 queue_signal(env, info.si_signo, &info);
2857 printf ("Unhandled trap: 0x%x\n", trapnr);
2858 cpu_dump_state(cs, stderr, fprintf, 0);
2861 process_pending_signals (env);
2866 #ifdef TARGET_MICROBLAZE
2867 void cpu_loop(CPUMBState *env)
2869 CPUState *cs = CPU(mb_env_get_cpu(env));
2871 target_siginfo_t info;
2875 trapnr = cpu_mb_exec (env);
2880 info.si_signo = TARGET_SIGSEGV;
2882 /* XXX: check env->error_code */
2883 info.si_code = TARGET_SEGV_MAPERR;
2884 info._sifields._sigfault._addr = 0;
2885 queue_signal(env, info.si_signo, &info);
2888 case EXCP_INTERRUPT:
2889 /* just indicate that signals should be handled asap */
2892 /* Return address is 4 bytes after the call. */
2894 env->sregs[SR_PC] = env->regs[14];
2895 ret = do_syscall(env,
2907 env->regs[17] = env->sregs[SR_PC] + 4;
2908 if (env->iflags & D_FLAG) {
2909 env->sregs[SR_ESR] |= 1 << 12;
2910 env->sregs[SR_PC] -= 4;
2911 /* FIXME: if branch was immed, replay the imm as well. */
2914 env->iflags &= ~(IMM_FLAG | D_FLAG);
2916 switch (env->sregs[SR_ESR] & 31) {
2917 case ESR_EC_DIVZERO:
2918 info.si_signo = TARGET_SIGFPE;
2920 info.si_code = TARGET_FPE_FLTDIV;
2921 info._sifields._sigfault._addr = 0;
2922 queue_signal(env, info.si_signo, &info);
2925 info.si_signo = TARGET_SIGFPE;
2927 if (env->sregs[SR_FSR] & FSR_IO) {
2928 info.si_code = TARGET_FPE_FLTINV;
2930 if (env->sregs[SR_FSR] & FSR_DZ) {
2931 info.si_code = TARGET_FPE_FLTDIV;
2933 info._sifields._sigfault._addr = 0;
2934 queue_signal(env, info.si_signo, &info);
2937 printf ("Unhandled hw-exception: 0x%x\n",
2938 env->sregs[SR_ESR] & ESR_EC_MASK);
2939 cpu_dump_state(cs, stderr, fprintf, 0);
2948 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2951 info.si_signo = sig;
2953 info.si_code = TARGET_TRAP_BRKPT;
2954 queue_signal(env, info.si_signo, &info);
2959 printf ("Unhandled trap: 0x%x\n", trapnr);
2960 cpu_dump_state(cs, stderr, fprintf, 0);
2963 process_pending_signals (env);
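/* MicroBlaze hardware exceptions are decoded from SR_ESR above: divide-by-
 * zero and FPU exceptions are converted into SIGFPE, with the si_code chosen
 * from the FSR_IO/FSR_DZ bits, while an exception taken in a delay slot
 * (D_FLAG) rewinds SR_PC by 4 and records the fact in SR_ESR bit 12 before
 * the signal is queued. */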
2970 void cpu_loop(CPUM68KState *env)
2972 CPUState *cs = CPU(m68k_env_get_cpu(env));
2975 target_siginfo_t info;
2976 TaskState *ts = cs->opaque;
2980 trapnr = cpu_m68k_exec(env);
2985 if (ts->sim_syscalls) {
2987 get_user_u16(nr, env->pc + 2);
2989 do_m68k_simcall(env, nr);
2995 case EXCP_HALT_INSN:
2996 /* Semihosting syscall. */
2998 do_m68k_semihosting(env, env->dregs[0]);
3002 case EXCP_UNSUPPORTED:
3004 info.si_signo = TARGET_SIGILL;
3006 info.si_code = TARGET_ILL_ILLOPN;
3007 info._sifields._sigfault._addr = env->pc;
3008 queue_signal(env, info.si_signo, &info);
3012 ts->sim_syscalls = 0;
3015 env->dregs[0] = do_syscall(env,
3026 case EXCP_INTERRUPT:
3027 /* just indicate that signals should be handled asap */
3031 info.si_signo = TARGET_SIGSEGV;
3033 /* XXX: check env->error_code */
3034 info.si_code = TARGET_SEGV_MAPERR;
3035 info._sifields._sigfault._addr = env->mmu.ar;
3036 queue_signal(env, info.si_signo, &info);
3043 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
3046 info.si_signo = sig;
3048 info.si_code = TARGET_TRAP_BRKPT;
3049 queue_signal(env, info.si_signo, &info);
3054 fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
3056 cpu_dump_state(cs, stderr, fprintf, 0);
3059 process_pending_signals(env);
3062 #endif /* TARGET_M68K */
3065 static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)
3067 target_ulong addr, val, tmp;
3068 target_siginfo_t info;
3071 addr = env->lock_addr;
3072 tmp = env->lock_st_addr;
3073 env->lock_addr = -1;
3074 env->lock_st_addr = 0;
3080 if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
3084 if (val == env->lock_value) {
3086 if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {
3103 info.si_signo = TARGET_SIGSEGV;
3105 info.si_code = TARGET_SEGV_MAPERR;
3106 info._sifields._sigfault._addr = addr;
3107 queue_signal(env, TARGET_SIGSEGV, &info);
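/* Alpha's version of the same idea as the MIPS helper above: lock_addr and
 * lock_value are recorded by the load-locked instruction, and the
 * store-conditional path re-reads the (long or quad) word, performs the
 * store only when the value is unchanged, reports the outcome through the
 * destination register passed in 'reg', and raises SIGSEGV at the locked
 * address if either access faults. */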
3110 void cpu_loop(CPUAlphaState *env)
3112 CPUState *cs = CPU(alpha_env_get_cpu(env));
3114 target_siginfo_t info;
3119 trapnr = cpu_alpha_exec (env);
3122 /* All of the traps imply a transition through PALcode, which
3123 implies an REI instruction has been executed. Which means
3124 that the intr_flag should be cleared. */
3129 fprintf(stderr, "Reset requested. Exit\n");
3133 fprintf(stderr, "Machine check exception. Exit\n");
3136 case EXCP_SMP_INTERRUPT:
3137 case EXCP_CLK_INTERRUPT:
3138 case EXCP_DEV_INTERRUPT:
3139 fprintf(stderr, "External interrupt. Exit\n");
3143 env->lock_addr = -1;
3144 info.si_signo = TARGET_SIGSEGV;
3146 info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
3147 ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
3148 info._sifields._sigfault._addr = env->trap_arg0;
3149 queue_signal(env, info.si_signo, &info);
3152 env->lock_addr = -1;
3153 info.si_signo = TARGET_SIGBUS;
3155 info.si_code = TARGET_BUS_ADRALN;
3156 info._sifields._sigfault._addr = env->trap_arg0;
3157 queue_signal(env, info.si_signo, &info);
3161 env->lock_addr = -1;
3162 info.si_signo = TARGET_SIGILL;
3164 info.si_code = TARGET_ILL_ILLOPC;
3165 info._sifields._sigfault._addr = env->pc;
3166 queue_signal(env, info.si_signo, &info);
3169 env->lock_addr = -1;
3170 info.si_signo = TARGET_SIGFPE;
3172 info.si_code = TARGET_FPE_FLTINV;
3173 info._sifields._sigfault._addr = env->pc;
3174 queue_signal(env, info.si_signo, &info);
3177 /* No-op. Linux simply re-enables the FPU. */
3180 env->lock_addr = -1;
3181 switch (env->error_code) {
3184 info.si_signo = TARGET_SIGTRAP;
3186 info.si_code = TARGET_TRAP_BRKPT;
3187 info._sifields._sigfault._addr = env->pc;
3188 queue_signal(env, info.si_signo, &info);
3192 info.si_signo = TARGET_SIGTRAP;
3195 info._sifields._sigfault._addr = env->pc;
3196 queue_signal(env, info.si_signo, &info);
3200 trapnr = env->ir[IR_V0];
3201 sysret = do_syscall(env, trapnr,
3202 env->ir[IR_A0], env->ir[IR_A1],
3203 env->ir[IR_A2], env->ir[IR_A3],
3204 env->ir[IR_A4], env->ir[IR_A5],
3206 if (trapnr == TARGET_NR_sigreturn
3207 || trapnr == TARGET_NR_rt_sigreturn) {
3210 /* Syscall writes 0 to V0 to bypass error check, similar
3211 to how this is handled internal to Linux kernel.
3212 (Ab)use trapnr temporarily as boolean indicating error. */
3213 trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
3214 env->ir[IR_V0] = (trapnr ? -sysret : sysret);
3215 env->ir[IR_A3] = trapnr;
3219 /* ??? We can probably elide the code using page_unprotect
3220 that is checking for self-modifying code. Instead we
3221 could simply call tb_flush here. Until we work out the
3222 changes required to turn off the extra write protection,
3223 this can be a no-op. */
3227 /* Handled in the translator for usermode. */
3231 /* Handled in the translator for usermode. */
3235 info.si_signo = TARGET_SIGFPE;
3236 switch (env->ir[IR_A0]) {
3237 case TARGET_GEN_INTOVF:
3238 info.si_code = TARGET_FPE_INTOVF;
3240 case TARGET_GEN_INTDIV:
3241 info.si_code = TARGET_FPE_INTDIV;
3243 case TARGET_GEN_FLTOVF:
3244 info.si_code = TARGET_FPE_FLTOVF;
3246 case TARGET_GEN_FLTUND:
3247 info.si_code = TARGET_FPE_FLTUND;
3249 case TARGET_GEN_FLTINV:
3250 info.si_code = TARGET_FPE_FLTINV;
3252 case TARGET_GEN_FLTINE:
3253 info.si_code = TARGET_FPE_FLTRES;
3255 case TARGET_GEN_ROPRAND:
3259 info.si_signo = TARGET_SIGTRAP;
3264 info._sifields._sigfault._addr = env->pc;
3265 queue_signal(env, info.si_signo, &info);
3272 info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
3273 if (info.si_signo) {
3274 env->lock_addr = -1;
3276 info.si_code = TARGET_TRAP_BRKPT;
3277 queue_signal(env, info.si_signo, &info);
3282 do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
3284 case EXCP_INTERRUPT:
3285 /* Just indicate that signals should be handled asap. */
3288 printf ("Unhandled trap: 0x%x\n", trapnr);
3289 cpu_dump_state(cs, stderr, fprintf, 0);
3292 process_pending_signals (env);
3295 #endif /* TARGET_ALPHA */
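/* For Alpha syscalls the number travels in $v0 (IR_V0) and the arguments in
 * $a0-$a5; after do_syscall() the code above mirrors the kernel convention
 * by putting the absolute result back in $v0 and using $a3 (IR_A3) as the
 * error indicator, while sigreturn results are left untouched so saved
 * register state is not clobbered. */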
3298 void cpu_loop(CPUS390XState *env)
3300 CPUState *cs = CPU(s390_env_get_cpu(env));
3302 target_siginfo_t info;
3307 trapnr = cpu_s390x_exec(env);
3310 case EXCP_INTERRUPT:
3311 /* Just indicate that signals should be handled asap. */
3315 n = env->int_svc_code;
3317 /* syscalls > 255 */
3320 env->psw.addr += env->int_svc_ilen;
3321 env->regs[2] = do_syscall(env, n, env->regs[2], env->regs[3],
3322 env->regs[4], env->regs[5],
3323 env->regs[6], env->regs[7], 0, 0);
3327 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
3329 n = TARGET_TRAP_BRKPT;
3334 n = env->int_pgm_code;
3337 case PGM_PRIVILEGED:
3338 sig = TARGET_SIGILL;
3339 n = TARGET_ILL_ILLOPC;
3341 case PGM_PROTECTION:
3342 case PGM_ADDRESSING:
3343 sig = TARGET_SIGSEGV;
3344 /* XXX: check env->error_code */
3345 n = TARGET_SEGV_MAPERR;
3346 addr = env->__excp_addr;
3349 case PGM_SPECIFICATION:
3350 case PGM_SPECIAL_OP:
3353 sig = TARGET_SIGILL;
3354 n = TARGET_ILL_ILLOPN;
3357 case PGM_FIXPT_OVERFLOW:
3358 sig = TARGET_SIGFPE;
3359 n = TARGET_FPE_INTOVF;
3361 case PGM_FIXPT_DIVIDE:
3362 sig = TARGET_SIGFPE;
3363 n = TARGET_FPE_INTDIV;
3367 n = (env->fpc >> 8) & 0xff;
3369 /* compare-and-trap */
3372 /* An IEEE exception, simulated or otherwise. */
3374 n = TARGET_FPE_FLTINV;
3375 } else if (n & 0x40) {
3376 n = TARGET_FPE_FLTDIV;
3377 } else if (n & 0x20) {
3378 n = TARGET_FPE_FLTOVF;
3379 } else if (n & 0x10) {
3380 n = TARGET_FPE_FLTUND;
3381 } else if (n & 0x08) {
3382 n = TARGET_FPE_FLTRES;
3384 /* ??? Quantum exception; BFP, DFP error. */
3387 sig = TARGET_SIGFPE;
3392 fprintf(stderr, "Unhandled program exception: %#x\n", n);
3393 cpu_dump_state(cs, stderr, fprintf, 0);
3399 addr = env->psw.addr;
3401 info.si_signo = sig;
3404 info._sifields._sigfault._addr = addr;
3405 queue_signal(env, info.si_signo, &info);
3409 fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
3410 cpu_dump_state(cs, stderr, fprintf, 0);
3413 process_pending_signals (env);
3417 #endif /* TARGET_S390X */
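/* On s390x, system calls arrive as SVC interruptions (int_svc_code holds the
 * number, int_svc_ilen the instruction length used to advance the PSW),
 * while program interruptions (int_pgm_code) are translated into POSIX
 * signals; for data exceptions the code taken from bits of env->fpc selects
 * which TARGET_FPE_* si_code accompanies SIGFPE. */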
3419 THREAD CPUState *thread_cpu;
3421 void task_settid(TaskState *ts)
3423 if (ts->ts_tid == 0) {
3424 ts->ts_tid = (pid_t)syscall(SYS_gettid);
3428 void stop_all_tasks(void)
3431 * We trust that when using NPTL, start_exclusive()
3432 * handles thread stopping correctly.
3437 /* Assumes contents are already zeroed. */
3438 void init_task_state(TaskState *ts)
3443 ts->first_free = ts->sigqueue_table;
3444 for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
3445 ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1];
3447 ts->sigqueue_table[i].next = NULL;
3450 CPUArchState *cpu_copy(CPUArchState *env)
3452 CPUState *cpu = ENV_GET_CPU(env);
3453 CPUState *new_cpu = cpu_init(cpu_model);
3454 CPUArchState *new_env = new_cpu->env_ptr;
3458 /* Reset non arch specific state */
3461 memcpy(new_env, env, sizeof(CPUArchState));
3463 /* Clone all break/watchpoints.
3464 Note: Once we support ptrace with hw-debug register access, make sure
3465 BP_CPU break/watchpoints are handled correctly on clone. */
3466 QTAILQ_INIT(&cpu->breakpoints);
3467 QTAILQ_INIT(&cpu->watchpoints);
3468 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
3469 cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
3471 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
3472 cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
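/* cpu_copy() is used when a new guest thread is created: it builds a sibling
 * CPU with cpu_init(cpu_model), copies the architectural register file
 * wholesale with memcpy(), and then re-registers break- and watchpoints on
 * the new CPUState so the child keeps the parent's debug state. */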
3478 static void handle_arg_help(const char *arg)
3483 static void handle_arg_log(const char *arg)
3487 mask = qemu_str_to_log_mask(arg);
3489 qemu_print_log_usage(stdout);
3495 static void handle_arg_log_filename(const char *arg)
3497 qemu_set_log_filename(arg);
3500 static void handle_arg_set_env(const char *arg)
3502 char *r, *p, *token;
3503 r = p = strdup(arg);
3504 while ((token = strsep(&p, ",")) != NULL) {
3505 if (envlist_setenv(envlist, token) != 0) {
3512 static void handle_arg_unset_env(const char *arg)
3514 char *r, *p, *token;
3515 r = p = strdup(arg);
3516 while ((token = strsep(&p, ",")) != NULL) {
3517 if (envlist_unsetenv(envlist, token) != 0) {
3524 static void handle_arg_argv0(const char *arg)
3526 argv0 = strdup(arg);
3529 static void handle_arg_stack_size(const char *arg)
3532 guest_stack_size = strtoul(arg, &p, 0);
3533 if (guest_stack_size == 0) {
3538 guest_stack_size *= 1024 * 1024;
3539 } else if (*p == 'k' || *p == 'K') {
3540 guest_stack_size *= 1024;
3544 static void handle_arg_ld_prefix(const char *arg)
3546 interp_prefix = strdup(arg);
3549 static void handle_arg_pagesize(const char *arg)
3551 qemu_host_page_size = atoi(arg);
3552 if (qemu_host_page_size == 0 ||
3553 (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
3554 fprintf(stderr, "page size must be a power of two\n");
3559 static void handle_arg_randseed(const char *arg)
3561 unsigned long long seed;
3563 if (parse_uint_full(arg, &seed, 0) != 0 || seed > UINT_MAX) {
3564 fprintf(stderr, "Invalid seed number: %s\n", arg);
3570 static void handle_arg_gdb(const char *arg)
3572 gdbstub_port = atoi(arg);
3575 static void handle_arg_uname(const char *arg)
3577 qemu_uname_release = strdup(arg);
3580 static void handle_arg_cpu(const char *arg)
3582 cpu_model = strdup(arg);
3583 if (cpu_model == NULL || is_help_option(cpu_model)) {
3584 /* XXX: implement xxx_cpu_list for targets that still miss it */
3585 #if defined(cpu_list)
3586 cpu_list(stdout, &fprintf);
3592 #if defined(CONFIG_USE_GUEST_BASE)
3593 static void handle_arg_guest_base(const char *arg)
3595 guest_base = strtol(arg, NULL, 0);
3596 have_guest_base = 1;
3599 static void handle_arg_reserved_va(const char *arg)
3603 reserved_va = strtoul(arg, &p, 0);
3617 unsigned long unshifted = reserved_va;
3619 reserved_va <<= shift;
3620 if (((reserved_va >> shift) != unshifted)
3621 #if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
3622 || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
3625 fprintf(stderr, "Reserved virtual address too big\n");
3630 fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
3636 static void handle_arg_singlestep(const char *arg)
3641 static void handle_arg_strace(const char *arg)
3646 static void handle_arg_version(const char *arg)
3648 printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION
3649 ", Copyright (c) 2003-2008 Fabrice Bellard\n");
3653 struct qemu_argument {
3657 void (*handle_opt)(const char *arg);
3658 const char *example;
3662 static const struct qemu_argument arg_table[] = {
3663 {"h", "", false, handle_arg_help,
3664 "", "print this help"},
3665 {"g", "QEMU_GDB", true, handle_arg_gdb,
3666 "port", "wait for gdb connection on 'port'"},
3667 {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix,
3668 "path", "set the elf interpreter prefix to 'path'"},
3669 {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size,
3670 "size", "set the stack size to 'size' bytes"},
3671 {"cpu", "QEMU_CPU", true, handle_arg_cpu,
3672 "model", "select CPU (-cpu help for list)"},
3673 {"E", "QEMU_SET_ENV", true, handle_arg_set_env,
3674 "var=value", "sets the target's environment variable (see below)"},
3675 {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env,
3676 "var", "unsets the target's environment variable (see below)"},
3677 {"0", "QEMU_ARGV0", true, handle_arg_argv0,
3678 "argv0", "forces target process argv[0] to be 'argv0'"},
3679 {"r", "QEMU_UNAME", true, handle_arg_uname,
3680 "uname", "set qemu uname release string to 'uname'"},
3681 #if defined(CONFIG_USE_GUEST_BASE)
3682 {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base,
3683 "address", "set guest_base address to 'address'"},
3684 {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va,
3685 "size", "reserve 'size' bytes for guest virtual address space"},
3687 {"d", "QEMU_LOG", true, handle_arg_log,
3688 "item[,...]", "enable logging of specified items "
3689 "(use '-d help' for a list of items)"},
3690 {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename,
3691 "logfile", "write logs to 'logfile' (default stderr)"},
3692 {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize,
3693 "pagesize", "set the host page size to 'pagesize'"},
3694 {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep,
3695 "", "run in singlestep mode"},
3696 {"strace", "QEMU_STRACE", false, handle_arg_strace,
3697 "", "log system calls"},
3698 {"seed", "QEMU_RAND_SEED", true, handle_arg_randseed,
3699 "", "Seed for pseudo-random number generator"},
3700 {"version", "QEMU_VERSION", false, handle_arg_version,
3701 "", "display version information and exit"},
3702 {NULL, NULL, false, NULL, NULL, NULL}
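/* Command-line handling is table driven: each qemu_argument entry names the
 * option string, the environment variable that can stand in for it, whether
 * it consumes a value, the handler to call, and the example/help text that
 * usage() prints.  A new switch only needs one more row; for illustration
 * (hypothetical, not part of the real table):
 *
 *   {"foo", "QEMU_FOO", true, handle_arg_foo,
 *    "value", "describe what 'value' does"},
 */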
3705 static void usage(void)
3707 const struct qemu_argument *arginfo;
3711 printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n"
3712 "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n"
3714 "Options and associated environment variables:\n"
3717 /* Calculate column widths. We must always have at least enough space
3718 * for the column header.
3720 maxarglen = strlen("Argument");
3721 maxenvlen = strlen("Env-variable");
3723 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
3724 int arglen = strlen(arginfo->argv);
3725 if (arginfo->has_arg) {
3726 arglen += strlen(arginfo->example) + 1;
3728 if (strlen(arginfo->env) > maxenvlen) {
3729 maxenvlen = strlen(arginfo->env);
3731 if (arglen > maxarglen) {
3736 printf("%-*s %-*s Description\n", maxarglen+1, "Argument",
3737 maxenvlen, "Env-variable");
3739 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
3740 if (arginfo->has_arg) {
3741 printf("-%s %-*s %-*s %s\n", arginfo->argv,
3742 (int)(maxarglen - strlen(arginfo->argv) - 1),
3743 arginfo->example, maxenvlen, arginfo->env, arginfo->help);
3745 printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv,
3746 maxenvlen, arginfo->env,
3753 "QEMU_LD_PREFIX = %s\n"
3754 "QEMU_STACK_SIZE = %ld byte\n",
3759 "You can use -E and -U options or the QEMU_SET_ENV and\n"
3760 "QEMU_UNSET_ENV environment variables to set and unset\n"
3761 "environment variables for the target process.\n"
3762 "It is possible to provide several variables by separating them\n"
3763 "by commas in getsubopt(3) style. Additionally it is possible to\n"
3764 "provide the -E and -U options multiple times.\n"
3765 "The following lines are equivalent:\n"
3766 " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
3767 " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
3768 " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
3769 "Note that if you provide several changes to a single variable\n"
3770 "the last change will stay in effect.\n");
3775 static int parse_args(int argc, char **argv)
3779 const struct qemu_argument *arginfo;
3781 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
3782 if (arginfo->env == NULL) {
3786 r = getenv(arginfo->env);
3788 arginfo->handle_opt(r);
3794 if (optind >= argc) {
3803 if (!strcmp(r, "-")) {
3807 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
3808 if (!strcmp(r, arginfo->argv)) {
3809 if (arginfo->has_arg) {
3810 if (optind >= argc) {
3813 arginfo->handle_opt(argv[optind]);
3816 arginfo->handle_opt(NULL);
3822 /* no option matched the current argv */
3823 if (arginfo->handle_opt == NULL) {
3828 if (optind >= argc) {
3832 filename = argv[optind];
3833 exec_path = argv[optind];
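/* parse_args() first walks arg_table and applies any QEMU_* environment
 * variables, then scans argv until the first non-option word, calling each
 * matching handler (with the following argv element when has_arg is set).
 * Everything from that point on, starting with 'filename', is left for the
 * guest program itself. */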
3838 int main(int argc, char **argv, char **envp)
3840 struct target_pt_regs regs1, *regs = &regs1;
3841 struct image_info info1, *info = &info1;
3842 struct linux_binprm bprm;
3847 char **target_environ, **wrk;
3854 module_call_init(MODULE_INIT_QOM);
3856 if ((envlist = envlist_create()) == NULL) {
3857 (void) fprintf(stderr, "Unable to allocate envlist\n");
3861 /* add current environment into the list */
3862 for (wrk = environ; *wrk != NULL; wrk++) {
3863 (void) envlist_setenv(envlist, *wrk);
3866 /* Read the stack limit from the kernel. If it's "unlimited",
3867 then we can do little else besides use the default. */
3870 if (getrlimit(RLIMIT_STACK, &lim) == 0
3871 && lim.rlim_cur != RLIM_INFINITY
3872 && lim.rlim_cur == (target_long)lim.rlim_cur) {
3873 guest_stack_size = lim.rlim_cur;
3878 #if defined(cpudef_setup)
3879 cpudef_setup(); /* parse cpu definitions in target config file (TBD) */
3884 optind = parse_args(argc, argv);
3887 memset(regs, 0, sizeof(struct target_pt_regs));
3889 /* Zero out image_info */
3890 memset(info, 0, sizeof(struct image_info));
3892 memset(&bprm, 0, sizeof (bprm));
3894 /* Scan interp_prefix dir for replacement files. */
3895 init_paths(interp_prefix);
3897 init_qemu_uname_release();
3899 if (cpu_model == NULL) {
3900 #if defined(TARGET_I386)
3901 #ifdef TARGET_X86_64
3902 cpu_model = "qemu64";
3904 cpu_model = "qemu32";
3906 #elif defined(TARGET_ARM)
3908 #elif defined(TARGET_UNICORE32)
3910 #elif defined(TARGET_M68K)
3912 #elif defined(TARGET_SPARC)
3913 #ifdef TARGET_SPARC64
3914 cpu_model = "TI UltraSparc II";
3916 cpu_model = "Fujitsu MB86904";
3918 #elif defined(TARGET_MIPS)
3919 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
3924 #elif defined TARGET_OPENRISC
3925 cpu_model = "or1200";
3926 #elif defined(TARGET_PPC)
3927 # ifdef TARGET_PPC64
3928 cpu_model = "POWER7";
3937 /* NOTE: we need to init the CPU at this stage to get
3938 qemu_host_page_size */
3939 cpu = cpu_init(cpu_model);
3941 fprintf(stderr, "Unable to find CPU definition\n");
3949 if (getenv("QEMU_STRACE")) {
3953 if (getenv("QEMU_RAND_SEED")) {
3954 handle_arg_randseed(getenv("QEMU_RAND_SEED"));
3957 target_environ = envlist_to_environ(envlist, NULL);
3958 envlist_free(envlist);
3960 #if defined(CONFIG_USE_GUEST_BASE)
3962 * Now that page sizes are configured in cpu_init() we can do
3963 * proper page alignment for guest_base.
3965 guest_base = HOST_PAGE_ALIGN(guest_base);
3967 if (reserved_va || have_guest_base) {
3968 guest_base = init_guest_space(guest_base, reserved_va, 0,
3970 if (guest_base == (unsigned long)-1) {
3971 fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
3972 "space for use as guest address space (check your virtual "
3973 "memory ulimit setting or reserve less using -R option)\n",
3979 mmap_next_start = reserved_va;
3982 #endif /* CONFIG_USE_GUEST_BASE */
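/* With CONFIG_USE_GUEST_BASE the guest address space is carved out of the
 * host's: guest_base is page-aligned once the CPU (and therefore the host
 * page size) is known, init_guest_space() reserves the -R/reserved_va
 * window, and mmap_next_start is seeded so guest mmap() allocations stay
 * inside the reservation. */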
3985 * Read in mmap_min_addr kernel parameter. This value is used
3986 * when loading the ELF image to determine whether guest_base
3987 * is needed. It is also used in mmap_find_vma.
3992 if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
3994 if (fscanf(fp, "%lu", &tmp) == 1) {
3995 mmap_min_addr = tmp;
3996 qemu_log("host mmap_min_addr=0x%lx\n", mmap_min_addr);
4003 * Prepare copy of argv vector for target.
4005 target_argc = argc - optind;
4006 target_argv = calloc(target_argc + 1, sizeof (char *));
4007 if (target_argv == NULL) {
4008 (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");
4013 * If argv0 is specified (using '-0' switch) we replace
4014 * argv[0] pointer with the given one.
4017 if (argv0 != NULL) {
4018 target_argv[i++] = strdup(argv0);
4020 for (; i < target_argc; i++) {
4021 target_argv[i] = strdup(argv[optind + i]);
4023 target_argv[target_argc] = NULL;
4025 ts = g_malloc0 (sizeof(TaskState));
4026 init_task_state(ts);
4027 /* build Task State */
4033 execfd = qemu_getauxval(AT_EXECFD);
4035 execfd = open(filename, O_RDONLY);
4037 printf("Error while loading %s: %s\n", filename, strerror(errno));
4042 ret = loader_exec(execfd, filename, target_argv, target_environ, regs,
4045 printf("Error while loading %s: %s\n", filename, strerror(-ret));
4049 for (wrk = target_environ; *wrk; wrk++) {
4053 free(target_environ);
4055 if (qemu_log_enabled()) {
4056 #if defined(CONFIG_USE_GUEST_BASE)
4057 qemu_log("guest_base 0x%lx\n", guest_base);
4061 qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
4062 qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code);
4063 qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n",
4065 qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n",
4067 qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data);
4068 qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
4070 qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk);
4071 qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry);
4074 target_set_brk(info->brk);
4078 #if defined(CONFIG_USE_GUEST_BASE)
4079 /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
4080 generating the prologue until now so that the prologue can take
4081 the real value of GUEST_BASE into account. */
4082 tcg_prologue_init(&tcg_ctx);
4085 #if defined(TARGET_I386)
4086 env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
4087 env->hflags |= HF_PE_MASK | HF_CPL_MASK;
4088 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4089 env->cr[4] |= CR4_OSFXSR_MASK;
4090 env->hflags |= HF_OSFXSR_MASK;
4092 #ifndef TARGET_ABI32
4093 /* enable 64 bit mode if possible */
4094 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
4095 fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
4098 env->cr[4] |= CR4_PAE_MASK;
4099 env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
4100 env->hflags |= HF_LMA_MASK;
4103 /* flags setup : we activate the IRQs by default as in user mode */
4104 env->eflags |= IF_MASK;
4106 /* linux register setup */
4107 #ifndef TARGET_ABI32
4108 env->regs[R_EAX] = regs->rax;
4109 env->regs[R_EBX] = regs->rbx;
4110 env->regs[R_ECX] = regs->rcx;
4111 env->regs[R_EDX] = regs->rdx;
4112 env->regs[R_ESI] = regs->rsi;
4113 env->regs[R_EDI] = regs->rdi;
4114 env->regs[R_EBP] = regs->rbp;
4115 env->regs[R_ESP] = regs->rsp;
4116 env->eip = regs->rip;
4118 env->regs[R_EAX] = regs->eax;
4119 env->regs[R_EBX] = regs->ebx;
4120 env->regs[R_ECX] = regs->ecx;
4121 env->regs[R_EDX] = regs->edx;
4122 env->regs[R_ESI] = regs->esi;
4123 env->regs[R_EDI] = regs->edi;
4124 env->regs[R_EBP] = regs->ebp;
4125 env->regs[R_ESP] = regs->esp;
4126 env->eip = regs->eip;
4129 /* linux interrupt setup */
4130 #ifndef TARGET_ABI32
4131 env->idt.limit = 511;
4133 env->idt.limit = 255;
4135 env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
4136 PROT_READ|PROT_WRITE,
4137 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4138 idt_table = g2h(env->idt.base);
4161 /* linux segment setup */
4163 uint64_t *gdt_table;
4164 env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
4165 PROT_READ|PROT_WRITE,
4166 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4167 env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
4168 gdt_table = g2h(env->gdt.base);
4170 write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
4171 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
4172 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
4174 /* 64 bit code segment */
4175 write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
4176 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
4178 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
4180 write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
4181 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
4182 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
4184 cpu_x86_load_seg(env, R_CS, __USER_CS);
4185 cpu_x86_load_seg(env, R_SS, __USER_DS);
4187 cpu_x86_load_seg(env, R_DS, __USER_DS);
4188 cpu_x86_load_seg(env, R_ES, __USER_DS);
4189 cpu_x86_load_seg(env, R_FS, __USER_DS);
4190 cpu_x86_load_seg(env, R_GS, __USER_DS);
4191 /* This hack makes Wine work... */
4192 env->segs[R_FS].selector = 0;
4194 cpu_x86_load_seg(env, R_DS, 0);
4195 cpu_x86_load_seg(env, R_ES, 0);
4196 cpu_x86_load_seg(env, R_FS, 0);
4197 cpu_x86_load_seg(env, R_GS, 0);
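/* Summary of the x86 setup above: CR0 gets PG/WP/PE so the guest sees
 * protected mode with paging, 64-bit mode is switched on via EFER LMA/LME
 * only when the chosen CPU model advertises long mode, the pt_regs values
 * seed the general-purpose registers, and a minimal GDT with flat DPL-3
 * code and data segments (plus the guest-mapped IDT) is built with
 * target_mmap() and write_dt(). */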
4199 #elif defined(TARGET_AARCH64)
4203 if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
4205 "The selected ARM CPU does not support 64 bit mode\n");
4209 for (i = 0; i < 31; i++) {
4210 env->xregs[i] = regs->regs[i];
4213 env->xregs[31] = regs->sp;
4215 #elif defined(TARGET_ARM)
4218 cpsr_write(env, regs->uregs[16], 0xffffffff);
4219 for(i = 0; i < 16; i++) {
4220 env->regs[i] = regs->uregs[i];
4223 if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
4224 && (info->elf_flags & EF_ARM_BE8)) {
4225 env->bswap_code = 1;
4228 #elif defined(TARGET_UNICORE32)
4231 cpu_asr_write(env, regs->uregs[32], 0xffffffff);
4232 for (i = 0; i < 32; i++) {
4233 env->regs[i] = regs->uregs[i];
4236 #elif defined(TARGET_SPARC)
4240 env->npc = regs->npc;
4242 for(i = 0; i < 8; i++)
4243 env->gregs[i] = regs->u_regs[i];
4244 for(i = 0; i < 8; i++)
4245 env->regwptr[i] = regs->u_regs[i + 8];
4247 #elif defined(TARGET_PPC)
4251 #if defined(TARGET_PPC64)
4252 #if defined(TARGET_ABI32)
4253 env->msr &= ~((target_ulong)1 << MSR_SF);
4255 env->msr |= (target_ulong)1 << MSR_SF;
4258 env->nip = regs->nip;
4259 for(i = 0; i < 32; i++) {
4260 env->gpr[i] = regs->gpr[i];
4263 #elif defined(TARGET_M68K)
4266 env->dregs[0] = regs->d0;
4267 env->dregs[1] = regs->d1;
4268 env->dregs[2] = regs->d2;
4269 env->dregs[3] = regs->d3;
4270 env->dregs[4] = regs->d4;
4271 env->dregs[5] = regs->d5;
4272 env->dregs[6] = regs->d6;
4273 env->dregs[7] = regs->d7;
4274 env->aregs[0] = regs->a0;
4275 env->aregs[1] = regs->a1;
4276 env->aregs[2] = regs->a2;
4277 env->aregs[3] = regs->a3;
4278 env->aregs[4] = regs->a4;
4279 env->aregs[5] = regs->a5;
4280 env->aregs[6] = regs->a6;
4281 env->aregs[7] = regs->usp;
4283 ts->sim_syscalls = 1;
4285 #elif defined(TARGET_MICROBLAZE)
4287 env->regs[0] = regs->r0;
4288 env->regs[1] = regs->r1;
4289 env->regs[2] = regs->r2;
4290 env->regs[3] = regs->r3;
4291 env->regs[4] = regs->r4;
4292 env->regs[5] = regs->r5;
4293 env->regs[6] = regs->r6;
4294 env->regs[7] = regs->r7;
4295 env->regs[8] = regs->r8;
4296 env->regs[9] = regs->r9;
4297 env->regs[10] = regs->r10;
4298 env->regs[11] = regs->r11;
4299 env->regs[12] = regs->r12;
4300 env->regs[13] = regs->r13;
4301 env->regs[14] = regs->r14;
4302 env->regs[15] = regs->r15;
4303 env->regs[16] = regs->r16;
4304 env->regs[17] = regs->r17;
4305 env->regs[18] = regs->r18;
4306 env->regs[19] = regs->r19;
4307 env->regs[20] = regs->r20;
4308 env->regs[21] = regs->r21;
4309 env->regs[22] = regs->r22;
4310 env->regs[23] = regs->r23;
4311 env->regs[24] = regs->r24;
4312 env->regs[25] = regs->r25;
4313 env->regs[26] = regs->r26;
4314 env->regs[27] = regs->r27;
4315 env->regs[28] = regs->r28;
4316 env->regs[29] = regs->r29;
4317 env->regs[30] = regs->r30;
4318 env->regs[31] = regs->r31;
4319 env->sregs[SR_PC] = regs->pc;
4321 #elif defined(TARGET_MIPS)
4325 for(i = 0; i < 32; i++) {
4326 env->active_tc.gpr[i] = regs->regs[i];
4328 env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
4329 if (regs->cp0_epc & 1) {
4330 env->hflags |= MIPS_HFLAG_M16;
4333 #elif defined(TARGET_OPENRISC)
4337 for (i = 0; i < 32; i++) {
4338 env->gpr[i] = regs->gpr[i];
4344 #elif defined(TARGET_SH4)
4348 for(i = 0; i < 16; i++) {
4349 env->gregs[i] = regs->regs[i];
4353 #elif defined(TARGET_ALPHA)
4357 for(i = 0; i < 28; i++) {
4358 env->ir[i] = ((abi_ulong *)regs)[i];
4360 env->ir[IR_SP] = regs->usp;
4363 #elif defined(TARGET_CRIS)
4365 env->regs[0] = regs->r0;
4366 env->regs[1] = regs->r1;
4367 env->regs[2] = regs->r2;
4368 env->regs[3] = regs->r3;
4369 env->regs[4] = regs->r4;
4370 env->regs[5] = regs->r5;
4371 env->regs[6] = regs->r6;
4372 env->regs[7] = regs->r7;
4373 env->regs[8] = regs->r8;
4374 env->regs[9] = regs->r9;
4375 env->regs[10] = regs->r10;
4376 env->regs[11] = regs->r11;
4377 env->regs[12] = regs->r12;
4378 env->regs[13] = regs->r13;
4379 env->regs[14] = info->start_stack;
4380 env->regs[15] = regs->acr;
4381 env->pc = regs->erp;
4383 #elif defined(TARGET_S390X)
4386 for (i = 0; i < 16; i++) {
4387 env->regs[i] = regs->gprs[i];
4389 env->psw.mask = regs->psw.mask;
4390 env->psw.addr = regs->psw.addr;
4393 #error unsupported target CPU
4396 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4397 ts->stack_base = info->start_stack;
4398 ts->heap_base = info->brk;
4399 /* This will be filled in on the first SYS_HEAPINFO call. */
4404 if (gdbserver_start(gdbstub_port) < 0) {
4405 fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
4409 gdb_handlesig(cpu, 0);
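/* When -g/QEMU_GDB supplies a port, gdbserver_start() opens the stub on it
 * and the gdb_handlesig(cpu, 0) call above gives the attached debugger
 * control before the first guest instruction runs. */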