4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
26 #include <sys/syscall.h>
27 #include <sys/resource.h>
30 #include "qemu-common.h"
33 #include "qemu/timer.h"
34 #include "qemu/envlist.h"
44 static const char *cpu_model
;
45 unsigned long mmap_min_addr
;
46 #if defined(CONFIG_USE_GUEST_BASE)
47 unsigned long guest_base
;
49 #if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
51 * When running 32-on-64 we should make sure we can fit all of the possible
52 * guest address space into a contiguous chunk of virtual host memory.
54 * This way we will never overlap with our own libraries or binaries or stack
55 * or anything else that QEMU maps.
58 /* MIPS only supports 31 bits of virtual address space for user space */
59 unsigned long reserved_va
= 0x77000000;
61 unsigned long reserved_va
= 0xf7000000;
64 unsigned long reserved_va
;
68 static void usage(void);
70 static const char *interp_prefix
= CONFIG_QEMU_INTERP_PREFIX
;
71 const char *qemu_uname_release
;
73 /* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
74 we allocate a bigger stack. Need a better solution, for example
75 by remapping the process stack directly at the right place */
76 unsigned long guest_stack_size
= 8 * 1024 * 1024UL;
/* Log a printf-style message to stderr (user-mode emulation trace/log aid).
 * The varargs bookkeeping (va_start/va_end) was lost in the garbled text;
 * restored here around the surviving vfprintf call.
 */
void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}
87 #if defined(TARGET_I386)
88 int cpu_get_pic_interrupt(CPUX86State
*env
)
94 /***********************************************************/
95 /* Helper routines for implementing atomic operations. */
97 /* To implement exclusive operations we force all cpus to syncronise.
98 We don't require a full sync, only that no cpus are executing guest code.
99 The alternative is to map target atomic ops onto host equivalents,
100 which requires quite a lot of per host/target work. */
101 static pthread_mutex_t cpu_list_mutex
= PTHREAD_MUTEX_INITIALIZER
;
102 static pthread_mutex_t exclusive_lock
= PTHREAD_MUTEX_INITIALIZER
;
103 static pthread_cond_t exclusive_cond
= PTHREAD_COND_INITIALIZER
;
104 static pthread_cond_t exclusive_resume
= PTHREAD_COND_INITIALIZER
;
105 static int pending_cpus
;
107 /* Make sure everything is in a consistent state for calling fork(). */
108 void fork_start(void)
110 pthread_mutex_lock(&tcg_ctx
.tb_ctx
.tb_lock
);
111 pthread_mutex_lock(&exclusive_lock
);
115 void fork_end(int child
)
117 mmap_fork_end(child
);
119 CPUState
*cpu
, *next_cpu
;
120 /* Child processes created by fork() only have a single thread.
121 Discard information about the parent threads. */
122 CPU_FOREACH_SAFE(cpu
, next_cpu
) {
123 if (cpu
!= thread_cpu
) {
124 QTAILQ_REMOVE(&cpus
, thread_cpu
, node
);
128 pthread_mutex_init(&exclusive_lock
, NULL
);
129 pthread_mutex_init(&cpu_list_mutex
, NULL
);
130 pthread_cond_init(&exclusive_cond
, NULL
);
131 pthread_cond_init(&exclusive_resume
, NULL
);
132 pthread_mutex_init(&tcg_ctx
.tb_ctx
.tb_lock
, NULL
);
133 gdbserver_fork((CPUArchState
*)thread_cpu
->env_ptr
);
135 pthread_mutex_unlock(&exclusive_lock
);
136 pthread_mutex_unlock(&tcg_ctx
.tb_ctx
.tb_lock
);
140 /* Wait for pending exclusive operations to complete. The exclusive lock
142 static inline void exclusive_idle(void)
144 while (pending_cpus
) {
145 pthread_cond_wait(&exclusive_resume
, &exclusive_lock
);
149 /* Start an exclusive operation.
150 Must only be called from outside cpu_arm_exec. */
151 static inline void start_exclusive(void)
155 pthread_mutex_lock(&exclusive_lock
);
159 /* Make all other cpus stop executing. */
160 CPU_FOREACH(other_cpu
) {
161 if (other_cpu
->running
) {
166 if (pending_cpus
> 1) {
167 pthread_cond_wait(&exclusive_cond
, &exclusive_lock
);
171 /* Finish an exclusive operation. */
172 static inline void end_exclusive(void)
175 pthread_cond_broadcast(&exclusive_resume
);
176 pthread_mutex_unlock(&exclusive_lock
);
179 /* Wait for exclusive ops to finish, and begin cpu execution. */
180 static inline void cpu_exec_start(CPUState
*cpu
)
182 pthread_mutex_lock(&exclusive_lock
);
185 pthread_mutex_unlock(&exclusive_lock
);
188 /* Mark cpu as not executing, and release pending exclusive ops. */
189 static inline void cpu_exec_end(CPUState
*cpu
)
191 pthread_mutex_lock(&exclusive_lock
);
192 cpu
->running
= false;
193 if (pending_cpus
> 1) {
195 if (pending_cpus
== 1) {
196 pthread_cond_signal(&exclusive_cond
);
200 pthread_mutex_unlock(&exclusive_lock
);
203 void cpu_list_lock(void)
205 pthread_mutex_lock(&cpu_list_mutex
);
208 void cpu_list_unlock(void)
210 pthread_mutex_unlock(&cpu_list_mutex
);
215 /***********************************************************/
216 /* CPUX86 core interface */
218 void cpu_smm_update(CPUX86State
*env
)
222 uint64_t cpu_get_tsc(CPUX86State
*env
)
224 return cpu_get_real_ticks();
227 static void write_dt(void *ptr
, unsigned long addr
, unsigned long limit
,
232 e1
= (addr
<< 16) | (limit
& 0xffff);
233 e2
= ((addr
>> 16) & 0xff) | (addr
& 0xff000000) | (limit
& 0x000f0000);
240 static uint64_t *idt_table
;
/* Write a 64-bit IDT gate descriptor at ptr.
 * e1/e2 encode the low/high halves of the legacy 8-byte descriptor
 * (offset bits 0-15 + selector, then type/DPL/present + offset 16-31);
 * the third word holds offset bits 32-63 as required in long mode.
 * NOTE(review): declarations and the p[0]/p[1]/p[3] stores were elided in
 * the extracted text and are reconstructed here -- confirm.
 */
static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;

    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
    p[2] = tswap32(addr >> 32);
    p[3] = 0;
}
254 /* only dpl matters as we do only user space emulation */
255 static void set_idt(int n
, unsigned int dpl
)
257 set_gate64(idt_table
+ n
* 2, 0, dpl
, 0, 0);
/* Write a 32-bit IDT gate descriptor at ptr: offset/selector in the low
 * word, present bit + DPL + type in the high word.
 * NOTE(review): declarations and the p[0]/p[1] stores were elided in the
 * extracted text and are reconstructed here -- confirm.
 */
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;

    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}
271 /* only dpl matters as we do only user space emulation */
272 static void set_idt(int n
, unsigned int dpl
)
274 set_gate(idt_table
+ n
, 0, dpl
, 0, 0);
278 void cpu_loop(CPUX86State
*env
)
280 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
283 target_siginfo_t info
;
286 trapnr
= cpu_x86_exec(env
);
289 /* linux syscall from int $0x80 */
290 env
->regs
[R_EAX
] = do_syscall(env
,
302 /* linux syscall from syscall instruction */
303 env
->regs
[R_EAX
] = do_syscall(env
,
312 env
->eip
= env
->exception_next_eip
;
317 info
.si_signo
= SIGBUS
;
319 info
.si_code
= TARGET_SI_KERNEL
;
320 info
._sifields
._sigfault
._addr
= 0;
321 queue_signal(env
, info
.si_signo
, &info
);
324 /* XXX: potential problem if ABI32 */
325 #ifndef TARGET_X86_64
326 if (env
->eflags
& VM_MASK
) {
327 handle_vm86_fault(env
);
331 info
.si_signo
= SIGSEGV
;
333 info
.si_code
= TARGET_SI_KERNEL
;
334 info
._sifields
._sigfault
._addr
= 0;
335 queue_signal(env
, info
.si_signo
, &info
);
339 info
.si_signo
= SIGSEGV
;
341 if (!(env
->error_code
& 1))
342 info
.si_code
= TARGET_SEGV_MAPERR
;
344 info
.si_code
= TARGET_SEGV_ACCERR
;
345 info
._sifields
._sigfault
._addr
= env
->cr
[2];
346 queue_signal(env
, info
.si_signo
, &info
);
349 #ifndef TARGET_X86_64
350 if (env
->eflags
& VM_MASK
) {
351 handle_vm86_trap(env
, trapnr
);
355 /* division by zero */
356 info
.si_signo
= SIGFPE
;
358 info
.si_code
= TARGET_FPE_INTDIV
;
359 info
._sifields
._sigfault
._addr
= env
->eip
;
360 queue_signal(env
, info
.si_signo
, &info
);
365 #ifndef TARGET_X86_64
366 if (env
->eflags
& VM_MASK
) {
367 handle_vm86_trap(env
, trapnr
);
371 info
.si_signo
= SIGTRAP
;
373 if (trapnr
== EXCP01_DB
) {
374 info
.si_code
= TARGET_TRAP_BRKPT
;
375 info
._sifields
._sigfault
._addr
= env
->eip
;
377 info
.si_code
= TARGET_SI_KERNEL
;
378 info
._sifields
._sigfault
._addr
= 0;
380 queue_signal(env
, info
.si_signo
, &info
);
385 #ifndef TARGET_X86_64
386 if (env
->eflags
& VM_MASK
) {
387 handle_vm86_trap(env
, trapnr
);
391 info
.si_signo
= SIGSEGV
;
393 info
.si_code
= TARGET_SI_KERNEL
;
394 info
._sifields
._sigfault
._addr
= 0;
395 queue_signal(env
, info
.si_signo
, &info
);
399 info
.si_signo
= SIGILL
;
401 info
.si_code
= TARGET_ILL_ILLOPN
;
402 info
._sifields
._sigfault
._addr
= env
->eip
;
403 queue_signal(env
, info
.si_signo
, &info
);
406 /* just indicate that signals should be handled asap */
412 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
417 info
.si_code
= TARGET_TRAP_BRKPT
;
418 queue_signal(env
, info
.si_signo
, &info
);
423 pc
= env
->segs
[R_CS
].base
+ env
->eip
;
424 fprintf(stderr
, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
428 process_pending_signals(env
);
435 #define get_user_code_u32(x, gaddr, doswap) \
436 ({ abi_long __r = get_user_u32((x), (gaddr)); \
437 if (!__r && (doswap)) { \
443 #define get_user_code_u16(x, gaddr, doswap) \
444 ({ abi_long __r = get_user_u16((x), (gaddr)); \
445 if (!__r && (doswap)) { \
452 /* Commpage handling -- there is no commpage for AArch64 */
455 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
457 * r0 = pointer to oldval
458 * r1 = pointer to newval
459 * r2 = pointer to target value
462 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
463 * C set if *ptr was changed, clear if no exchange happened
465 * Note segv's in kernel helpers are a bit tricky, we can set the
466 * data address sensibly but the PC address is just the entry point.
468 static void arm_kernel_cmpxchg64_helper(CPUARMState
*env
)
470 uint64_t oldval
, newval
, val
;
472 target_siginfo_t info
;
474 /* Based on the 32 bit code in do_kernel_trap */
476 /* XXX: This only works between threads, not between processes.
477 It's probably possible to implement this with native host
478 operations. However things like ldrex/strex are much harder so
479 there's not much point trying. */
481 cpsr
= cpsr_read(env
);
484 if (get_user_u64(oldval
, env
->regs
[0])) {
485 env
->exception
.vaddress
= env
->regs
[0];
489 if (get_user_u64(newval
, env
->regs
[1])) {
490 env
->exception
.vaddress
= env
->regs
[1];
494 if (get_user_u64(val
, addr
)) {
495 env
->exception
.vaddress
= addr
;
502 if (put_user_u64(val
, addr
)) {
503 env
->exception
.vaddress
= addr
;
513 cpsr_write(env
, cpsr
, CPSR_C
);
519 /* We get the PC of the entry address - which is as good as anything,
520 on a real kernel what you get depends on which mode it uses. */
521 info
.si_signo
= SIGSEGV
;
523 /* XXX: check env->error_code */
524 info
.si_code
= TARGET_SEGV_MAPERR
;
525 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
526 queue_signal(env
, info
.si_signo
, &info
);
531 /* Handle a jump to the kernel code page. */
533 do_kernel_trap(CPUARMState
*env
)
539 switch (env
->regs
[15]) {
540 case 0xffff0fa0: /* __kernel_memory_barrier */
541 /* ??? No-op. Will need to do better for SMP. */
543 case 0xffff0fc0: /* __kernel_cmpxchg */
544 /* XXX: This only works between threads, not between processes.
545 It's probably possible to implement this with native host
546 operations. However things like ldrex/strex are much harder so
547 there's not much point trying. */
549 cpsr
= cpsr_read(env
);
551 /* FIXME: This should SEGV if the access fails. */
552 if (get_user_u32(val
, addr
))
554 if (val
== env
->regs
[0]) {
556 /* FIXME: Check for segfaults. */
557 put_user_u32(val
, addr
);
564 cpsr_write(env
, cpsr
, CPSR_C
);
567 case 0xffff0fe0: /* __kernel_get_tls */
568 env
->regs
[0] = env
->cp15
.tpidrro_el0
;
570 case 0xffff0f60: /* __kernel_cmpxchg64 */
571 arm_kernel_cmpxchg64_helper(env
);
577 /* Jump back to the caller. */
578 addr
= env
->regs
[14];
583 env
->regs
[15] = addr
;
588 /* Store exclusive handling for AArch32 */
589 static int do_strex(CPUARMState
*env
)
597 if (env
->exclusive_addr
!= env
->exclusive_test
) {
600 /* We know we're always AArch32 so the address is in uint32_t range
601 * unless it was the -1 exclusive-monitor-lost value (which won't
602 * match exclusive_test above).
604 assert(extract64(env
->exclusive_addr
, 32, 32) == 0);
605 addr
= env
->exclusive_addr
;
606 size
= env
->exclusive_info
& 0xf;
609 segv
= get_user_u8(val
, addr
);
612 segv
= get_user_u16(val
, addr
);
616 segv
= get_user_u32(val
, addr
);
622 env
->exception
.vaddress
= addr
;
627 segv
= get_user_u32(valhi
, addr
+ 4);
629 env
->exception
.vaddress
= addr
+ 4;
632 val
= deposit64(val
, 32, 32, valhi
);
634 if (val
!= env
->exclusive_val
) {
638 val
= env
->regs
[(env
->exclusive_info
>> 8) & 0xf];
641 segv
= put_user_u8(val
, addr
);
644 segv
= put_user_u16(val
, addr
);
648 segv
= put_user_u32(val
, addr
);
652 env
->exception
.vaddress
= addr
;
656 val
= env
->regs
[(env
->exclusive_info
>> 12) & 0xf];
657 segv
= put_user_u32(val
, addr
+ 4);
659 env
->exception
.vaddress
= addr
+ 4;
666 env
->regs
[(env
->exclusive_info
>> 4) & 0xf] = rc
;
672 void cpu_loop(CPUARMState
*env
)
674 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
676 unsigned int n
, insn
;
677 target_siginfo_t info
;
682 trapnr
= cpu_arm_exec(env
);
687 TaskState
*ts
= cs
->opaque
;
691 /* we handle the FPU emulation here, as Linux */
692 /* we get the opcode */
693 /* FIXME - what to do if get_user() fails? */
694 get_user_code_u32(opcode
, env
->regs
[15], env
->bswap_code
);
696 rc
= EmulateAll(opcode
, &ts
->fpa
, env
);
697 if (rc
== 0) { /* illegal instruction */
698 info
.si_signo
= SIGILL
;
700 info
.si_code
= TARGET_ILL_ILLOPN
;
701 info
._sifields
._sigfault
._addr
= env
->regs
[15];
702 queue_signal(env
, info
.si_signo
, &info
);
703 } else if (rc
< 0) { /* FP exception */
706 /* translate softfloat flags to FPSR flags */
707 if (-rc
& float_flag_invalid
)
709 if (-rc
& float_flag_divbyzero
)
711 if (-rc
& float_flag_overflow
)
713 if (-rc
& float_flag_underflow
)
715 if (-rc
& float_flag_inexact
)
718 FPSR fpsr
= ts
->fpa
.fpsr
;
719 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
721 if (fpsr
& (arm_fpe
<< 16)) { /* exception enabled? */
722 info
.si_signo
= SIGFPE
;
725 /* ordered by priority, least first */
726 if (arm_fpe
& BIT_IXC
) info
.si_code
= TARGET_FPE_FLTRES
;
727 if (arm_fpe
& BIT_UFC
) info
.si_code
= TARGET_FPE_FLTUND
;
728 if (arm_fpe
& BIT_OFC
) info
.si_code
= TARGET_FPE_FLTOVF
;
729 if (arm_fpe
& BIT_DZC
) info
.si_code
= TARGET_FPE_FLTDIV
;
730 if (arm_fpe
& BIT_IOC
) info
.si_code
= TARGET_FPE_FLTINV
;
732 info
._sifields
._sigfault
._addr
= env
->regs
[15];
733 queue_signal(env
, info
.si_signo
, &info
);
738 /* accumulate unenabled exceptions */
739 if ((!(fpsr
& BIT_IXE
)) && (arm_fpe
& BIT_IXC
))
741 if ((!(fpsr
& BIT_UFE
)) && (arm_fpe
& BIT_UFC
))
743 if ((!(fpsr
& BIT_OFE
)) && (arm_fpe
& BIT_OFC
))
745 if ((!(fpsr
& BIT_DZE
)) && (arm_fpe
& BIT_DZC
))
747 if ((!(fpsr
& BIT_IOE
)) && (arm_fpe
& BIT_IOC
))
750 } else { /* everything OK */
761 if (trapnr
== EXCP_BKPT
) {
763 /* FIXME - what to do if get_user() fails? */
764 get_user_code_u16(insn
, env
->regs
[15], env
->bswap_code
);
768 /* FIXME - what to do if get_user() fails? */
769 get_user_code_u32(insn
, env
->regs
[15], env
->bswap_code
);
770 n
= (insn
& 0xf) | ((insn
>> 4) & 0xff0);
775 /* FIXME - what to do if get_user() fails? */
776 get_user_code_u16(insn
, env
->regs
[15] - 2,
780 /* FIXME - what to do if get_user() fails? */
781 get_user_code_u32(insn
, env
->regs
[15] - 4,
787 if (n
== ARM_NR_cacheflush
) {
789 } else if (n
== ARM_NR_semihosting
790 || n
== ARM_NR_thumb_semihosting
) {
791 env
->regs
[0] = do_arm_semihosting (env
);
792 } else if (n
== 0 || n
>= ARM_SYSCALL_BASE
|| env
->thumb
) {
794 if (env
->thumb
|| n
== 0) {
797 n
-= ARM_SYSCALL_BASE
;
800 if ( n
> ARM_NR_BASE
) {
802 case ARM_NR_cacheflush
:
806 cpu_set_tls(env
, env
->regs
[0]);
810 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
812 env
->regs
[0] = -TARGET_ENOSYS
;
816 env
->regs
[0] = do_syscall(env
,
832 /* just indicate that signals should be handled asap */
835 if (!do_strex(env
)) {
838 /* fall through for segv */
839 case EXCP_PREFETCH_ABORT
:
840 case EXCP_DATA_ABORT
:
841 addr
= env
->exception
.vaddress
;
843 info
.si_signo
= SIGSEGV
;
845 /* XXX: check env->error_code */
846 info
.si_code
= TARGET_SEGV_MAPERR
;
847 info
._sifields
._sigfault
._addr
= addr
;
848 queue_signal(env
, info
.si_signo
, &info
);
855 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
860 info
.si_code
= TARGET_TRAP_BRKPT
;
861 queue_signal(env
, info
.si_signo
, &info
);
865 case EXCP_KERNEL_TRAP
:
866 if (do_kernel_trap(env
))
871 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
873 cpu_dump_state(cs
, stderr
, fprintf
, 0);
876 process_pending_signals(env
);
883 * Handle AArch64 store-release exclusive
885 * rs = gets the status result of store exclusive
886 * rt = is the register that is stored
887 * rt2 = is the second register store (in STP)
890 static int do_strex_a64(CPUARMState
*env
)
901 /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
902 size
= extract32(env
->exclusive_info
, 0, 2);
903 is_pair
= extract32(env
->exclusive_info
, 2, 1);
904 rs
= extract32(env
->exclusive_info
, 4, 5);
905 rt
= extract32(env
->exclusive_info
, 9, 5);
906 rt2
= extract32(env
->exclusive_info
, 14, 5);
908 addr
= env
->exclusive_addr
;
910 if (addr
!= env
->exclusive_test
) {
916 segv
= get_user_u8(val
, addr
);
919 segv
= get_user_u16(val
, addr
);
922 segv
= get_user_u32(val
, addr
);
925 segv
= get_user_u64(val
, addr
);
931 env
->exception
.vaddress
= addr
;
934 if (val
!= env
->exclusive_val
) {
939 segv
= get_user_u32(val
, addr
+ 4);
941 segv
= get_user_u64(val
, addr
+ 8);
944 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
947 if (val
!= env
->exclusive_high
) {
951 /* handle the zero register */
952 val
= rt
== 31 ? 0 : env
->xregs
[rt
];
955 segv
= put_user_u8(val
, addr
);
958 segv
= put_user_u16(val
, addr
);
961 segv
= put_user_u32(val
, addr
);
964 segv
= put_user_u64(val
, addr
);
971 /* handle the zero register */
972 val
= rt2
== 31 ? 0 : env
->xregs
[rt2
];
974 segv
= put_user_u32(val
, addr
+ 4);
976 segv
= put_user_u64(val
, addr
+ 8);
979 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
986 /* rs == 31 encodes a write to the ZR, thus throwing away
987 * the status return. This is rather silly but valid.
993 /* instruction faulted, PC does not advance */
994 /* either way a strex releases any exclusive lock we have */
995 env
->exclusive_addr
= -1;
1000 /* AArch64 main loop */
1001 void cpu_loop(CPUARMState
*env
)
1003 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
1005 target_siginfo_t info
;
1010 trapnr
= cpu_arm_exec(env
);
1015 env
->xregs
[0] = do_syscall(env
,
1025 case EXCP_INTERRUPT
:
1026 /* just indicate that signals should be handled asap */
1029 info
.si_signo
= SIGILL
;
1031 info
.si_code
= TARGET_ILL_ILLOPN
;
1032 info
._sifields
._sigfault
._addr
= env
->pc
;
1033 queue_signal(env
, info
.si_signo
, &info
);
1036 if (!do_strex_a64(env
)) {
1039 /* fall through for segv */
1040 case EXCP_PREFETCH_ABORT
:
1041 case EXCP_DATA_ABORT
:
1042 addr
= env
->exception
.vaddress
;
1043 info
.si_signo
= SIGSEGV
;
1045 /* XXX: check env->error_code */
1046 info
.si_code
= TARGET_SEGV_MAPERR
;
1047 info
._sifields
._sigfault
._addr
= addr
;
1048 queue_signal(env
, info
.si_signo
, &info
);
1052 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1054 info
.si_signo
= sig
;
1056 info
.si_code
= TARGET_TRAP_BRKPT
;
1057 queue_signal(env
, info
.si_signo
, &info
);
1061 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
1063 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1066 process_pending_signals(env
);
1067 /* Exception return on AArch64 always clears the exclusive monitor,
1068 * so any return to running guest code implies this.
1069 * A strex (successful or otherwise) also clears the monitor, so
1070 * we don't need to specialcase EXCP_STREX.
1072 env
->exclusive_addr
= -1;
1075 #endif /* ndef TARGET_ABI32 */
1079 #ifdef TARGET_UNICORE32
1081 void cpu_loop(CPUUniCore32State
*env
)
1083 CPUState
*cs
= CPU(uc32_env_get_cpu(env
));
1085 unsigned int n
, insn
;
1086 target_siginfo_t info
;
1090 trapnr
= uc32_cpu_exec(env
);
1093 case UC32_EXCP_PRIV
:
1096 get_user_u32(insn
, env
->regs
[31] - 4);
1097 n
= insn
& 0xffffff;
1099 if (n
>= UC32_SYSCALL_BASE
) {
1101 n
-= UC32_SYSCALL_BASE
;
1102 if (n
== UC32_SYSCALL_NR_set_tls
) {
1103 cpu_set_tls(env
, env
->regs
[0]);
1106 env
->regs
[0] = do_syscall(env
,
1121 case UC32_EXCP_DTRAP
:
1122 case UC32_EXCP_ITRAP
:
1123 info
.si_signo
= SIGSEGV
;
1125 /* XXX: check env->error_code */
1126 info
.si_code
= TARGET_SEGV_MAPERR
;
1127 info
._sifields
._sigfault
._addr
= env
->cp0
.c4_faultaddr
;
1128 queue_signal(env
, info
.si_signo
, &info
);
1130 case EXCP_INTERRUPT
:
1131 /* just indicate that signals should be handled asap */
1137 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1139 info
.si_signo
= sig
;
1141 info
.si_code
= TARGET_TRAP_BRKPT
;
1142 queue_signal(env
, info
.si_signo
, &info
);
1149 process_pending_signals(env
);
1153 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1154 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1160 #define SPARC64_STACK_BIAS 2047
1164 /* WARNING: dealing with register windows _is_ complicated. More info
1165 can be found at http://www.sics.se/~psm/sparcstack.html */
1166 static inline int get_reg_index(CPUSPARCState
*env
, int cwp
, int index
)
1168 index
= (index
+ cwp
* 16) % (16 * env
->nwindows
);
1169 /* wrap handling : if cwp is on the last window, then we use the
1170 registers 'after' the end */
1171 if (index
< 8 && env
->cwp
== env
->nwindows
- 1)
1172 index
+= 16 * env
->nwindows
;
1176 /* save the register window 'cwp1' */
1177 static inline void save_window_offset(CPUSPARCState
*env
, int cwp1
)
1182 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1183 #ifdef TARGET_SPARC64
1185 sp_ptr
+= SPARC64_STACK_BIAS
;
1187 #if defined(DEBUG_WIN)
1188 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" save_cwp=%d\n",
1191 for(i
= 0; i
< 16; i
++) {
1192 /* FIXME - what to do if put_user() fails? */
1193 put_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1194 sp_ptr
+= sizeof(abi_ulong
);
1198 static void save_window(CPUSPARCState
*env
)
1200 #ifndef TARGET_SPARC64
1201 unsigned int new_wim
;
1202 new_wim
= ((env
->wim
>> 1) | (env
->wim
<< (env
->nwindows
- 1))) &
1203 ((1LL << env
->nwindows
) - 1);
1204 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1207 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1213 static void restore_window(CPUSPARCState
*env
)
1215 #ifndef TARGET_SPARC64
1216 unsigned int new_wim
;
1218 unsigned int i
, cwp1
;
1221 #ifndef TARGET_SPARC64
1222 new_wim
= ((env
->wim
<< 1) | (env
->wim
>> (env
->nwindows
- 1))) &
1223 ((1LL << env
->nwindows
) - 1);
1226 /* restore the invalid window */
1227 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1228 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1229 #ifdef TARGET_SPARC64
1231 sp_ptr
+= SPARC64_STACK_BIAS
;
1233 #if defined(DEBUG_WIN)
1234 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" load_cwp=%d\n",
1237 for(i
= 0; i
< 16; i
++) {
1238 /* FIXME - what to do if get_user() fails? */
1239 get_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1240 sp_ptr
+= sizeof(abi_ulong
);
1242 #ifdef TARGET_SPARC64
1244 if (env
->cleanwin
< env
->nwindows
- 1)
1252 static void flush_windows(CPUSPARCState
*env
)
1258 /* if restore would invoke restore_window(), then we can stop */
1259 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ offset
);
1260 #ifndef TARGET_SPARC64
1261 if (env
->wim
& (1 << cwp1
))
1264 if (env
->canrestore
== 0)
1269 save_window_offset(env
, cwp1
);
1272 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1273 #ifndef TARGET_SPARC64
1274 /* set wim so that restore will reload the registers */
1275 env
->wim
= 1 << cwp1
;
1277 #if defined(DEBUG_WIN)
1278 printf("flush_windows: nb=%d\n", offset
- 1);
1282 void cpu_loop (CPUSPARCState
*env
)
1284 CPUState
*cs
= CPU(sparc_env_get_cpu(env
));
1287 target_siginfo_t info
;
1290 trapnr
= cpu_sparc_exec (env
);
1292 /* Compute PSR before exposing state. */
1293 if (env
->cc_op
!= CC_OP_FLAGS
) {
1298 #ifndef TARGET_SPARC64
1305 ret
= do_syscall (env
, env
->gregs
[1],
1306 env
->regwptr
[0], env
->regwptr
[1],
1307 env
->regwptr
[2], env
->regwptr
[3],
1308 env
->regwptr
[4], env
->regwptr
[5],
1310 if ((abi_ulong
)ret
>= (abi_ulong
)(-515)) {
1311 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1312 env
->xcc
|= PSR_CARRY
;
1314 env
->psr
|= PSR_CARRY
;
1318 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1319 env
->xcc
&= ~PSR_CARRY
;
1321 env
->psr
&= ~PSR_CARRY
;
1324 env
->regwptr
[0] = ret
;
1325 /* next instruction */
1327 env
->npc
= env
->npc
+ 4;
1329 case 0x83: /* flush windows */
1334 /* next instruction */
1336 env
->npc
= env
->npc
+ 4;
1338 #ifndef TARGET_SPARC64
1339 case TT_WIN_OVF
: /* window overflow */
1342 case TT_WIN_UNF
: /* window underflow */
1343 restore_window(env
);
1348 info
.si_signo
= TARGET_SIGSEGV
;
1350 /* XXX: check env->error_code */
1351 info
.si_code
= TARGET_SEGV_MAPERR
;
1352 info
._sifields
._sigfault
._addr
= env
->mmuregs
[4];
1353 queue_signal(env
, info
.si_signo
, &info
);
1357 case TT_SPILL
: /* window overflow */
1360 case TT_FILL
: /* window underflow */
1361 restore_window(env
);
1366 info
.si_signo
= TARGET_SIGSEGV
;
1368 /* XXX: check env->error_code */
1369 info
.si_code
= TARGET_SEGV_MAPERR
;
1370 if (trapnr
== TT_DFAULT
)
1371 info
._sifields
._sigfault
._addr
= env
->dmmuregs
[4];
1373 info
._sifields
._sigfault
._addr
= cpu_tsptr(env
)->tpc
;
1374 queue_signal(env
, info
.si_signo
, &info
);
1377 #ifndef TARGET_ABI32
1380 sparc64_get_context(env
);
1384 sparc64_set_context(env
);
1388 case EXCP_INTERRUPT
:
1389 /* just indicate that signals should be handled asap */
1393 info
.si_signo
= TARGET_SIGILL
;
1395 info
.si_code
= TARGET_ILL_ILLOPC
;
1396 info
._sifields
._sigfault
._addr
= env
->pc
;
1397 queue_signal(env
, info
.si_signo
, &info
);
1404 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1407 info
.si_signo
= sig
;
1409 info
.si_code
= TARGET_TRAP_BRKPT
;
1410 queue_signal(env
, info
.si_signo
, &info
);
1415 printf ("Unhandled trap: 0x%x\n", trapnr
);
1416 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1419 process_pending_signals (env
);
1426 static inline uint64_t cpu_ppc_get_tb(CPUPPCState
*env
)
1432 uint64_t cpu_ppc_load_tbl(CPUPPCState
*env
)
1434 return cpu_ppc_get_tb(env
);
1437 uint32_t cpu_ppc_load_tbu(CPUPPCState
*env
)
1439 return cpu_ppc_get_tb(env
) >> 32;
1442 uint64_t cpu_ppc_load_atbl(CPUPPCState
*env
)
1444 return cpu_ppc_get_tb(env
);
1447 uint32_t cpu_ppc_load_atbu(CPUPPCState
*env
)
1449 return cpu_ppc_get_tb(env
) >> 32;
1452 uint32_t cpu_ppc601_load_rtcu(CPUPPCState
*env
)
1453 __attribute__ (( alias ("cpu_ppc_load_tbu") ));
1455 uint32_t cpu_ppc601_load_rtcl(CPUPPCState
*env
)
1457 return cpu_ppc_load_tbl(env
) & 0x3FFFFF80;
1460 /* XXX: to be fixed */
1461 int ppc_dcr_read (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t *valp
)
1466 int ppc_dcr_write (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t val
)
1471 #define EXCP_DUMP(env, fmt, ...) \
1473 CPUState *cs = ENV_GET_CPU(env); \
1474 fprintf(stderr, fmt , ## __VA_ARGS__); \
1475 cpu_dump_state(cs, stderr, fprintf, 0); \
1476 qemu_log(fmt, ## __VA_ARGS__); \
1477 if (qemu_log_enabled()) { \
1478 log_cpu_state(cs, 0); \
1482 static int do_store_exclusive(CPUPPCState
*env
)
1485 target_ulong page_addr
;
1486 target_ulong val
, val2
__attribute__((unused
)) = 0;
1490 addr
= env
->reserve_ea
;
1491 page_addr
= addr
& TARGET_PAGE_MASK
;
1494 flags
= page_get_flags(page_addr
);
1495 if ((flags
& PAGE_READ
) == 0) {
1498 int reg
= env
->reserve_info
& 0x1f;
1499 int size
= env
->reserve_info
>> 5;
1502 if (addr
== env
->reserve_addr
) {
1504 case 1: segv
= get_user_u8(val
, addr
); break;
1505 case 2: segv
= get_user_u16(val
, addr
); break;
1506 case 4: segv
= get_user_u32(val
, addr
); break;
1507 #if defined(TARGET_PPC64)
1508 case 8: segv
= get_user_u64(val
, addr
); break;
1510 segv
= get_user_u64(val
, addr
);
1512 segv
= get_user_u64(val2
, addr
+ 8);
1519 if (!segv
&& val
== env
->reserve_val
) {
1520 val
= env
->gpr
[reg
];
1522 case 1: segv
= put_user_u8(val
, addr
); break;
1523 case 2: segv
= put_user_u16(val
, addr
); break;
1524 case 4: segv
= put_user_u32(val
, addr
); break;
1525 #if defined(TARGET_PPC64)
1526 case 8: segv
= put_user_u64(val
, addr
); break;
1528 if (val2
== env
->reserve_val2
) {
1531 val
= env
->gpr
[reg
+1];
1533 val2
= env
->gpr
[reg
+1];
1535 segv
= put_user_u64(val
, addr
);
1537 segv
= put_user_u64(val2
, addr
+ 8);
1550 env
->crf
[0] = (stored
<< 1) | xer_so
;
1551 env
->reserve_addr
= (target_ulong
)-1;
1561 void cpu_loop(CPUPPCState
*env
)
1563 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
1564 target_siginfo_t info
;
1570 trapnr
= cpu_ppc_exec(env
);
1573 case POWERPC_EXCP_NONE
:
1576 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1577 cpu_abort(cs
, "Critical interrupt while in user mode. "
1580 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1581 cpu_abort(cs
, "Machine check exception while in user mode. "
1584 case POWERPC_EXCP_DSI
: /* Data storage exception */
1585 EXCP_DUMP(env
, "Invalid data memory access: 0x" TARGET_FMT_lx
"\n",
1587 /* XXX: check this. Seems bugged */
1588 switch (env
->error_code
& 0xFF000000) {
1590 info
.si_signo
= TARGET_SIGSEGV
;
1592 info
.si_code
= TARGET_SEGV_MAPERR
;
1595 info
.si_signo
= TARGET_SIGILL
;
1597 info
.si_code
= TARGET_ILL_ILLADR
;
1600 info
.si_signo
= TARGET_SIGSEGV
;
1602 info
.si_code
= TARGET_SEGV_ACCERR
;
1605 /* Let's send a regular segfault... */
1606 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1608 info
.si_signo
= TARGET_SIGSEGV
;
1610 info
.si_code
= TARGET_SEGV_MAPERR
;
1613 info
._sifields
._sigfault
._addr
= env
->nip
;
1614 queue_signal(env
, info
.si_signo
, &info
);
1616 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1617 EXCP_DUMP(env
, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1618 "\n", env
->spr
[SPR_SRR0
]);
1619 /* XXX: check this */
1620 switch (env
->error_code
& 0xFF000000) {
1622 info
.si_signo
= TARGET_SIGSEGV
;
1624 info
.si_code
= TARGET_SEGV_MAPERR
;
1628 info
.si_signo
= TARGET_SIGSEGV
;
1630 info
.si_code
= TARGET_SEGV_ACCERR
;
1633 /* Let's send a regular segfault... */
1634 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1636 info
.si_signo
= TARGET_SIGSEGV
;
1638 info
.si_code
= TARGET_SEGV_MAPERR
;
1641 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1642 queue_signal(env
, info
.si_signo
, &info
);
1644 case POWERPC_EXCP_EXTERNAL
: /* External input */
1645 cpu_abort(cs
, "External interrupt while in user mode. "
1648 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1649 EXCP_DUMP(env
, "Unaligned memory access\n");
1650 /* XXX: check this */
1651 info
.si_signo
= TARGET_SIGBUS
;
1653 info
.si_code
= TARGET_BUS_ADRALN
;
1654 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1655 queue_signal(env
, info
.si_signo
, &info
);
1657 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1658 /* XXX: check this */
1659 switch (env
->error_code
& ~0xF) {
1660 case POWERPC_EXCP_FP
:
1661 EXCP_DUMP(env
, "Floating point program exception\n");
1662 info
.si_signo
= TARGET_SIGFPE
;
1664 switch (env
->error_code
& 0xF) {
1665 case POWERPC_EXCP_FP_OX
:
1666 info
.si_code
= TARGET_FPE_FLTOVF
;
1668 case POWERPC_EXCP_FP_UX
:
1669 info
.si_code
= TARGET_FPE_FLTUND
;
1671 case POWERPC_EXCP_FP_ZX
:
1672 case POWERPC_EXCP_FP_VXZDZ
:
1673 info
.si_code
= TARGET_FPE_FLTDIV
;
1675 case POWERPC_EXCP_FP_XX
:
1676 info
.si_code
= TARGET_FPE_FLTRES
;
1678 case POWERPC_EXCP_FP_VXSOFT
:
1679 info
.si_code
= TARGET_FPE_FLTINV
;
1681 case POWERPC_EXCP_FP_VXSNAN
:
1682 case POWERPC_EXCP_FP_VXISI
:
1683 case POWERPC_EXCP_FP_VXIDI
:
1684 case POWERPC_EXCP_FP_VXIMZ
:
1685 case POWERPC_EXCP_FP_VXVC
:
1686 case POWERPC_EXCP_FP_VXSQRT
:
1687 case POWERPC_EXCP_FP_VXCVI
:
1688 info
.si_code
= TARGET_FPE_FLTSUB
;
1691 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1696 case POWERPC_EXCP_INVAL
:
1697 EXCP_DUMP(env
, "Invalid instruction\n");
1698 info
.si_signo
= TARGET_SIGILL
;
1700 switch (env
->error_code
& 0xF) {
1701 case POWERPC_EXCP_INVAL_INVAL
:
1702 info
.si_code
= TARGET_ILL_ILLOPC
;
1704 case POWERPC_EXCP_INVAL_LSWX
:
1705 info
.si_code
= TARGET_ILL_ILLOPN
;
1707 case POWERPC_EXCP_INVAL_SPR
:
1708 info
.si_code
= TARGET_ILL_PRVREG
;
1710 case POWERPC_EXCP_INVAL_FP
:
1711 info
.si_code
= TARGET_ILL_COPROC
;
1714 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1715 env
->error_code
& 0xF);
1716 info
.si_code
= TARGET_ILL_ILLADR
;
1720 case POWERPC_EXCP_PRIV
:
1721 EXCP_DUMP(env
, "Privilege violation\n");
1722 info
.si_signo
= TARGET_SIGILL
;
1724 switch (env
->error_code
& 0xF) {
1725 case POWERPC_EXCP_PRIV_OPC
:
1726 info
.si_code
= TARGET_ILL_PRVOPC
;
1728 case POWERPC_EXCP_PRIV_REG
:
1729 info
.si_code
= TARGET_ILL_PRVREG
;
1732 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1733 env
->error_code
& 0xF);
1734 info
.si_code
= TARGET_ILL_PRVOPC
;
1738 case POWERPC_EXCP_TRAP
:
1739 cpu_abort(cs
, "Tried to call a TRAP\n");
1742 /* Should not happen ! */
1743 cpu_abort(cs
, "Unknown program exception (%02x)\n",
1747 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1748 queue_signal(env
, info
.si_signo
, &info
);
1750 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1751 EXCP_DUMP(env
, "No floating point allowed\n");
1752 info
.si_signo
= TARGET_SIGILL
;
1754 info
.si_code
= TARGET_ILL_COPROC
;
1755 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1756 queue_signal(env
, info
.si_signo
, &info
);
1758 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1759 cpu_abort(cs
, "Syscall exception while in user mode. "
1762 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1763 EXCP_DUMP(env
, "No APU instruction allowed\n");
1764 info
.si_signo
= TARGET_SIGILL
;
1766 info
.si_code
= TARGET_ILL_COPROC
;
1767 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1768 queue_signal(env
, info
.si_signo
, &info
);
1770 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1771 cpu_abort(cs
, "Decrementer interrupt while in user mode. "
1774 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1775 cpu_abort(cs
, "Fix interval timer interrupt while in user mode. "
1778 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1779 cpu_abort(cs
, "Watchdog timer interrupt while in user mode. "
1782 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1783 cpu_abort(cs
, "Data TLB exception while in user mode. "
1786 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1787 cpu_abort(cs
, "Instruction TLB exception while in user mode. "
1790 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1791 EXCP_DUMP(env
, "No SPE/floating-point instruction allowed\n");
1792 info
.si_signo
= TARGET_SIGILL
;
1794 info
.si_code
= TARGET_ILL_COPROC
;
1795 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1796 queue_signal(env
, info
.si_signo
, &info
);
1798 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1799 cpu_abort(cs
, "Embedded floating-point data IRQ not handled\n");
1801 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1802 cpu_abort(cs
, "Embedded floating-point round IRQ not handled\n");
1804 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1805 cpu_abort(cs
, "Performance monitor exception not handled\n");
1807 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1808 cpu_abort(cs
, "Doorbell interrupt while in user mode. "
1811 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1812 cpu_abort(cs
, "Doorbell critical interrupt while in user mode. "
1815 case POWERPC_EXCP_RESET
: /* System reset exception */
1816 cpu_abort(cs
, "Reset interrupt while in user mode. "
1819 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1820 cpu_abort(cs
, "Data segment exception while in user mode. "
1823 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1824 cpu_abort(cs
, "Instruction segment exception "
1825 "while in user mode. Aborting\n");
1827 /* PowerPC 64 with hypervisor mode support */
1828 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1829 cpu_abort(cs
, "Hypervisor decrementer interrupt "
1830 "while in user mode. Aborting\n");
1832 case POWERPC_EXCP_TRACE
: /* Trace exception */
1834 * we use this exception to emulate step-by-step execution mode.
1837 /* PowerPC 64 with hypervisor mode support */
1838 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1839 cpu_abort(cs
, "Hypervisor data storage exception "
1840 "while in user mode. Aborting\n");
1842 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1843 cpu_abort(cs
, "Hypervisor instruction storage exception "
1844 "while in user mode. Aborting\n");
1846 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1847 cpu_abort(cs
, "Hypervisor data segment exception "
1848 "while in user mode. Aborting\n");
1850 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1851 cpu_abort(cs
, "Hypervisor instruction segment exception "
1852 "while in user mode. Aborting\n");
1854 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1855 EXCP_DUMP(env
, "No Altivec instructions allowed\n");
1856 info
.si_signo
= TARGET_SIGILL
;
1858 info
.si_code
= TARGET_ILL_COPROC
;
1859 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1860 queue_signal(env
, info
.si_signo
, &info
);
1862 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1863 cpu_abort(cs
, "Programmable interval timer interrupt "
1864 "while in user mode. Aborting\n");
1866 case POWERPC_EXCP_IO
: /* IO error exception */
1867 cpu_abort(cs
, "IO error exception while in user mode. "
1870 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1871 cpu_abort(cs
, "Run mode exception while in user mode. "
1874 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1875 cpu_abort(cs
, "Emulation trap exception not handled\n");
1877 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1878 cpu_abort(cs
, "Instruction fetch TLB exception "
1879 "while in user-mode. Aborting");
1881 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1882 cpu_abort(cs
, "Data load TLB exception while in user-mode. "
1885 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
1886 cpu_abort(cs
, "Data store TLB exception while in user-mode. "
1889 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
1890 cpu_abort(cs
, "Floating-point assist exception not handled\n");
1892 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1893 cpu_abort(cs
, "Instruction address breakpoint exception "
1896 case POWERPC_EXCP_SMI
: /* System management interrupt */
1897 cpu_abort(cs
, "System management interrupt while in user mode. "
1900 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1901 cpu_abort(cs
, "Thermal interrupt interrupt while in user mode. "
1904 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
1905 cpu_abort(cs
, "Performance monitor exception not handled\n");
1907 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1908 cpu_abort(cs
, "Vector assist exception not handled\n");
1910 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
1911 cpu_abort(cs
, "Soft patch exception not handled\n");
1913 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1914 cpu_abort(cs
, "Maintenance exception while in user mode. "
1917 case POWERPC_EXCP_STOP
: /* stop translation */
1918 /* We did invalidate the instruction cache. Go on */
1920 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
1921 /* We just stopped because of a branch. Go on */
1923 case POWERPC_EXCP_SYSCALL_USER
:
1924 /* system call in user-mode emulation */
1926 * PPC ABI uses overflow flag in cr0 to signal an error
1929 env
->crf
[0] &= ~0x1;
1930 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
1931 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
1933 if (ret
== (target_ulong
)(-TARGET_QEMU_ESIGRETURN
)) {
1934 /* Returning from a successful sigreturn syscall.
1935 Avoid corrupting register state. */
1938 if (ret
> (target_ulong
)(-515)) {
1944 case POWERPC_EXCP_STCX
:
1945 if (do_store_exclusive(env
)) {
1946 info
.si_signo
= TARGET_SIGSEGV
;
1948 info
.si_code
= TARGET_SEGV_MAPERR
;
1949 info
._sifields
._sigfault
._addr
= env
->nip
;
1950 queue_signal(env
, info
.si_signo
, &info
);
1957 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1959 info
.si_signo
= sig
;
1961 info
.si_code
= TARGET_TRAP_BRKPT
;
1962 queue_signal(env
, info
.si_signo
, &info
);
1966 case EXCP_INTERRUPT
:
1967 /* just indicate that signals should be handled asap */
1970 cpu_abort(cs
, "Unknown exception 0x%d. Aborting\n", trapnr
);
1973 process_pending_signals(env
);
# ifdef TARGET_ABI_MIPSO32
# define MIPS_SYS(name, args) args,
/*
 * Number of syscall arguments for each MIPS o32 syscall, indexed by
 * (guest syscall number - 4000).  MIPS_SYS expands to just the argument
 * count; the syscall name in the first macro argument is documentation
 * only.  Entries marked sys_ni_syscall are unimplemented/removed slots.
 * Counts above 4 mean the extra arguments are fetched from the guest
 * stack by the o32 syscall dispatch code in cpu_loop().
 */
static const uint8_t mips_syscall_args[] = {
        MIPS_SYS(sys_syscall, 8)        /* 4000 */
        MIPS_SYS(sys_exit, 1)
        MIPS_SYS(sys_fork, 0)
        MIPS_SYS(sys_read, 3)
        MIPS_SYS(sys_write, 3)
        MIPS_SYS(sys_open, 3)           /* 4005 */
        MIPS_SYS(sys_close, 1)
        MIPS_SYS(sys_waitpid, 3)
        MIPS_SYS(sys_creat, 2)
        MIPS_SYS(sys_link, 2)
        MIPS_SYS(sys_unlink, 1)         /* 4010 */
        MIPS_SYS(sys_execve, 0)
        MIPS_SYS(sys_chdir, 1)
        MIPS_SYS(sys_time, 1)
        MIPS_SYS(sys_mknod, 3)
        MIPS_SYS(sys_chmod, 2)          /* 4015 */
        MIPS_SYS(sys_lchown, 3)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_ni_syscall, 0)     /* was sys_stat */
        MIPS_SYS(sys_lseek, 3)
        MIPS_SYS(sys_getpid, 0)         /* 4020 */
        MIPS_SYS(sys_mount, 5)
        MIPS_SYS(sys_umount, 1)
        MIPS_SYS(sys_setuid, 1)
        MIPS_SYS(sys_getuid, 0)
        MIPS_SYS(sys_stime, 1)          /* 4025 */
        MIPS_SYS(sys_ptrace, 4)
        MIPS_SYS(sys_alarm, 1)
        MIPS_SYS(sys_ni_syscall, 0)     /* was sys_fstat */
        MIPS_SYS(sys_pause, 0)
        MIPS_SYS(sys_utime, 2)          /* 4030 */
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_access, 2)
        MIPS_SYS(sys_nice, 1)
        MIPS_SYS(sys_ni_syscall, 0)     /* 4035 */
        MIPS_SYS(sys_sync, 0)
        MIPS_SYS(sys_kill, 2)
        MIPS_SYS(sys_rename, 2)
        MIPS_SYS(sys_mkdir, 2)
        MIPS_SYS(sys_rmdir, 1)          /* 4040 */
        MIPS_SYS(sys_dup, 1)
        MIPS_SYS(sys_pipe, 0)
        MIPS_SYS(sys_times, 1)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_brk, 1)            /* 4045 */
        MIPS_SYS(sys_setgid, 1)
        MIPS_SYS(sys_getgid, 0)
        MIPS_SYS(sys_ni_syscall, 0)     /* was signal(2) */
        MIPS_SYS(sys_geteuid, 0)
        MIPS_SYS(sys_getegid, 0)        /* 4050 */
        MIPS_SYS(sys_acct, 0)
        MIPS_SYS(sys_umount2, 2)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_ioctl, 3)
        MIPS_SYS(sys_fcntl, 3)          /* 4055 */
        MIPS_SYS(sys_ni_syscall, 2)
        MIPS_SYS(sys_setpgid, 2)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_olduname, 1)
        MIPS_SYS(sys_umask, 1)          /* 4060 */
        MIPS_SYS(sys_chroot, 1)
        MIPS_SYS(sys_ustat, 2)
        MIPS_SYS(sys_dup2, 2)
        MIPS_SYS(sys_getppid, 0)
        MIPS_SYS(sys_getpgrp, 0)        /* 4065 */
        MIPS_SYS(sys_setsid, 0)
        MIPS_SYS(sys_sigaction, 3)
        MIPS_SYS(sys_sgetmask, 0)
        MIPS_SYS(sys_ssetmask, 1)
        MIPS_SYS(sys_setreuid, 2)       /* 4070 */
        MIPS_SYS(sys_setregid, 2)
        MIPS_SYS(sys_sigsuspend, 0)
        MIPS_SYS(sys_sigpending, 1)
        MIPS_SYS(sys_sethostname, 2)
        MIPS_SYS(sys_setrlimit, 2)      /* 4075 */
        MIPS_SYS(sys_getrlimit, 2)
        MIPS_SYS(sys_getrusage, 2)
        MIPS_SYS(sys_gettimeofday, 2)
        MIPS_SYS(sys_settimeofday, 2)
        MIPS_SYS(sys_getgroups, 2)      /* 4080 */
        MIPS_SYS(sys_setgroups, 2)
        MIPS_SYS(sys_ni_syscall, 0)     /* old_select */
        MIPS_SYS(sys_symlink, 2)
        MIPS_SYS(sys_ni_syscall, 0)     /* was sys_lstat */
        MIPS_SYS(sys_readlink, 3)       /* 4085 */
        MIPS_SYS(sys_uselib, 1)
        MIPS_SYS(sys_swapon, 2)
        MIPS_SYS(sys_reboot, 3)
        MIPS_SYS(old_readdir, 3)
        MIPS_SYS(old_mmap, 6)           /* 4090 */
        MIPS_SYS(sys_munmap, 2)
        MIPS_SYS(sys_truncate, 2)
        MIPS_SYS(sys_ftruncate, 2)
        MIPS_SYS(sys_fchmod, 2)
        MIPS_SYS(sys_fchown, 3)         /* 4095 */
        MIPS_SYS(sys_getpriority, 2)
        MIPS_SYS(sys_setpriority, 3)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_statfs, 2)
        MIPS_SYS(sys_fstatfs, 2)        /* 4100 */
        MIPS_SYS(sys_ni_syscall, 0)     /* was ioperm(2) */
        MIPS_SYS(sys_socketcall, 2)
        MIPS_SYS(sys_syslog, 3)
        MIPS_SYS(sys_setitimer, 3)
        MIPS_SYS(sys_getitimer, 2)      /* 4105 */
        MIPS_SYS(sys_newstat, 2)
        MIPS_SYS(sys_newlstat, 2)
        MIPS_SYS(sys_newfstat, 2)
        MIPS_SYS(sys_uname, 1)
        MIPS_SYS(sys_ni_syscall, 0)     /* 4110 was iopl(2) */
        MIPS_SYS(sys_vhangup, 0)
        MIPS_SYS(sys_ni_syscall, 0)     /* was sys_idle() */
        MIPS_SYS(sys_ni_syscall, 0)     /* was sys_vm86 */
        MIPS_SYS(sys_wait4, 4)
        MIPS_SYS(sys_swapoff, 1)        /* 4115 */
        MIPS_SYS(sys_sysinfo, 1)
        MIPS_SYS(sys_ipc, 6)
        MIPS_SYS(sys_fsync, 1)
        MIPS_SYS(sys_sigreturn, 0)
        MIPS_SYS(sys_clone, 6)          /* 4120 */
        MIPS_SYS(sys_setdomainname, 2)
        MIPS_SYS(sys_newuname, 1)
        MIPS_SYS(sys_ni_syscall, 0)     /* sys_modify_ldt */
        MIPS_SYS(sys_adjtimex, 1)
        MIPS_SYS(sys_mprotect, 3)       /* 4125 */
        MIPS_SYS(sys_sigprocmask, 3)
        MIPS_SYS(sys_ni_syscall, 0)     /* was create_module */
        MIPS_SYS(sys_init_module, 5)
        MIPS_SYS(sys_delete_module, 1)
        MIPS_SYS(sys_ni_syscall, 0)     /* 4130 was get_kernel_syms */
        MIPS_SYS(sys_quotactl, 0)
        MIPS_SYS(sys_getpgid, 1)
        MIPS_SYS(sys_fchdir, 1)
        MIPS_SYS(sys_bdflush, 2)
        MIPS_SYS(sys_sysfs, 3)          /* 4135 */
        MIPS_SYS(sys_personality, 1)
        MIPS_SYS(sys_ni_syscall, 0)     /* for afs_syscall */
        MIPS_SYS(sys_setfsuid, 1)
        MIPS_SYS(sys_setfsgid, 1)
        MIPS_SYS(sys_llseek, 5)         /* 4140 */
        MIPS_SYS(sys_getdents, 3)
        MIPS_SYS(sys_select, 5)
        MIPS_SYS(sys_flock, 2)
        MIPS_SYS(sys_msync, 3)
        MIPS_SYS(sys_readv, 3)          /* 4145 */
        MIPS_SYS(sys_writev, 3)
        MIPS_SYS(sys_cacheflush, 3)
        MIPS_SYS(sys_cachectl, 3)
        MIPS_SYS(sys_sysmips, 4)
        MIPS_SYS(sys_ni_syscall, 0)     /* 4150 */
        MIPS_SYS(sys_getsid, 1)
        MIPS_SYS(sys_fdatasync, 0)
        MIPS_SYS(sys_sysctl, 1)
        MIPS_SYS(sys_mlock, 2)
        MIPS_SYS(sys_munlock, 2)        /* 4155 */
        MIPS_SYS(sys_mlockall, 1)
        MIPS_SYS(sys_munlockall, 0)
        MIPS_SYS(sys_sched_setparam, 2)
        MIPS_SYS(sys_sched_getparam, 2)
        MIPS_SYS(sys_sched_setscheduler, 3)     /* 4160 */
        MIPS_SYS(sys_sched_getscheduler, 1)
        MIPS_SYS(sys_sched_yield, 0)
        MIPS_SYS(sys_sched_get_priority_max, 1)
        MIPS_SYS(sys_sched_get_priority_min, 1)
        MIPS_SYS(sys_sched_rr_get_interval, 2)  /* 4165 */
        MIPS_SYS(sys_nanosleep, 2)
        MIPS_SYS(sys_mremap, 5)
        MIPS_SYS(sys_accept, 3)
        MIPS_SYS(sys_bind, 3)
        MIPS_SYS(sys_connect, 3)        /* 4170 */
        MIPS_SYS(sys_getpeername, 3)
        MIPS_SYS(sys_getsockname, 3)
        MIPS_SYS(sys_getsockopt, 5)
        MIPS_SYS(sys_listen, 2)
        MIPS_SYS(sys_recv, 4)           /* 4175 */
        MIPS_SYS(sys_recvfrom, 6)
        MIPS_SYS(sys_recvmsg, 3)
        MIPS_SYS(sys_send, 4)
        MIPS_SYS(sys_sendmsg, 3)
        MIPS_SYS(sys_sendto, 6)         /* 4180 */
        MIPS_SYS(sys_setsockopt, 5)
        MIPS_SYS(sys_shutdown, 2)
        MIPS_SYS(sys_socket, 3)
        MIPS_SYS(sys_socketpair, 4)
        MIPS_SYS(sys_setresuid, 3)      /* 4185 */
        MIPS_SYS(sys_getresuid, 3)
        MIPS_SYS(sys_ni_syscall, 0)     /* was sys_query_module */
        MIPS_SYS(sys_poll, 3)
        MIPS_SYS(sys_nfsservctl, 3)
        MIPS_SYS(sys_setresgid, 3)      /* 4190 */
        MIPS_SYS(sys_getresgid, 3)
        MIPS_SYS(sys_prctl, 5)
        MIPS_SYS(sys_rt_sigreturn, 0)
        MIPS_SYS(sys_rt_sigaction, 4)
        MIPS_SYS(sys_rt_sigprocmask, 4) /* 4195 */
        MIPS_SYS(sys_rt_sigpending, 2)
        MIPS_SYS(sys_rt_sigtimedwait, 4)
        MIPS_SYS(sys_rt_sigqueueinfo, 3)
        MIPS_SYS(sys_rt_sigsuspend, 0)
        MIPS_SYS(sys_pread64, 6)        /* 4200 */
        MIPS_SYS(sys_pwrite64, 6)
        MIPS_SYS(sys_chown, 3)
        MIPS_SYS(sys_getcwd, 2)
        MIPS_SYS(sys_capget, 2)
        MIPS_SYS(sys_capset, 2)         /* 4205 */
        MIPS_SYS(sys_sigaltstack, 2)
        MIPS_SYS(sys_sendfile, 4)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_mmap2, 6)          /* 4210 */
        MIPS_SYS(sys_truncate64, 4)
        MIPS_SYS(sys_ftruncate64, 4)
        MIPS_SYS(sys_stat64, 2)
        MIPS_SYS(sys_lstat64, 2)
        MIPS_SYS(sys_fstat64, 2)        /* 4215 */
        MIPS_SYS(sys_pivot_root, 2)
        MIPS_SYS(sys_mincore, 3)
        MIPS_SYS(sys_madvise, 3)
        MIPS_SYS(sys_getdents64, 3)
        MIPS_SYS(sys_fcntl64, 3)        /* 4220 */
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_gettid, 0)
        MIPS_SYS(sys_readahead, 5)
        MIPS_SYS(sys_setxattr, 5)
        MIPS_SYS(sys_lsetxattr, 5)      /* 4225 */
        MIPS_SYS(sys_fsetxattr, 5)
        MIPS_SYS(sys_getxattr, 4)
        MIPS_SYS(sys_lgetxattr, 4)
        MIPS_SYS(sys_fgetxattr, 4)
        MIPS_SYS(sys_listxattr, 3)      /* 4230 */
        MIPS_SYS(sys_llistxattr, 3)
        MIPS_SYS(sys_flistxattr, 3)
        MIPS_SYS(sys_removexattr, 2)
        MIPS_SYS(sys_lremovexattr, 2)
        MIPS_SYS(sys_fremovexattr, 2)   /* 4235 */
        MIPS_SYS(sys_tkill, 2)
        MIPS_SYS(sys_sendfile64, 5)
        MIPS_SYS(sys_futex, 6)
        MIPS_SYS(sys_sched_setaffinity, 3)
        MIPS_SYS(sys_sched_getaffinity, 3)      /* 4240 */
        MIPS_SYS(sys_io_setup, 2)
        MIPS_SYS(sys_io_destroy, 1)
        MIPS_SYS(sys_io_getevents, 5)
        MIPS_SYS(sys_io_submit, 3)
        MIPS_SYS(sys_io_cancel, 3)      /* 4245 */
        MIPS_SYS(sys_exit_group, 1)
        MIPS_SYS(sys_lookup_dcookie, 3)
        MIPS_SYS(sys_epoll_create, 1)
        MIPS_SYS(sys_epoll_ctl, 4)
        MIPS_SYS(sys_epoll_wait, 3)     /* 4250 */
        MIPS_SYS(sys_remap_file_pages, 5)
        MIPS_SYS(sys_set_tid_address, 1)
        MIPS_SYS(sys_restart_syscall, 0)
        MIPS_SYS(sys_fadvise64_64, 7)
        MIPS_SYS(sys_statfs64, 3)       /* 4255 */
        MIPS_SYS(sys_fstatfs64, 2)
        MIPS_SYS(sys_timer_create, 3)
        MIPS_SYS(sys_timer_settime, 4)
        MIPS_SYS(sys_timer_gettime, 2)
        MIPS_SYS(sys_timer_getoverrun, 1)       /* 4260 */
        MIPS_SYS(sys_timer_delete, 1)
        MIPS_SYS(sys_clock_settime, 2)
        MIPS_SYS(sys_clock_gettime, 2)
        MIPS_SYS(sys_clock_getres, 2)
        MIPS_SYS(sys_clock_nanosleep, 4)        /* 4265 */
        MIPS_SYS(sys_tgkill, 3)
        MIPS_SYS(sys_utimes, 2)
        MIPS_SYS(sys_mbind, 4)
        MIPS_SYS(sys_ni_syscall, 0)     /* sys_get_mempolicy */
        MIPS_SYS(sys_ni_syscall, 0)     /* 4270 sys_set_mempolicy */
        MIPS_SYS(sys_mq_open, 4)
        MIPS_SYS(sys_mq_unlink, 1)
        MIPS_SYS(sys_mq_timedsend, 5)
        MIPS_SYS(sys_mq_timedreceive, 5)
        MIPS_SYS(sys_mq_notify, 2)      /* 4275 */
        MIPS_SYS(sys_mq_getsetattr, 3)
        MIPS_SYS(sys_ni_syscall, 0)     /* sys_vserver */
        MIPS_SYS(sys_waitid, 4)
        MIPS_SYS(sys_ni_syscall, 0)     /* available, was setaltroot */
        MIPS_SYS(sys_add_key, 5)        /* 4280 */
        MIPS_SYS(sys_request_key, 4)
        MIPS_SYS(sys_keyctl, 5)
        MIPS_SYS(sys_set_thread_area, 1)
        MIPS_SYS(sys_inotify_init, 0)
        MIPS_SYS(sys_inotify_add_watch, 3)      /* 4285 */
        MIPS_SYS(sys_inotify_rm_watch, 2)
        MIPS_SYS(sys_migrate_pages, 4)
        MIPS_SYS(sys_openat, 4)
        MIPS_SYS(sys_mkdirat, 3)
        MIPS_SYS(sys_mknodat, 4)        /* 4290 */
        MIPS_SYS(sys_fchownat, 5)
        MIPS_SYS(sys_futimesat, 3)
        MIPS_SYS(sys_fstatat64, 4)
        MIPS_SYS(sys_unlinkat, 3)
        MIPS_SYS(sys_renameat, 4)       /* 4295 */
        MIPS_SYS(sys_linkat, 5)
        MIPS_SYS(sys_symlinkat, 3)
        MIPS_SYS(sys_readlinkat, 4)
        MIPS_SYS(sys_fchmodat, 3)
        MIPS_SYS(sys_faccessat, 3)      /* 4300 */
        MIPS_SYS(sys_pselect6, 6)
        MIPS_SYS(sys_ppoll, 5)
        MIPS_SYS(sys_unshare, 1)
        MIPS_SYS(sys_splice, 6)
        MIPS_SYS(sys_sync_file_range, 7)        /* 4305 */
        MIPS_SYS(sys_tee, 4)
        MIPS_SYS(sys_vmsplice, 4)
        MIPS_SYS(sys_move_pages, 6)
        MIPS_SYS(sys_set_robust_list, 2)
        MIPS_SYS(sys_get_robust_list, 3)        /* 4310 */
        MIPS_SYS(sys_kexec_load, 4)
        MIPS_SYS(sys_getcpu, 3)
        MIPS_SYS(sys_epoll_pwait, 6)
        MIPS_SYS(sys_ioprio_set, 3)
        MIPS_SYS(sys_ioprio_get, 2)     /* 4315 */
        MIPS_SYS(sys_utimensat, 4)
        MIPS_SYS(sys_signalfd, 3)
        MIPS_SYS(sys_ni_syscall, 0)     /* was timerfd */
        MIPS_SYS(sys_eventfd, 1)
        MIPS_SYS(sys_fallocate, 6)      /* 4320 */
        MIPS_SYS(sys_timerfd_create, 2)
        MIPS_SYS(sys_timerfd_gettime, 2)
        MIPS_SYS(sys_timerfd_settime, 4)
        MIPS_SYS(sys_signalfd4, 4)
        MIPS_SYS(sys_eventfd2, 2)       /* 4325 */
        MIPS_SYS(sys_epoll_create1, 1)
        MIPS_SYS(sys_dup3, 3)
        MIPS_SYS(sys_pipe2, 2)
        MIPS_SYS(sys_inotify_init1, 1)
        MIPS_SYS(sys_preadv, 6)         /* 4330 */
        MIPS_SYS(sys_pwritev, 6)
        MIPS_SYS(sys_rt_tgsigqueueinfo, 4)
        MIPS_SYS(sys_perf_event_open, 5)
        MIPS_SYS(sys_accept4, 4)
        MIPS_SYS(sys_recvmmsg, 5)       /* 4335 */
        MIPS_SYS(sys_fanotify_init, 2)
        MIPS_SYS(sys_fanotify_mark, 6)
        MIPS_SYS(sys_prlimit64, 4)
        MIPS_SYS(sys_name_to_handle_at, 5)
        MIPS_SYS(sys_open_by_handle_at, 3)      /* 4340 */
        MIPS_SYS(sys_clock_adjtime, 2)
        MIPS_SYS(sys_syncfs, 1)
};
# undef MIPS_SYS
# endif /* O32 */
2330 static int do_store_exclusive(CPUMIPSState
*env
)
2333 target_ulong page_addr
;
2341 page_addr
= addr
& TARGET_PAGE_MASK
;
2344 flags
= page_get_flags(page_addr
);
2345 if ((flags
& PAGE_READ
) == 0) {
2348 reg
= env
->llreg
& 0x1f;
2349 d
= (env
->llreg
& 0x20) != 0;
2351 segv
= get_user_s64(val
, addr
);
2353 segv
= get_user_s32(val
, addr
);
2356 if (val
!= env
->llval
) {
2357 env
->active_tc
.gpr
[reg
] = 0;
2360 segv
= put_user_u64(env
->llnewval
, addr
);
2362 segv
= put_user_u32(env
->llnewval
, addr
);
2365 env
->active_tc
.gpr
[reg
] = 1;
2372 env
->active_tc
.PC
+= 4;
2385 static int do_break(CPUMIPSState
*env
, target_siginfo_t
*info
,
2393 info
->si_signo
= TARGET_SIGFPE
;
2395 info
->si_code
= (code
== BRK_OVERFLOW
) ? FPE_INTOVF
: FPE_INTDIV
;
2396 queue_signal(env
, info
->si_signo
, &*info
);
2400 info
->si_signo
= TARGET_SIGTRAP
;
2402 queue_signal(env
, info
->si_signo
, &*info
);
2410 void cpu_loop(CPUMIPSState
*env
)
2412 CPUState
*cs
= CPU(mips_env_get_cpu(env
));
2413 target_siginfo_t info
;
2416 # ifdef TARGET_ABI_MIPSO32
2417 unsigned int syscall_num
;
2422 trapnr
= cpu_mips_exec(env
);
2426 env
->active_tc
.PC
+= 4;
2427 # ifdef TARGET_ABI_MIPSO32
2428 syscall_num
= env
->active_tc
.gpr
[2] - 4000;
2429 if (syscall_num
>= sizeof(mips_syscall_args
)) {
2430 ret
= -TARGET_ENOSYS
;
2434 abi_ulong arg5
= 0, arg6
= 0, arg7
= 0, arg8
= 0;
2436 nb_args
= mips_syscall_args
[syscall_num
];
2437 sp_reg
= env
->active_tc
.gpr
[29];
2439 /* these arguments are taken from the stack */
2441 if ((ret
= get_user_ual(arg8
, sp_reg
+ 28)) != 0) {
2445 if ((ret
= get_user_ual(arg7
, sp_reg
+ 24)) != 0) {
2449 if ((ret
= get_user_ual(arg6
, sp_reg
+ 20)) != 0) {
2453 if ((ret
= get_user_ual(arg5
, sp_reg
+ 16)) != 0) {
2459 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2460 env
->active_tc
.gpr
[4],
2461 env
->active_tc
.gpr
[5],
2462 env
->active_tc
.gpr
[6],
2463 env
->active_tc
.gpr
[7],
2464 arg5
, arg6
, arg7
, arg8
);
2468 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2469 env
->active_tc
.gpr
[4], env
->active_tc
.gpr
[5],
2470 env
->active_tc
.gpr
[6], env
->active_tc
.gpr
[7],
2471 env
->active_tc
.gpr
[8], env
->active_tc
.gpr
[9],
2472 env
->active_tc
.gpr
[10], env
->active_tc
.gpr
[11]);
2474 if (ret
== -TARGET_QEMU_ESIGRETURN
) {
2475 /* Returning from a successful sigreturn syscall.
2476 Avoid clobbering register state. */
2479 if ((abi_ulong
)ret
>= (abi_ulong
)-1133) {
2480 env
->active_tc
.gpr
[7] = 1; /* error flag */
2483 env
->active_tc
.gpr
[7] = 0; /* error flag */
2485 env
->active_tc
.gpr
[2] = ret
;
2491 info
.si_signo
= TARGET_SIGSEGV
;
2493 /* XXX: check env->error_code */
2494 info
.si_code
= TARGET_SEGV_MAPERR
;
2495 info
._sifields
._sigfault
._addr
= env
->CP0_BadVAddr
;
2496 queue_signal(env
, info
.si_signo
, &info
);
2500 info
.si_signo
= TARGET_SIGILL
;
2503 queue_signal(env
, info
.si_signo
, &info
);
2505 case EXCP_INTERRUPT
:
2506 /* just indicate that signals should be handled asap */
2512 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2515 info
.si_signo
= sig
;
2517 info
.si_code
= TARGET_TRAP_BRKPT
;
2518 queue_signal(env
, info
.si_signo
, &info
);
2523 if (do_store_exclusive(env
)) {
2524 info
.si_signo
= TARGET_SIGSEGV
;
2526 info
.si_code
= TARGET_SEGV_MAPERR
;
2527 info
._sifields
._sigfault
._addr
= env
->active_tc
.PC
;
2528 queue_signal(env
, info
.si_signo
, &info
);
2532 info
.si_signo
= TARGET_SIGILL
;
2534 info
.si_code
= TARGET_ILL_ILLOPC
;
2535 queue_signal(env
, info
.si_signo
, &info
);
2537 /* The code below was inspired by the MIPS Linux kernel trap
2538 * handling code in arch/mips/kernel/traps.c.
2542 abi_ulong trap_instr
;
2545 if (env
->hflags
& MIPS_HFLAG_M16
) {
2546 if (env
->insn_flags
& ASE_MICROMIPS
) {
2547 /* microMIPS mode */
2548 ret
= get_user_u16(trap_instr
, env
->active_tc
.PC
);
2553 if ((trap_instr
>> 10) == 0x11) {
2554 /* 16-bit instruction */
2555 code
= trap_instr
& 0xf;
2557 /* 32-bit instruction */
2560 ret
= get_user_u16(instr_lo
,
2561 env
->active_tc
.PC
+ 2);
2565 trap_instr
= (trap_instr
<< 16) | instr_lo
;
2566 code
= ((trap_instr
>> 6) & ((1 << 20) - 1));
2567 /* Unfortunately, microMIPS also suffers from
2568 the old assembler bug... */
2569 if (code
>= (1 << 10)) {
2575 ret
= get_user_u16(trap_instr
, env
->active_tc
.PC
);
2579 code
= (trap_instr
>> 6) & 0x3f;
2582 ret
= get_user_ual(trap_instr
, env
->active_tc
.PC
);
2587 /* As described in the original Linux kernel code, the
2588 * below checks on 'code' are to work around an old
2591 code
= ((trap_instr
>> 6) & ((1 << 20) - 1));
2592 if (code
>= (1 << 10)) {
2597 if (do_break(env
, &info
, code
) != 0) {
2604 abi_ulong trap_instr
;
2605 unsigned int code
= 0;
2607 if (env
->hflags
& MIPS_HFLAG_M16
) {
2608 /* microMIPS mode */
2611 ret
= get_user_u16(instr
[0], env
->active_tc
.PC
) ||
2612 get_user_u16(instr
[1], env
->active_tc
.PC
+ 2);
2614 trap_instr
= (instr
[0] << 16) | instr
[1];
2616 ret
= get_user_ual(trap_instr
, env
->active_tc
.PC
);
2623 /* The immediate versions don't provide a code. */
2624 if (!(trap_instr
& 0xFC000000)) {
2625 if (env
->hflags
& MIPS_HFLAG_M16
) {
2626 /* microMIPS mode */
2627 code
= ((trap_instr
>> 12) & ((1 << 4) - 1));
2629 code
= ((trap_instr
>> 6) & ((1 << 10) - 1));
2633 if (do_break(env
, &info
, code
) != 0) {
2640 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
2642 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2645 process_pending_signals(env
);
2650 #ifdef TARGET_OPENRISC
2652 void cpu_loop(CPUOpenRISCState
*env
)
2654 CPUState
*cs
= CPU(openrisc_env_get_cpu(env
));
2658 trapnr
= cpu_exec(env
);
2663 qemu_log("\nReset request, exit, pc is %#x\n", env
->pc
);
2667 qemu_log("\nBus error, exit, pc is %#x\n", env
->pc
);
2672 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2673 gdbsig
= TARGET_SIGSEGV
;
2676 qemu_log("\nTick time interrupt pc is %#x\n", env
->pc
);
2679 qemu_log("\nAlignment pc is %#x\n", env
->pc
);
2683 qemu_log("\nIllegal instructionpc is %#x\n", env
->pc
);
2687 qemu_log("\nExternal interruptpc is %#x\n", env
->pc
);
2691 qemu_log("\nTLB miss\n");
2694 qemu_log("\nRange\n");
2698 env
->pc
+= 4; /* 0xc00; */
2699 env
->gpr
[11] = do_syscall(env
,
2700 env
->gpr
[11], /* return value */
2701 env
->gpr
[3], /* r3 - r7 are params */
2709 qemu_log("\nFloating point error\n");
2712 qemu_log("\nTrap\n");
2719 qemu_log("\nqemu: unhandled CPU exception %#x - aborting\n",
2721 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2722 gdbsig
= TARGET_SIGILL
;
2726 gdb_handlesig(cs
, gdbsig
);
2727 if (gdbsig
!= TARGET_SIGTRAP
) {
2732 process_pending_signals(env
);
2736 #endif /* TARGET_OPENRISC */
2739 void cpu_loop(CPUSH4State
*env
)
2741 CPUState
*cs
= CPU(sh_env_get_cpu(env
));
2743 target_siginfo_t info
;
2746 trapnr
= cpu_sh4_exec (env
);
2751 ret
= do_syscall(env
,
2760 env
->gregs
[0] = ret
;
2762 case EXCP_INTERRUPT
:
2763 /* just indicate that signals should be handled asap */
2769 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2772 info
.si_signo
= sig
;
2774 info
.si_code
= TARGET_TRAP_BRKPT
;
2775 queue_signal(env
, info
.si_signo
, &info
);
2781 info
.si_signo
= SIGSEGV
;
2783 info
.si_code
= TARGET_SEGV_MAPERR
;
2784 info
._sifields
._sigfault
._addr
= env
->tea
;
2785 queue_signal(env
, info
.si_signo
, &info
);
2789 printf ("Unhandled trap: 0x%x\n", trapnr
);
2790 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2793 process_pending_signals (env
);
2799 void cpu_loop(CPUCRISState
*env
)
2801 CPUState
*cs
= CPU(cris_env_get_cpu(env
));
2803 target_siginfo_t info
;
2806 trapnr
= cpu_cris_exec (env
);
2810 info
.si_signo
= SIGSEGV
;
2812 /* XXX: check env->error_code */
2813 info
.si_code
= TARGET_SEGV_MAPERR
;
2814 info
._sifields
._sigfault
._addr
= env
->pregs
[PR_EDA
];
2815 queue_signal(env
, info
.si_signo
, &info
);
2818 case EXCP_INTERRUPT
:
2819 /* just indicate that signals should be handled asap */
2822 ret
= do_syscall(env
,
2831 env
->regs
[10] = ret
;
2837 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2840 info
.si_signo
= sig
;
2842 info
.si_code
= TARGET_TRAP_BRKPT
;
2843 queue_signal(env
, info
.si_signo
, &info
);
2848 printf ("Unhandled trap: 0x%x\n", trapnr
);
2849 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2852 process_pending_signals (env
);
2857 #ifdef TARGET_MICROBLAZE
2858 void cpu_loop(CPUMBState
*env
)
2860 CPUState
*cs
= CPU(mb_env_get_cpu(env
));
2862 target_siginfo_t info
;
2865 trapnr
= cpu_mb_exec (env
);
2869 info
.si_signo
= SIGSEGV
;
2871 /* XXX: check env->error_code */
2872 info
.si_code
= TARGET_SEGV_MAPERR
;
2873 info
._sifields
._sigfault
._addr
= 0;
2874 queue_signal(env
, info
.si_signo
, &info
);
2877 case EXCP_INTERRUPT
:
2878 /* just indicate that signals should be handled asap */
2881 /* Return address is 4 bytes after the call. */
2883 env
->sregs
[SR_PC
] = env
->regs
[14];
2884 ret
= do_syscall(env
,
2896 env
->regs
[17] = env
->sregs
[SR_PC
] + 4;
2897 if (env
->iflags
& D_FLAG
) {
2898 env
->sregs
[SR_ESR
] |= 1 << 12;
2899 env
->sregs
[SR_PC
] -= 4;
2900 /* FIXME: if branch was immed, replay the imm as well. */
2903 env
->iflags
&= ~(IMM_FLAG
| D_FLAG
);
2905 switch (env
->sregs
[SR_ESR
] & 31) {
2906 case ESR_EC_DIVZERO
:
2907 info
.si_signo
= SIGFPE
;
2909 info
.si_code
= TARGET_FPE_FLTDIV
;
2910 info
._sifields
._sigfault
._addr
= 0;
2911 queue_signal(env
, info
.si_signo
, &info
);
2914 info
.si_signo
= SIGFPE
;
2916 if (env
->sregs
[SR_FSR
] & FSR_IO
) {
2917 info
.si_code
= TARGET_FPE_FLTINV
;
2919 if (env
->sregs
[SR_FSR
] & FSR_DZ
) {
2920 info
.si_code
= TARGET_FPE_FLTDIV
;
2922 info
._sifields
._sigfault
._addr
= 0;
2923 queue_signal(env
, info
.si_signo
, &info
);
2926 printf ("Unhandled hw-exception: 0x%x\n",
2927 env
->sregs
[SR_ESR
] & ESR_EC_MASK
);
2928 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2937 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2940 info
.si_signo
= sig
;
2942 info
.si_code
= TARGET_TRAP_BRKPT
;
2943 queue_signal(env
, info
.si_signo
, &info
);
2948 printf ("Unhandled trap: 0x%x\n", trapnr
);
2949 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2952 process_pending_signals (env
);
2959 void cpu_loop(CPUM68KState
*env
)
2961 CPUState
*cs
= CPU(m68k_env_get_cpu(env
));
2964 target_siginfo_t info
;
2965 TaskState
*ts
= cs
->opaque
;
2968 trapnr
= cpu_m68k_exec(env
);
2972 if (ts
->sim_syscalls
) {
2974 nr
= lduw(env
->pc
+ 2);
2976 do_m68k_simcall(env
, nr
);
2982 case EXCP_HALT_INSN
:
2983 /* Semihosing syscall. */
2985 do_m68k_semihosting(env
, env
->dregs
[0]);
2989 case EXCP_UNSUPPORTED
:
2991 info
.si_signo
= SIGILL
;
2993 info
.si_code
= TARGET_ILL_ILLOPN
;
2994 info
._sifields
._sigfault
._addr
= env
->pc
;
2995 queue_signal(env
, info
.si_signo
, &info
);
2999 ts
->sim_syscalls
= 0;
3002 env
->dregs
[0] = do_syscall(env
,
3013 case EXCP_INTERRUPT
:
3014 /* just indicate that signals should be handled asap */
3018 info
.si_signo
= SIGSEGV
;
3020 /* XXX: check env->error_code */
3021 info
.si_code
= TARGET_SEGV_MAPERR
;
3022 info
._sifields
._sigfault
._addr
= env
->mmu
.ar
;
3023 queue_signal(env
, info
.si_signo
, &info
);
3030 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3033 info
.si_signo
= sig
;
3035 info
.si_code
= TARGET_TRAP_BRKPT
;
3036 queue_signal(env
, info
.si_signo
, &info
);
3041 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
3043 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3046 process_pending_signals(env
);
3049 #endif /* TARGET_M68K */
3052 static void do_store_exclusive(CPUAlphaState
*env
, int reg
, int quad
)
3054 target_ulong addr
, val
, tmp
;
3055 target_siginfo_t info
;
3058 addr
= env
->lock_addr
;
3059 tmp
= env
->lock_st_addr
;
3060 env
->lock_addr
= -1;
3061 env
->lock_st_addr
= 0;
3067 if (quad
? get_user_s64(val
, addr
) : get_user_s32(val
, addr
)) {
3071 if (val
== env
->lock_value
) {
3073 if (quad
? put_user_u64(tmp
, addr
) : put_user_u32(tmp
, addr
)) {
3090 info
.si_signo
= TARGET_SIGSEGV
;
3092 info
.si_code
= TARGET_SEGV_MAPERR
;
3093 info
._sifields
._sigfault
._addr
= addr
;
3094 queue_signal(env
, TARGET_SIGSEGV
, &info
);
3097 void cpu_loop(CPUAlphaState
*env
)
3099 CPUState
*cs
= CPU(alpha_env_get_cpu(env
));
3101 target_siginfo_t info
;
3105 trapnr
= cpu_alpha_exec (env
);
3107 /* All of the traps imply a transition through PALcode, which
3108 implies an REI instruction has been executed. Which means
3109 that the intr_flag should be cleared. */
3114 fprintf(stderr
, "Reset requested. Exit\n");
3118 fprintf(stderr
, "Machine check exception. Exit\n");
3121 case EXCP_SMP_INTERRUPT
:
3122 case EXCP_CLK_INTERRUPT
:
3123 case EXCP_DEV_INTERRUPT
:
3124 fprintf(stderr
, "External interrupt. Exit\n");
3128 env
->lock_addr
= -1;
3129 info
.si_signo
= TARGET_SIGSEGV
;
3131 info
.si_code
= (page_get_flags(env
->trap_arg0
) & PAGE_VALID
3132 ? TARGET_SEGV_ACCERR
: TARGET_SEGV_MAPERR
);
3133 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
3134 queue_signal(env
, info
.si_signo
, &info
);
3137 env
->lock_addr
= -1;
3138 info
.si_signo
= TARGET_SIGBUS
;
3140 info
.si_code
= TARGET_BUS_ADRALN
;
3141 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
3142 queue_signal(env
, info
.si_signo
, &info
);
3146 env
->lock_addr
= -1;
3147 info
.si_signo
= TARGET_SIGILL
;
3149 info
.si_code
= TARGET_ILL_ILLOPC
;
3150 info
._sifields
._sigfault
._addr
= env
->pc
;
3151 queue_signal(env
, info
.si_signo
, &info
);
3154 env
->lock_addr
= -1;
3155 info
.si_signo
= TARGET_SIGFPE
;
3157 info
.si_code
= TARGET_FPE_FLTINV
;
3158 info
._sifields
._sigfault
._addr
= env
->pc
;
3159 queue_signal(env
, info
.si_signo
, &info
);
3162 /* No-op. Linux simply re-enables the FPU. */
3165 env
->lock_addr
= -1;
3166 switch (env
->error_code
) {
3169 info
.si_signo
= TARGET_SIGTRAP
;
3171 info
.si_code
= TARGET_TRAP_BRKPT
;
3172 info
._sifields
._sigfault
._addr
= env
->pc
;
3173 queue_signal(env
, info
.si_signo
, &info
);
3177 info
.si_signo
= TARGET_SIGTRAP
;
3180 info
._sifields
._sigfault
._addr
= env
->pc
;
3181 queue_signal(env
, info
.si_signo
, &info
);
3185 trapnr
= env
->ir
[IR_V0
];
3186 sysret
= do_syscall(env
, trapnr
,
3187 env
->ir
[IR_A0
], env
->ir
[IR_A1
],
3188 env
->ir
[IR_A2
], env
->ir
[IR_A3
],
3189 env
->ir
[IR_A4
], env
->ir
[IR_A5
],
3191 if (trapnr
== TARGET_NR_sigreturn
3192 || trapnr
== TARGET_NR_rt_sigreturn
) {
3195 /* Syscall writes 0 to V0 to bypass error check, similar
3196 to how this is handled internal to Linux kernel.
3197 (Ab)use trapnr temporarily as boolean indicating error. */
3198 trapnr
= (env
->ir
[IR_V0
] != 0 && sysret
< 0);
3199 env
->ir
[IR_V0
] = (trapnr
? -sysret
: sysret
);
3200 env
->ir
[IR_A3
] = trapnr
;
3204 /* ??? We can probably elide the code using page_unprotect
3205 that is checking for self-modifying code. Instead we
3206 could simply call tb_flush here. Until we work out the
3207 changes required to turn off the extra write protection,
3208 this can be a no-op. */
3212 /* Handled in the translator for usermode. */
3216 /* Handled in the translator for usermode. */
3220 info
.si_signo
= TARGET_SIGFPE
;
3221 switch (env
->ir
[IR_A0
]) {
3222 case TARGET_GEN_INTOVF
:
3223 info
.si_code
= TARGET_FPE_INTOVF
;
3225 case TARGET_GEN_INTDIV
:
3226 info
.si_code
= TARGET_FPE_INTDIV
;
3228 case TARGET_GEN_FLTOVF
:
3229 info
.si_code
= TARGET_FPE_FLTOVF
;
3231 case TARGET_GEN_FLTUND
:
3232 info
.si_code
= TARGET_FPE_FLTUND
;
3234 case TARGET_GEN_FLTINV
:
3235 info
.si_code
= TARGET_FPE_FLTINV
;
3237 case TARGET_GEN_FLTINE
:
3238 info
.si_code
= TARGET_FPE_FLTRES
;
3240 case TARGET_GEN_ROPRAND
:
3244 info
.si_signo
= TARGET_SIGTRAP
;
3249 info
._sifields
._sigfault
._addr
= env
->pc
;
3250 queue_signal(env
, info
.si_signo
, &info
);
3257 info
.si_signo
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3258 if (info
.si_signo
) {
3259 env
->lock_addr
= -1;
3261 info
.si_code
= TARGET_TRAP_BRKPT
;
3262 queue_signal(env
, info
.si_signo
, &info
);
3267 do_store_exclusive(env
, env
->error_code
, trapnr
- EXCP_STL_C
);
3269 case EXCP_INTERRUPT
:
3270 /* Just indicate that signals should be handled asap. */
3273 printf ("Unhandled trap: 0x%x\n", trapnr
);
3274 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3277 process_pending_signals (env
);
3280 #endif /* TARGET_ALPHA */
3283 void cpu_loop(CPUS390XState
*env
)
3285 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
3287 target_siginfo_t info
;
3291 trapnr
= cpu_s390x_exec(env
);
3293 case EXCP_INTERRUPT
:
3294 /* Just indicate that signals should be handled asap. */
3298 n
= env
->int_svc_code
;
3300 /* syscalls > 255 */
3303 env
->psw
.addr
+= env
->int_svc_ilen
;
3304 env
->regs
[2] = do_syscall(env
, n
, env
->regs
[2], env
->regs
[3],
3305 env
->regs
[4], env
->regs
[5],
3306 env
->regs
[6], env
->regs
[7], 0, 0);
3310 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3312 n
= TARGET_TRAP_BRKPT
;
3317 n
= env
->int_pgm_code
;
3320 case PGM_PRIVILEGED
:
3322 n
= TARGET_ILL_ILLOPC
;
3324 case PGM_PROTECTION
:
3325 case PGM_ADDRESSING
:
3327 /* XXX: check env->error_code */
3328 n
= TARGET_SEGV_MAPERR
;
3329 addr
= env
->__excp_addr
;
3332 case PGM_SPECIFICATION
:
3333 case PGM_SPECIAL_OP
:
3337 n
= TARGET_ILL_ILLOPN
;
3340 case PGM_FIXPT_OVERFLOW
:
3342 n
= TARGET_FPE_INTOVF
;
3344 case PGM_FIXPT_DIVIDE
:
3346 n
= TARGET_FPE_INTDIV
;
3350 n
= (env
->fpc
>> 8) & 0xff;
3352 /* compare-and-trap */
3355 /* An IEEE exception, simulated or otherwise. */
3357 n
= TARGET_FPE_FLTINV
;
3358 } else if (n
& 0x40) {
3359 n
= TARGET_FPE_FLTDIV
;
3360 } else if (n
& 0x20) {
3361 n
= TARGET_FPE_FLTOVF
;
3362 } else if (n
& 0x10) {
3363 n
= TARGET_FPE_FLTUND
;
3364 } else if (n
& 0x08) {
3365 n
= TARGET_FPE_FLTRES
;
3367 /* ??? Quantum exception; BFP, DFP error. */
3375 fprintf(stderr
, "Unhandled program exception: %#x\n", n
);
3376 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3382 addr
= env
->psw
.addr
;
3384 info
.si_signo
= sig
;
3387 info
._sifields
._sigfault
._addr
= addr
;
3388 queue_signal(env
, info
.si_signo
, &info
);
3392 fprintf(stderr
, "Unhandled trap: 0x%x\n", trapnr
);
3393 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3396 process_pending_signals (env
);
3400 #endif /* TARGET_S390X */
3402 THREAD CPUState
*thread_cpu
;
3404 void task_settid(TaskState
*ts
)
3406 if (ts
->ts_tid
== 0) {
3407 ts
->ts_tid
= (pid_t
)syscall(SYS_gettid
);
3411 void stop_all_tasks(void)
3414 * We trust that when using NPTL, start_exclusive()
3415 * handles thread stopping correctly.
3420 /* Assumes contents are already zeroed. */
3421 void init_task_state(TaskState
*ts
)
3426 ts
->first_free
= ts
->sigqueue_table
;
3427 for (i
= 0; i
< MAX_SIGQUEUE_SIZE
- 1; i
++) {
3428 ts
->sigqueue_table
[i
].next
= &ts
->sigqueue_table
[i
+ 1];
3430 ts
->sigqueue_table
[i
].next
= NULL
;
3433 CPUArchState
*cpu_copy(CPUArchState
*env
)
3435 CPUState
*cpu
= ENV_GET_CPU(env
);
3436 CPUArchState
*new_env
= cpu_init(cpu_model
);
3437 CPUState
*new_cpu
= ENV_GET_CPU(new_env
);
3438 #if defined(TARGET_HAS_ICE)
3443 /* Reset non arch specific state */
3446 memcpy(new_env
, env
, sizeof(CPUArchState
));
3448 /* Clone all break/watchpoints.
3449 Note: Once we support ptrace with hw-debug register access, make sure
3450 BP_CPU break/watchpoints are handled correctly on clone. */
3451 QTAILQ_INIT(&cpu
->breakpoints
);
3452 QTAILQ_INIT(&cpu
->watchpoints
);
3453 #if defined(TARGET_HAS_ICE)
3454 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
3455 cpu_breakpoint_insert(new_cpu
, bp
->pc
, bp
->flags
, NULL
);
3457 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
3458 cpu_watchpoint_insert(new_cpu
, wp
->vaddr
, (~wp
->len_mask
) + 1,
3466 static void handle_arg_help(const char *arg
)
3471 static void handle_arg_log(const char *arg
)
3475 mask
= qemu_str_to_log_mask(arg
);
3477 qemu_print_log_usage(stdout
);
3483 static void handle_arg_log_filename(const char *arg
)
3485 qemu_set_log_filename(arg
);
3488 static void handle_arg_set_env(const char *arg
)
3490 char *r
, *p
, *token
;
3491 r
= p
= strdup(arg
);
3492 while ((token
= strsep(&p
, ",")) != NULL
) {
3493 if (envlist_setenv(envlist
, token
) != 0) {
3500 static void handle_arg_unset_env(const char *arg
)
3502 char *r
, *p
, *token
;
3503 r
= p
= strdup(arg
);
3504 while ((token
= strsep(&p
, ",")) != NULL
) {
3505 if (envlist_unsetenv(envlist
, token
) != 0) {
3512 static void handle_arg_argv0(const char *arg
)
3514 argv0
= strdup(arg
);
3517 static void handle_arg_stack_size(const char *arg
)
3520 guest_stack_size
= strtoul(arg
, &p
, 0);
3521 if (guest_stack_size
== 0) {
3526 guest_stack_size
*= 1024 * 1024;
3527 } else if (*p
== 'k' || *p
== 'K') {
3528 guest_stack_size
*= 1024;
3532 static void handle_arg_ld_prefix(const char *arg
)
3534 interp_prefix
= strdup(arg
);
3537 static void handle_arg_pagesize(const char *arg
)
3539 qemu_host_page_size
= atoi(arg
);
3540 if (qemu_host_page_size
== 0 ||
3541 (qemu_host_page_size
& (qemu_host_page_size
- 1)) != 0) {
3542 fprintf(stderr
, "page size must be a power of two\n");
3547 static void handle_arg_gdb(const char *arg
)
3549 gdbstub_port
= atoi(arg
);
3552 static void handle_arg_uname(const char *arg
)
3554 qemu_uname_release
= strdup(arg
);
3557 static void handle_arg_cpu(const char *arg
)
3559 cpu_model
= strdup(arg
);
3560 if (cpu_model
== NULL
|| is_help_option(cpu_model
)) {
3561 /* XXX: implement xxx_cpu_list for targets that still miss it */
3562 #if defined(cpu_list)
3563 cpu_list(stdout
, &fprintf
);
3569 #if defined(CONFIG_USE_GUEST_BASE)
3570 static void handle_arg_guest_base(const char *arg
)
3572 guest_base
= strtol(arg
, NULL
, 0);
3573 have_guest_base
= 1;
3576 static void handle_arg_reserved_va(const char *arg
)
3580 reserved_va
= strtoul(arg
, &p
, 0);
3594 unsigned long unshifted
= reserved_va
;
3596 reserved_va
<<= shift
;
3597 if (((reserved_va
>> shift
) != unshifted
)
3598 #if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
3599 || (reserved_va
> (1ul << TARGET_VIRT_ADDR_SPACE_BITS
))
3602 fprintf(stderr
, "Reserved virtual address too big\n");
3607 fprintf(stderr
, "Unrecognised -R size suffix '%s'\n", p
);
3613 static void handle_arg_singlestep(const char *arg
)
3618 static void handle_arg_strace(const char *arg
)
3623 static void handle_arg_version(const char *arg
)
3625 printf("qemu-" TARGET_NAME
" version " QEMU_VERSION QEMU_PKGVERSION
3626 ", Copyright (c) 2003-2008 Fabrice Bellard\n");
3630 struct qemu_argument
{
3634 void (*handle_opt
)(const char *arg
);
3635 const char *example
;
3639 static const struct qemu_argument arg_table
[] = {
3640 {"h", "", false, handle_arg_help
,
3641 "", "print this help"},
3642 {"g", "QEMU_GDB", true, handle_arg_gdb
,
3643 "port", "wait gdb connection to 'port'"},
3644 {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix
,
3645 "path", "set the elf interpreter prefix to 'path'"},
3646 {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size
,
3647 "size", "set the stack size to 'size' bytes"},
3648 {"cpu", "QEMU_CPU", true, handle_arg_cpu
,
3649 "model", "select CPU (-cpu help for list)"},
3650 {"E", "QEMU_SET_ENV", true, handle_arg_set_env
,
3651 "var=value", "sets targets environment variable (see below)"},
3652 {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env
,
3653 "var", "unsets targets environment variable (see below)"},
3654 {"0", "QEMU_ARGV0", true, handle_arg_argv0
,
3655 "argv0", "forces target process argv[0] to be 'argv0'"},
3656 {"r", "QEMU_UNAME", true, handle_arg_uname
,
3657 "uname", "set qemu uname release string to 'uname'"},
3658 #if defined(CONFIG_USE_GUEST_BASE)
3659 {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base
,
3660 "address", "set guest_base address to 'address'"},
3661 {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va
,
3662 "size", "reserve 'size' bytes for guest virtual address space"},
3664 {"d", "QEMU_LOG", true, handle_arg_log
,
3665 "item[,...]", "enable logging of specified items "
3666 "(use '-d help' for a list of items)"},
3667 {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename
,
3668 "logfile", "write logs to 'logfile' (default stderr)"},
3669 {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize
,
3670 "pagesize", "set the host page size to 'pagesize'"},
3671 {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep
,
3672 "", "run in singlestep mode"},
3673 {"strace", "QEMU_STRACE", false, handle_arg_strace
,
3674 "", "log system calls"},
3675 {"version", "QEMU_VERSION", false, handle_arg_version
,
3676 "", "display version information and exit"},
3677 {NULL
, NULL
, false, NULL
, NULL
, NULL
}
3680 static void usage(void)
3682 const struct qemu_argument
*arginfo
;
3686 printf("usage: qemu-" TARGET_NAME
" [options] program [arguments...]\n"
3687 "Linux CPU emulator (compiled for " TARGET_NAME
" emulation)\n"
3689 "Options and associated environment variables:\n"
3692 /* Calculate column widths. We must always have at least enough space
3693 * for the column header.
3695 maxarglen
= strlen("Argument");
3696 maxenvlen
= strlen("Env-variable");
3698 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
3699 int arglen
= strlen(arginfo
->argv
);
3700 if (arginfo
->has_arg
) {
3701 arglen
+= strlen(arginfo
->example
) + 1;
3703 if (strlen(arginfo
->env
) > maxenvlen
) {
3704 maxenvlen
= strlen(arginfo
->env
);
3706 if (arglen
> maxarglen
) {
3711 printf("%-*s %-*s Description\n", maxarglen
+1, "Argument",
3712 maxenvlen
, "Env-variable");
3714 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
3715 if (arginfo
->has_arg
) {
3716 printf("-%s %-*s %-*s %s\n", arginfo
->argv
,
3717 (int)(maxarglen
- strlen(arginfo
->argv
) - 1),
3718 arginfo
->example
, maxenvlen
, arginfo
->env
, arginfo
->help
);
3720 printf("-%-*s %-*s %s\n", maxarglen
, arginfo
->argv
,
3721 maxenvlen
, arginfo
->env
,
3728 "QEMU_LD_PREFIX = %s\n"
3729 "QEMU_STACK_SIZE = %ld byte\n",
3734 "You can use -E and -U options or the QEMU_SET_ENV and\n"
3735 "QEMU_UNSET_ENV environment variables to set and unset\n"
3736 "environment variables for the target process.\n"
3737 "It is possible to provide several variables by separating them\n"
3738 "by commas in getsubopt(3) style. Additionally it is possible to\n"
3739 "provide the -E and -U options multiple times.\n"
3740 "The following lines are equivalent:\n"
3741 " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
3742 " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
3743 " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
3744 "Note that if you provide several changes to a single variable\n"
3745 "the last change will stay in effect.\n");
3750 static int parse_args(int argc
, char **argv
)
3754 const struct qemu_argument
*arginfo
;
3756 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
3757 if (arginfo
->env
== NULL
) {
3761 r
= getenv(arginfo
->env
);
3763 arginfo
->handle_opt(r
);
3769 if (optind
>= argc
) {
3778 if (!strcmp(r
, "-")) {
3782 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
3783 if (!strcmp(r
, arginfo
->argv
)) {
3784 if (arginfo
->has_arg
) {
3785 if (optind
>= argc
) {
3788 arginfo
->handle_opt(argv
[optind
]);
3791 arginfo
->handle_opt(NULL
);
3797 /* no option matched the current argv */
3798 if (arginfo
->handle_opt
== NULL
) {
3803 if (optind
>= argc
) {
3807 filename
= argv
[optind
];
3808 exec_path
= argv
[optind
];
3813 int main(int argc
, char **argv
, char **envp
)
3815 struct target_pt_regs regs1
, *regs
= ®s1
;
3816 struct image_info info1
, *info
= &info1
;
3817 struct linux_binprm bprm
;
3822 char **target_environ
, **wrk
;
3829 module_call_init(MODULE_INIT_QOM
);
3831 if ((envlist
= envlist_create()) == NULL
) {
3832 (void) fprintf(stderr
, "Unable to allocate envlist\n");
3836 /* add current environment into the list */
3837 for (wrk
= environ
; *wrk
!= NULL
; wrk
++) {
3838 (void) envlist_setenv(envlist
, *wrk
);
3841 /* Read the stack limit from the kernel. If it's "unlimited",
3842 then we can do little else besides use the default. */
3845 if (getrlimit(RLIMIT_STACK
, &lim
) == 0
3846 && lim
.rlim_cur
!= RLIM_INFINITY
3847 && lim
.rlim_cur
== (target_long
)lim
.rlim_cur
) {
3848 guest_stack_size
= lim
.rlim_cur
;
3853 #if defined(cpudef_setup)
3854 cpudef_setup(); /* parse cpu definitions in target config file (TBD) */
3857 optind
= parse_args(argc
, argv
);
3860 memset(regs
, 0, sizeof(struct target_pt_regs
));
3862 /* Zero out image_info */
3863 memset(info
, 0, sizeof(struct image_info
));
3865 memset(&bprm
, 0, sizeof (bprm
));
3867 /* Scan interp_prefix dir for replacement files. */
3868 init_paths(interp_prefix
);
3870 init_qemu_uname_release();
3872 if (cpu_model
== NULL
) {
3873 #if defined(TARGET_I386)
3874 #ifdef TARGET_X86_64
3875 cpu_model
= "qemu64";
3877 cpu_model
= "qemu32";
3879 #elif defined(TARGET_ARM)
3881 #elif defined(TARGET_UNICORE32)
3883 #elif defined(TARGET_M68K)
3885 #elif defined(TARGET_SPARC)
3886 #ifdef TARGET_SPARC64
3887 cpu_model
= "TI UltraSparc II";
3889 cpu_model
= "Fujitsu MB86904";
3891 #elif defined(TARGET_MIPS)
3892 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
3897 #elif defined TARGET_OPENRISC
3898 cpu_model
= "or1200";
3899 #elif defined(TARGET_PPC)
3901 cpu_model
= "970fx";
3910 cpu_exec_init_all();
3911 /* NOTE: we need to init the CPU at this stage to get
3912 qemu_host_page_size */
3913 env
= cpu_init(cpu_model
);
3915 fprintf(stderr
, "Unable to find CPU definition\n");
3918 cpu
= ENV_GET_CPU(env
);
3923 if (getenv("QEMU_STRACE")) {
3927 target_environ
= envlist_to_environ(envlist
, NULL
);
3928 envlist_free(envlist
);
3930 #if defined(CONFIG_USE_GUEST_BASE)
3932 * Now that page sizes are configured in cpu_init() we can do
3933 * proper page alignment for guest_base.
3935 guest_base
= HOST_PAGE_ALIGN(guest_base
);
3937 if (reserved_va
|| have_guest_base
) {
3938 guest_base
= init_guest_space(guest_base
, reserved_va
, 0,
3940 if (guest_base
== (unsigned long)-1) {
3941 fprintf(stderr
, "Unable to reserve 0x%lx bytes of virtual address "
3942 "space for use as guest address space (check your virtual "
3943 "memory ulimit setting or reserve less using -R option)\n",
3949 mmap_next_start
= reserved_va
;
3952 #endif /* CONFIG_USE_GUEST_BASE */
3955 * Read in mmap_min_addr kernel parameter. This value is used
3956 * When loading the ELF image to determine whether guest_base
3957 * is needed. It is also used in mmap_find_vma.
3962 if ((fp
= fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL
) {
3964 if (fscanf(fp
, "%lu", &tmp
) == 1) {
3965 mmap_min_addr
= tmp
;
3966 qemu_log("host mmap_min_addr=0x%lx\n", mmap_min_addr
);
3973 * Prepare copy of argv vector for target.
3975 target_argc
= argc
- optind
;
3976 target_argv
= calloc(target_argc
+ 1, sizeof (char *));
3977 if (target_argv
== NULL
) {
3978 (void) fprintf(stderr
, "Unable to allocate memory for target_argv\n");
3983 * If argv0 is specified (using '-0' switch) we replace
3984 * argv[0] pointer with the given one.
3987 if (argv0
!= NULL
) {
3988 target_argv
[i
++] = strdup(argv0
);
3990 for (; i
< target_argc
; i
++) {
3991 target_argv
[i
] = strdup(argv
[optind
+ i
]);
3993 target_argv
[target_argc
] = NULL
;
3995 ts
= g_malloc0 (sizeof(TaskState
));
3996 init_task_state(ts
);
3997 /* build Task State */
4003 execfd
= qemu_getauxval(AT_EXECFD
);
4005 execfd
= open(filename
, O_RDONLY
);
4007 printf("Error while loading %s: %s\n", filename
, strerror(errno
));
4012 ret
= loader_exec(execfd
, filename
, target_argv
, target_environ
, regs
,
4015 printf("Error while loading %s: %s\n", filename
, strerror(-ret
));
4019 for (wrk
= target_environ
; *wrk
; wrk
++) {
4023 free(target_environ
);
4025 if (qemu_log_enabled()) {
4026 #if defined(CONFIG_USE_GUEST_BASE)
4027 qemu_log("guest_base 0x%lx\n", guest_base
);
4031 qemu_log("start_brk 0x" TARGET_ABI_FMT_lx
"\n", info
->start_brk
);
4032 qemu_log("end_code 0x" TARGET_ABI_FMT_lx
"\n", info
->end_code
);
4033 qemu_log("start_code 0x" TARGET_ABI_FMT_lx
"\n",
4035 qemu_log("start_data 0x" TARGET_ABI_FMT_lx
"\n",
4037 qemu_log("end_data 0x" TARGET_ABI_FMT_lx
"\n", info
->end_data
);
4038 qemu_log("start_stack 0x" TARGET_ABI_FMT_lx
"\n",
4040 qemu_log("brk 0x" TARGET_ABI_FMT_lx
"\n", info
->brk
);
4041 qemu_log("entry 0x" TARGET_ABI_FMT_lx
"\n", info
->entry
);
4044 target_set_brk(info
->brk
);
4048 #if defined(CONFIG_USE_GUEST_BASE)
4049 /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
4050 generating the prologue until now so that the prologue can take
4051 the real value of GUEST_BASE into account. */
4052 tcg_prologue_init(&tcg_ctx
);
4055 #if defined(TARGET_I386)
4056 env
->cr
[0] = CR0_PG_MASK
| CR0_WP_MASK
| CR0_PE_MASK
;
4057 env
->hflags
|= HF_PE_MASK
| HF_CPL_MASK
;
4058 if (env
->features
[FEAT_1_EDX
] & CPUID_SSE
) {
4059 env
->cr
[4] |= CR4_OSFXSR_MASK
;
4060 env
->hflags
|= HF_OSFXSR_MASK
;
4062 #ifndef TARGET_ABI32
4063 /* enable 64 bit mode if possible */
4064 if (!(env
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_LM
)) {
4065 fprintf(stderr
, "The selected x86 CPU does not support 64 bit mode\n");
4068 env
->cr
[4] |= CR4_PAE_MASK
;
4069 env
->efer
|= MSR_EFER_LMA
| MSR_EFER_LME
;
4070 env
->hflags
|= HF_LMA_MASK
;
4073 /* flags setup : we activate the IRQs by default as in user mode */
4074 env
->eflags
|= IF_MASK
;
4076 /* linux register setup */
4077 #ifndef TARGET_ABI32
4078 env
->regs
[R_EAX
] = regs
->rax
;
4079 env
->regs
[R_EBX
] = regs
->rbx
;
4080 env
->regs
[R_ECX
] = regs
->rcx
;
4081 env
->regs
[R_EDX
] = regs
->rdx
;
4082 env
->regs
[R_ESI
] = regs
->rsi
;
4083 env
->regs
[R_EDI
] = regs
->rdi
;
4084 env
->regs
[R_EBP
] = regs
->rbp
;
4085 env
->regs
[R_ESP
] = regs
->rsp
;
4086 env
->eip
= regs
->rip
;
4088 env
->regs
[R_EAX
] = regs
->eax
;
4089 env
->regs
[R_EBX
] = regs
->ebx
;
4090 env
->regs
[R_ECX
] = regs
->ecx
;
4091 env
->regs
[R_EDX
] = regs
->edx
;
4092 env
->regs
[R_ESI
] = regs
->esi
;
4093 env
->regs
[R_EDI
] = regs
->edi
;
4094 env
->regs
[R_EBP
] = regs
->ebp
;
4095 env
->regs
[R_ESP
] = regs
->esp
;
4096 env
->eip
= regs
->eip
;
4099 /* linux interrupt setup */
4100 #ifndef TARGET_ABI32
4101 env
->idt
.limit
= 511;
4103 env
->idt
.limit
= 255;
4105 env
->idt
.base
= target_mmap(0, sizeof(uint64_t) * (env
->idt
.limit
+ 1),
4106 PROT_READ
|PROT_WRITE
,
4107 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4108 idt_table
= g2h(env
->idt
.base
);
4131 /* linux segment setup */
4133 uint64_t *gdt_table
;
4134 env
->gdt
.base
= target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES
,
4135 PROT_READ
|PROT_WRITE
,
4136 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4137 env
->gdt
.limit
= sizeof(uint64_t) * TARGET_GDT_ENTRIES
- 1;
4138 gdt_table
= g2h(env
->gdt
.base
);
4140 write_dt(&gdt_table
[__USER_CS
>> 3], 0, 0xfffff,
4141 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4142 (3 << DESC_DPL_SHIFT
) | (0xa << DESC_TYPE_SHIFT
));
4144 /* 64 bit code segment */
4145 write_dt(&gdt_table
[__USER_CS
>> 3], 0, 0xfffff,
4146 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4148 (3 << DESC_DPL_SHIFT
) | (0xa << DESC_TYPE_SHIFT
));
4150 write_dt(&gdt_table
[__USER_DS
>> 3], 0, 0xfffff,
4151 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4152 (3 << DESC_DPL_SHIFT
) | (0x2 << DESC_TYPE_SHIFT
));
4154 cpu_x86_load_seg(env
, R_CS
, __USER_CS
);
4155 cpu_x86_load_seg(env
, R_SS
, __USER_DS
);
4157 cpu_x86_load_seg(env
, R_DS
, __USER_DS
);
4158 cpu_x86_load_seg(env
, R_ES
, __USER_DS
);
4159 cpu_x86_load_seg(env
, R_FS
, __USER_DS
);
4160 cpu_x86_load_seg(env
, R_GS
, __USER_DS
);
4161 /* This hack makes Wine work... */
4162 env
->segs
[R_FS
].selector
= 0;
4164 cpu_x86_load_seg(env
, R_DS
, 0);
4165 cpu_x86_load_seg(env
, R_ES
, 0);
4166 cpu_x86_load_seg(env
, R_FS
, 0);
4167 cpu_x86_load_seg(env
, R_GS
, 0);
4169 #elif defined(TARGET_AARCH64)
4173 if (!(arm_feature(env
, ARM_FEATURE_AARCH64
))) {
4175 "The selected ARM CPU does not support 64 bit mode\n");
4179 for (i
= 0; i
< 31; i
++) {
4180 env
->xregs
[i
] = regs
->regs
[i
];
4183 env
->xregs
[31] = regs
->sp
;
4185 #elif defined(TARGET_ARM)
4188 cpsr_write(env
, regs
->uregs
[16], 0xffffffff);
4189 for(i
= 0; i
< 16; i
++) {
4190 env
->regs
[i
] = regs
->uregs
[i
];
4193 if (EF_ARM_EABI_VERSION(info
->elf_flags
) >= EF_ARM_EABI_VER4
4194 && (info
->elf_flags
& EF_ARM_BE8
)) {
4195 env
->bswap_code
= 1;
4198 #elif defined(TARGET_UNICORE32)
4201 cpu_asr_write(env
, regs
->uregs
[32], 0xffffffff);
4202 for (i
= 0; i
< 32; i
++) {
4203 env
->regs
[i
] = regs
->uregs
[i
];
4206 #elif defined(TARGET_SPARC)
4210 env
->npc
= regs
->npc
;
4212 for(i
= 0; i
< 8; i
++)
4213 env
->gregs
[i
] = regs
->u_regs
[i
];
4214 for(i
= 0; i
< 8; i
++)
4215 env
->regwptr
[i
] = regs
->u_regs
[i
+ 8];
4217 #elif defined(TARGET_PPC)
4221 #if defined(TARGET_PPC64)
4222 #if defined(TARGET_ABI32)
4223 env
->msr
&= ~((target_ulong
)1 << MSR_SF
);
4225 env
->msr
|= (target_ulong
)1 << MSR_SF
;
4228 env
->nip
= regs
->nip
;
4229 for(i
= 0; i
< 32; i
++) {
4230 env
->gpr
[i
] = regs
->gpr
[i
];
4233 #elif defined(TARGET_M68K)
4236 env
->dregs
[0] = regs
->d0
;
4237 env
->dregs
[1] = regs
->d1
;
4238 env
->dregs
[2] = regs
->d2
;
4239 env
->dregs
[3] = regs
->d3
;
4240 env
->dregs
[4] = regs
->d4
;
4241 env
->dregs
[5] = regs
->d5
;
4242 env
->dregs
[6] = regs
->d6
;
4243 env
->dregs
[7] = regs
->d7
;
4244 env
->aregs
[0] = regs
->a0
;
4245 env
->aregs
[1] = regs
->a1
;
4246 env
->aregs
[2] = regs
->a2
;
4247 env
->aregs
[3] = regs
->a3
;
4248 env
->aregs
[4] = regs
->a4
;
4249 env
->aregs
[5] = regs
->a5
;
4250 env
->aregs
[6] = regs
->a6
;
4251 env
->aregs
[7] = regs
->usp
;
4253 ts
->sim_syscalls
= 1;
4255 #elif defined(TARGET_MICROBLAZE)
4257 env
->regs
[0] = regs
->r0
;
4258 env
->regs
[1] = regs
->r1
;
4259 env
->regs
[2] = regs
->r2
;
4260 env
->regs
[3] = regs
->r3
;
4261 env
->regs
[4] = regs
->r4
;
4262 env
->regs
[5] = regs
->r5
;
4263 env
->regs
[6] = regs
->r6
;
4264 env
->regs
[7] = regs
->r7
;
4265 env
->regs
[8] = regs
->r8
;
4266 env
->regs
[9] = regs
->r9
;
4267 env
->regs
[10] = regs
->r10
;
4268 env
->regs
[11] = regs
->r11
;
4269 env
->regs
[12] = regs
->r12
;
4270 env
->regs
[13] = regs
->r13
;
4271 env
->regs
[14] = regs
->r14
;
4272 env
->regs
[15] = regs
->r15
;
4273 env
->regs
[16] = regs
->r16
;
4274 env
->regs
[17] = regs
->r17
;
4275 env
->regs
[18] = regs
->r18
;
4276 env
->regs
[19] = regs
->r19
;
4277 env
->regs
[20] = regs
->r20
;
4278 env
->regs
[21] = regs
->r21
;
4279 env
->regs
[22] = regs
->r22
;
4280 env
->regs
[23] = regs
->r23
;
4281 env
->regs
[24] = regs
->r24
;
4282 env
->regs
[25] = regs
->r25
;
4283 env
->regs
[26] = regs
->r26
;
4284 env
->regs
[27] = regs
->r27
;
4285 env
->regs
[28] = regs
->r28
;
4286 env
->regs
[29] = regs
->r29
;
4287 env
->regs
[30] = regs
->r30
;
4288 env
->regs
[31] = regs
->r31
;
4289 env
->sregs
[SR_PC
] = regs
->pc
;
4291 #elif defined(TARGET_MIPS)
4295 for(i
= 0; i
< 32; i
++) {
4296 env
->active_tc
.gpr
[i
] = regs
->regs
[i
];
4298 env
->active_tc
.PC
= regs
->cp0_epc
& ~(target_ulong
)1;
4299 if (regs
->cp0_epc
& 1) {
4300 env
->hflags
|= MIPS_HFLAG_M16
;
4303 #elif defined(TARGET_OPENRISC)
4307 for (i
= 0; i
< 32; i
++) {
4308 env
->gpr
[i
] = regs
->gpr
[i
];
4314 #elif defined(TARGET_SH4)
4318 for(i
= 0; i
< 16; i
++) {
4319 env
->gregs
[i
] = regs
->regs
[i
];
4323 #elif defined(TARGET_ALPHA)
4327 for(i
= 0; i
< 28; i
++) {
4328 env
->ir
[i
] = ((abi_ulong
*)regs
)[i
];
4330 env
->ir
[IR_SP
] = regs
->usp
;
4333 #elif defined(TARGET_CRIS)
4335 env
->regs
[0] = regs
->r0
;
4336 env
->regs
[1] = regs
->r1
;
4337 env
->regs
[2] = regs
->r2
;
4338 env
->regs
[3] = regs
->r3
;
4339 env
->regs
[4] = regs
->r4
;
4340 env
->regs
[5] = regs
->r5
;
4341 env
->regs
[6] = regs
->r6
;
4342 env
->regs
[7] = regs
->r7
;
4343 env
->regs
[8] = regs
->r8
;
4344 env
->regs
[9] = regs
->r9
;
4345 env
->regs
[10] = regs
->r10
;
4346 env
->regs
[11] = regs
->r11
;
4347 env
->regs
[12] = regs
->r12
;
4348 env
->regs
[13] = regs
->r13
;
4349 env
->regs
[14] = info
->start_stack
;
4350 env
->regs
[15] = regs
->acr
;
4351 env
->pc
= regs
->erp
;
4353 #elif defined(TARGET_S390X)
4356 for (i
= 0; i
< 16; i
++) {
4357 env
->regs
[i
] = regs
->gprs
[i
];
4359 env
->psw
.mask
= regs
->psw
.mask
;
4360 env
->psw
.addr
= regs
->psw
.addr
;
4363 #error unsupported target CPU
4366 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4367 ts
->stack_base
= info
->start_stack
;
4368 ts
->heap_base
= info
->brk
;
4369 /* This will be filled in on the first SYS_HEAPINFO call. */
4374 if (gdbserver_start(gdbstub_port
) < 0) {
4375 fprintf(stderr
, "qemu: could not open gdbserver on port %d\n",
4379 gdb_handlesig(cpu
, 0);