 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/syscall.h>
#include <sys/resource.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"

static const char *cpu_model;
unsigned long mmap_min_addr;
#if defined(CONFIG_USE_GUEST_BASE)
unsigned long guest_base;
#if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
/*
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
 *
 * This way we will never overlap with our own libraries or binaries or stack
 * or anything else that QEMU maps.
 */
# ifdef TARGET_MIPS
/* MIPS only supports 31 bits of virtual address space for user space */
unsigned long reserved_va = 0x77000000;
# else
unsigned long reserved_va = 0xf7000000;
# endif
#else
unsigned long reserved_va;
#endif
static void usage(void);

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;

/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
   we allocate a bigger stack. Need a better solution, for example
   by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;

void gemu_log(const char *fmt, ...)
    vfprintf(stderr, fmt, ap);

#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)
/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work.  */
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
static int pending_cpus;
/* Make sure everything is in a consistent state for calling fork().  */
void fork_start(void)
    pthread_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    pthread_mutex_lock(&exclusive_lock);

void fork_end(int child)
    mmap_fork_end(child);
        CPUState *cpu, *next_cpu;
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads.  */
        CPU_FOREACH_SAFE(cpu, next_cpu) {
            if (cpu != thread_cpu) {
                QTAILQ_REMOVE(&cpus, cpu, node);
        pthread_mutex_init(&exclusive_lock, NULL);
        pthread_mutex_init(&cpu_list_mutex, NULL);
        pthread_cond_init(&exclusive_cond, NULL);
        pthread_cond_init(&exclusive_resume, NULL);
        pthread_mutex_init(&tcg_ctx.tb_ctx.tb_lock, NULL);
        gdbserver_fork((CPUArchState *)thread_cpu->env_ptr);
        pthread_mutex_unlock(&exclusive_lock);
        pthread_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
/* Wait for pending exclusive operations to complete.  The exclusive lock
   must be held.  */
static inline void exclusive_idle(void)
    while (pending_cpus) {
        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
/* Start an exclusive operation.
   Must only be called from outside cpu_arm_exec.  */
static inline void start_exclusive(void)
    pthread_mutex_lock(&exclusive_lock);

    /* Make all other cpus stop executing.  */
    CPU_FOREACH(other_cpu) {
        if (other_cpu->running) {

    if (pending_cpus > 1) {
        pthread_cond_wait(&exclusive_cond, &exclusive_lock);

/* Finish an exclusive operation.  */
static inline void end_exclusive(void)
    pthread_cond_broadcast(&exclusive_resume);
    pthread_mutex_unlock(&exclusive_lock);
/* Wait for exclusive ops to finish, and begin cpu execution.  */
static inline void cpu_exec_start(CPUState *cpu)
    pthread_mutex_lock(&exclusive_lock);
    pthread_mutex_unlock(&exclusive_lock);

/* Mark cpu as not executing, and release pending exclusive ops.  */
static inline void cpu_exec_end(CPUState *cpu)
    pthread_mutex_lock(&exclusive_lock);
    cpu->running = false;
    if (pending_cpus > 1) {
    if (pending_cpus == 1) {
        pthread_cond_signal(&exclusive_cond);
    pthread_mutex_unlock(&exclusive_lock);

void cpu_list_lock(void)
    pthread_mutex_lock(&cpu_list_mutex);

void cpu_list_unlock(void)
    pthread_mutex_unlock(&cpu_list_mutex);
/***********************************************************/
/* CPUX86 core interface */

void cpu_smm_update(CPUX86State *env)

uint64_t cpu_get_tsc(CPUX86State *env)
    return cpu_get_real_ticks();

static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
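    /* e1/e2 are the two 32-bit words of an x86 segment descriptor: e1 packs
       the low 16 bits of the base and limit, e2 the remaining base and limit
       bits together with the type/flag bits. */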
static uint64_t *idt_table;

static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p[2] = tswap32(addr >> 32);

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);

static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
    set_gate(idt_table + n, 0, dpl, 0, 0);
void cpu_loop(CPUX86State *env)
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_x86_exec(env);
            /* linux syscall from int $0x80 */
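            /* i386 Linux passes the syscall number in EAX and up to six
               arguments in EBX, ECX, EDX, ESI, EDI and EBP; do_syscall()
               receives them in that order. */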
            env->regs[R_EAX] = do_syscall(env,
            /* linux syscall from syscall instruction */
            env->regs[R_EAX] = do_syscall(env,
            env->eip = env->exception_next_eip;
            info.si_signo = SIGBUS;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            /* XXX: potential problem if ABI32 */
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_fault(env);
            info.si_signo = SIGSEGV;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            info.si_signo = SIGSEGV;
            if (!(env->error_code & 1))
                info.si_code = TARGET_SEGV_MAPERR;
                info.si_code = TARGET_SEGV_ACCERR;
            info._sifields._sigfault._addr = env->cr[2];
            queue_signal(env, info.si_signo, &info);
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            /* division by zero */
            info.si_signo = SIGFPE;
            info.si_code = TARGET_FPE_INTDIV;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, &info);
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            info.si_signo = SIGTRAP;
            if (trapnr == EXCP01_DB) {
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->eip;
                info.si_code = TARGET_SI_KERNEL;
                info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            info.si_signo = SIGSEGV;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            info.si_signo = SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, &info);
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            pc = env->segs[R_CS].base + env->eip;
            fprintf(stderr, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
        process_pending_signals(env);
#define get_user_code_u32(x, gaddr, doswap)             \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && (doswap)) {                         \

#define get_user_code_u16(x, gaddr, doswap)             \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && (doswap)) {                         \
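/* These helpers fetch a guest code word/halfword and byte-swap the value
   when 'doswap' is set; the ARM loops below pass env->bswap_code here so
   instructions are read with the correct code endianness. */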
/* Commpage handling -- there is no commpage for AArch64 */

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 *
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
    uint64_t oldval, newval, val;
    target_siginfo_t info;
    /* Based on the 32 bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations. However things like ldrex/strex are much harder so
       there's not much point trying.  */
    cpsr = cpsr_read(env);

    if (get_user_u64(oldval, env->regs[0])) {
        env->exception.vaddress = env->regs[0];

    if (get_user_u64(newval, env->regs[1])) {
        env->exception.vaddress = env->regs[1];

    if (get_user_u64(val, addr)) {
        env->exception.vaddress = addr;

        if (put_user_u64(val, addr)) {
            env->exception.vaddress = addr;

    cpsr_write(env, cpsr, CPSR_C);

    /* We get the PC of the entry address - which is as good as anything,
       on a real kernel what you get depends on which mode it uses. */
    info.si_signo = SIGSEGV;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->exception.vaddress;
    queue_signal(env, info.si_signo, &info);
/* Handle a jump to the kernel code page.  */
do_kernel_trap(CPUARMState *env)
    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        /* ??? No-op. Will need to do better for SMP.  */
    case 0xffff0fc0: /* __kernel_cmpxchg */
        /* XXX: This only works between threads, not between processes.
           It's probably possible to implement this with native host
           operations. However things like ldrex/strex are much harder so
           there's not much point trying.  */
        cpsr = cpsr_read(env);
        /* FIXME: This should SEGV if the access fails.  */
        if (get_user_u32(val, addr))
        if (val == env->regs[0]) {
            /* FIXME: Check for segfaults.  */
            put_user_u32(val, addr);
        cpsr_write(env, cpsr, CPSR_C);
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = env->cp15.tpidrro_el0;
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);

    /* Jump back to the caller.  */
    addr = env->regs[14];
    env->regs[15] = addr;
/* Store exclusive handling for AArch32 */
static int do_strex(CPUARMState *env)
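    /* The translator packs the strex operands into exclusive_info, matching
       the field extractions used below: bits [3:0] = access size, [7:4] =
       status destination register, [11:8] = data register, [15:12] = second
       data register for strexd. */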
    if (env->exclusive_addr != env->exclusive_test) {
    /* We know we're always AArch32 so the address is in uint32_t range
     * unless it was the -1 exclusive-monitor-lost value (which won't
     * match exclusive_test above).
     */
    assert(extract64(env->exclusive_addr, 32, 32) == 0);
    addr = env->exclusive_addr;
    size = env->exclusive_info & 0xf;
        segv = get_user_u8(val, addr);
        segv = get_user_u16(val, addr);
        segv = get_user_u32(val, addr);
        env->exception.vaddress = addr;
        segv = get_user_u32(valhi, addr + 4);
            env->exception.vaddress = addr + 4;
        val = deposit64(val, 32, 32, valhi);
    if (val != env->exclusive_val) {
    val = env->regs[(env->exclusive_info >> 8) & 0xf];
        segv = put_user_u8(val, addr);
        segv = put_user_u16(val, addr);
        segv = put_user_u32(val, addr);
        env->exception.vaddress = addr;
        val = env->regs[(env->exclusive_info >> 12) & 0xf];
        segv = put_user_u32(val, addr + 4);
            env->exception.vaddress = addr + 4;
    env->regs[(env->exclusive_info >> 4) & 0xf] = rc;
void cpu_loop(CPUARMState *env)
    CPUState *cs = CPU(arm_env_get_cpu(env));
    unsigned int n, insn;
    target_siginfo_t info;

        trapnr = cpu_arm_exec(env);
                TaskState *ts = cs->opaque;
                /* we handle the FPU emulation here, as Linux */
                /* we get the opcode */
                /* FIXME - what to do if get_user() fails? */
                get_user_code_u32(opcode, env->regs[15], env->bswap_code);

                rc = EmulateAll(opcode, &ts->fpa, env);
                if (rc == 0) { /* illegal instruction */
                    info.si_signo = SIGILL;
                    info.si_code = TARGET_ILL_ILLOPN;
                    info._sifields._sigfault._addr = env->regs[15];
                    queue_signal(env, info.si_signo, &info);
                } else if (rc < 0) { /* FP exception */
                    /* translate softfloat flags to FPSR flags */
                    if (-rc & float_flag_invalid)
                    if (-rc & float_flag_divbyzero)
                    if (-rc & float_flag_overflow)
                    if (-rc & float_flag_underflow)
                    if (-rc & float_flag_inexact)

                    FPSR fpsr = ts->fpa.fpsr;
                    //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);

                    if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
                        info.si_signo = SIGFPE;

                        /* ordered by priority, least first */
                        if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES;
                        if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND;
                        if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF;
                        if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV;
                        if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV;

                        info._sifields._sigfault._addr = env->regs[15];
                        queue_signal(env, info.si_signo, &info);

                    /* accumulate unenabled exceptions */
                    if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC))
                    if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC))
                    if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC))
                    if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC))
                    if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC))
                } else { /* everything OK */
                if (trapnr == EXCP_BKPT) {
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u16(insn, env->regs[15], env->bswap_code);
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u32(insn, env->regs[15], env->bswap_code);
                        n = (insn & 0xf) | ((insn >> 4) & 0xff0);
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u16(insn, env->regs[15] - 2,
                        /* FIXME - what to do if get_user() fails? */
                        get_user_code_u32(insn, env->regs[15] - 4,

                if (n == ARM_NR_cacheflush) {
                } else if (n == ARM_NR_semihosting
                           || n == ARM_NR_thumb_semihosting) {
                    env->regs[0] = do_arm_semihosting (env);
                } else if (n == 0 || n >= ARM_SYSCALL_BASE || env->thumb) {
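                    /* EABI (and Thumb) syscalls leave the number in r7 and use
                       a zero SWI immediate; OABI encodes the number in the SWI
                       immediate itself, offset by ARM_SYSCALL_BASE, which is
                       stripped below. */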
                    if (env->thumb || n == 0) {
                        n -= ARM_SYSCALL_BASE;
                    if (n > ARM_NR_BASE) {
                        case ARM_NR_cacheflush:
                            cpu_set_tls(env, env->regs[0]);
                        case ARM_NR_breakpoint:
                            env->regs[15] -= env->thumb ? 2 : 4;
                            gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
                            env->regs[0] = -TARGET_ENOSYS;
                        env->regs[0] = do_syscall(env,
            /* just indicate that signals should be handled asap */
            if (!do_strex(env)) {
            /* fall through for segv */
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            addr = env->exception.vaddress;
            info.si_signo = SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = addr;
            queue_signal(env, info.si_signo, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
        case EXCP_KERNEL_TRAP:
            if (do_kernel_trap(env))
            fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
/*
 * Handle AArch64 store-release exclusive.
 *
 * rs = register that receives the status result of the store exclusive
 * rt = register that is stored
 * rt2 = second register stored (for STP)
 */
static int do_strex_a64(CPUARMState *env)
    /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
    size = extract32(env->exclusive_info, 0, 2);
    is_pair = extract32(env->exclusive_info, 2, 1);
    rs = extract32(env->exclusive_info, 4, 5);
    rt = extract32(env->exclusive_info, 9, 5);
    rt2 = extract32(env->exclusive_info, 14, 5);
    addr = env->exclusive_addr;
    if (addr != env->exclusive_test) {
        segv = get_user_u8(val, addr);
        segv = get_user_u16(val, addr);
        segv = get_user_u32(val, addr);
        segv = get_user_u64(val, addr);
        env->exception.vaddress = addr;
    if (val != env->exclusive_val) {
            segv = get_user_u32(val, addr + 4);
            segv = get_user_u64(val, addr + 8);
            env->exception.vaddress = addr + (size == 2 ? 4 : 8);
        if (val != env->exclusive_high) {
    /* handle the zero register */
    val = rt == 31 ? 0 : env->xregs[rt];
        segv = put_user_u8(val, addr);
        segv = put_user_u16(val, addr);
        segv = put_user_u32(val, addr);
        segv = put_user_u64(val, addr);
        /* handle the zero register */
        val = rt2 == 31 ? 0 : env->xregs[rt2];
            segv = put_user_u32(val, addr + 4);
            segv = put_user_u64(val, addr + 8);
            env->exception.vaddress = addr + (size == 2 ? 4 : 8);
    /* rs == 31 encodes a write to the ZR, thus throwing away
     * the status return. This is rather silly but valid.
     */
    /* instruction faulted, PC does not advance */
    /* either way a strex releases any exclusive lock we have */
    env->exclusive_addr = -1;
/* AArch64 main loop */
void cpu_loop(CPUARMState *env)
    CPUState *cs = CPU(arm_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_arm_exec(env);
            env->xregs[0] = do_syscall(env,
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            info.si_signo = SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            if (!do_strex_a64(env)) {
            /* fall through for segv */
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            addr = env->exception.vaddress;
            info.si_signo = SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = addr;
            queue_signal(env, info.si_signo, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
        /* Exception return on AArch64 always clears the exclusive monitor,
         * so any return to running guest code implies this.
         * A strex (successful or otherwise) also clears the monitor, so
         * we don't need to specialcase EXCP_STREX.
         */
        env->exclusive_addr = -1;
#endif /* ndef TARGET_ABI32 */
#ifdef TARGET_UNICORE32

void cpu_loop(CPUUniCore32State *env)
    CPUState *cs = CPU(uc32_env_get_cpu(env));
    unsigned int n, insn;
    target_siginfo_t info;

        trapnr = uc32_cpu_exec(env);
        case UC32_EXCP_PRIV:
            get_user_u32(insn, env->regs[31] - 4);
            n = insn & 0xffffff;
            if (n >= UC32_SYSCALL_BASE) {
                n -= UC32_SYSCALL_BASE;
                if (n == UC32_SYSCALL_NR_set_tls) {
                    cpu_set_tls(env, env->regs[0]);
                    env->regs[0] = do_syscall(env,
        case UC32_EXCP_DTRAP:
        case UC32_EXCP_ITRAP:
            info.si_signo = SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->cp0.c4_faultaddr;
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
        process_pending_signals(env);

    fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
    cpu_dump_state(cs, stderr, fprintf, 0);
#define SPARC64_STACK_BIAS 2047

/* WARNING: dealing with register windows _is_ complicated. More info
   can be found at http://www.sics.se/~psm/sparcstack.html */
static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
    index = (index + cwp * 16) % (16 * env->nwindows);
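    /* regbase[] holds the windows back to back: each window is 16 registers
       (8 locals followed by 8 ins), hence the cwp * 16 stride above. */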
    /* wrap handling : if cwp is on the last window, then we use the
       registers 'after' the end */
    if (index < 8 && env->cwp == env->nwindows - 1)
        index += 16 * env->nwindows;

/* save the register window 'cwp1' */
static inline void save_window_offset(CPUSPARCState *env, int cwp1)
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
    sp_ptr += SPARC64_STACK_BIAS;
#if defined(DEBUG_WIN)
    printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",
    for (i = 0; i < 16; i++) {
        /* FIXME - what to do if put_user() fails? */
        put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);

static void save_window(CPUSPARCState *env)
#ifndef TARGET_SPARC64
    unsigned int new_wim;
    new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));

static void restore_window(CPUSPARCState *env)
#ifndef TARGET_SPARC64
    unsigned int new_wim;
    unsigned int i, cwp1;

#ifndef TARGET_SPARC64
    new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);

    /* restore the invalid window */
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
    sp_ptr += SPARC64_STACK_BIAS;
#if defined(DEBUG_WIN)
    printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n",
    for (i = 0; i < 16; i++) {
        /* FIXME - what to do if get_user() fails? */
        get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);
#ifdef TARGET_SPARC64
    if (env->cleanwin < env->nwindows - 1)
static void flush_windows(CPUSPARCState *env)
        /* if restore would invoke restore_window(), then we can stop */
        cwp1 = cpu_cwp_inc(env, env->cwp + offset);
#ifndef TARGET_SPARC64
        if (env->wim & (1 << cwp1))
        if (env->canrestore == 0)
        save_window_offset(env, cwp1);
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
#ifndef TARGET_SPARC64
    /* set wim so that restore will reload the registers */
    env->wim = 1 << cwp1;
#if defined(DEBUG_WIN)
    printf("flush_windows: nb=%d\n", offset - 1);
void cpu_loop (CPUSPARCState *env)
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_sparc_exec (env);

        /* Compute PSR before exposing state.  */
        if (env->cc_op != CC_OP_FLAGS) {

#ifndef TARGET_SPARC64
            ret = do_syscall (env, env->gregs[1],
                              env->regwptr[0], env->regwptr[1],
                              env->regwptr[2], env->regwptr[3],
                              env->regwptr[4], env->regwptr[5],
            if ((abi_ulong)ret >= (abi_ulong)(-515)) {
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
                env->xcc |= PSR_CARRY;
                env->psr |= PSR_CARRY;
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
                env->xcc &= ~PSR_CARRY;
                env->psr &= ~PSR_CARRY;
            env->regwptr[0] = ret;
            /* next instruction */
            env->npc = env->npc + 4;
        case 0x83: /* flush windows */
            /* next instruction */
            env->npc = env->npc + 4;
#ifndef TARGET_SPARC64
        case TT_WIN_OVF: /* window overflow */
        case TT_WIN_UNF: /* window underflow */
            restore_window(env);
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->mmuregs[4];
            queue_signal(env, info.si_signo, &info);
        case TT_SPILL: /* window overflow */
        case TT_FILL: /* window underflow */
            restore_window(env);
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            if (trapnr == TT_DFAULT)
                info._sifields._sigfault._addr = env->dmmuregs[4];
                info._sifields._sigfault._addr = cpu_tsptr(env)->tpc;
            queue_signal(env, info.si_signo, &info);
#ifndef TARGET_ABI32
            sparc64_get_context(env);
            sparc64_set_context(env);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            printf ("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals (env);
static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env)

uint64_t cpu_ppc_load_tbl(CPUPPCState *env)
    return cpu_ppc_get_tb(env);

uint32_t cpu_ppc_load_tbu(CPUPPCState *env)
    return cpu_ppc_get_tb(env) >> 32;

uint64_t cpu_ppc_load_atbl(CPUPPCState *env)
    return cpu_ppc_get_tb(env);

uint32_t cpu_ppc_load_atbu(CPUPPCState *env)
    return cpu_ppc_get_tb(env) >> 32;

uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env)
    __attribute__ (( alias ("cpu_ppc_load_tbu") ));

uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env)
    return cpu_ppc_load_tbl(env) & 0x3FFFFF80;

/* XXX: to be fixed */
int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)

int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)

#define EXCP_DUMP(env, fmt, ...)                                        \
    CPUState *cs = ENV_GET_CPU(env);                                    \
    fprintf(stderr, fmt , ## __VA_ARGS__);                              \
    cpu_dump_state(cs, stderr, fprintf, 0);                             \
    qemu_log(fmt, ## __VA_ARGS__);                                      \
    if (qemu_log_enabled()) {                                           \
        log_cpu_state(cs, 0);                                           \
static int do_store_exclusive(CPUPPCState *env)
    target_ulong page_addr;
    target_ulong val, val2 __attribute__((unused)) = 0;

    addr = env->reserve_ea;
    page_addr = addr & TARGET_PAGE_MASK;
    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {
        int reg = env->reserve_info & 0x1f;
        int size = env->reserve_info >> 5;
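        /* reserve_info packs the stcx. operands: the low 5 bits select the
           GPR to store and the remaining bits give the access size in bytes,
           matching the extraction just above. */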
        if (addr == env->reserve_addr) {
            case 1: segv = get_user_u8(val, addr); break;
            case 2: segv = get_user_u16(val, addr); break;
            case 4: segv = get_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
            case 8: segv = get_user_u64(val, addr); break;
                segv = get_user_u64(val, addr);
                    segv = get_user_u64(val2, addr + 8);
            if (!segv && val == env->reserve_val) {
                val = env->gpr[reg];
                case 1: segv = put_user_u8(val, addr); break;
                case 2: segv = put_user_u16(val, addr); break;
                case 4: segv = put_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
                case 8: segv = put_user_u64(val, addr); break;
                    if (val2 == env->reserve_val2) {
                            val = env->gpr[reg+1];
                            val2 = env->gpr[reg+1];
                        segv = put_user_u64(val, addr);
                            segv = put_user_u64(val2, addr + 8);
    env->crf[0] = (stored << 1) | xer_so;
    env->reserve_addr = (target_ulong)-1;
void cpu_loop(CPUPPCState *env)
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_siginfo_t info;

        trapnr = cpu_ppc_exec(env);
        case POWERPC_EXCP_NONE:
        case POWERPC_EXCP_CRITICAL: /* Critical input */
            cpu_abort(cs, "Critical interrupt while in user mode. "
        case POWERPC_EXCP_MCHECK: /* Machine check exception */
            cpu_abort(cs, "Machine check exception while in user mode. "
        case POWERPC_EXCP_DSI: /* Data storage exception */
            EXCP_DUMP(env, "Invalid data memory access: 0x" TARGET_FMT_lx "\n",
            /* XXX: check this. Seems bugged */
            switch (env->error_code & 0xFF000000) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info.si_signo = TARGET_SIGILL;
                info.si_code = TARGET_ILL_ILLADR;
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_ACCERR;
                /* Let's send a regular segfault... */
                EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_ISI: /* Instruction storage exception */
            EXCP_DUMP(env, "Invalid instruction fetch: 0x" TARGET_FMT_lx "\n",
                      env->spr[SPR_SRR0]);
            /* XXX: check this */
            switch (env->error_code & 0xFF000000) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_ACCERR;
                /* Let's send a regular segfault... */
                EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_EXTERNAL: /* External input */
            cpu_abort(cs, "External interrupt while in user mode. "
        case POWERPC_EXCP_ALIGN: /* Alignment exception */
            EXCP_DUMP(env, "Unaligned memory access\n");
            /* XXX: check this */
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_PROGRAM: /* Program exception */
            /* XXX: check this */
            switch (env->error_code & ~0xF) {
            case POWERPC_EXCP_FP:
                EXCP_DUMP(env, "Floating point program exception\n");
                info.si_signo = TARGET_SIGFPE;
                switch (env->error_code & 0xF) {
                case POWERPC_EXCP_FP_OX:
                    info.si_code = TARGET_FPE_FLTOVF;
                case POWERPC_EXCP_FP_UX:
                    info.si_code = TARGET_FPE_FLTUND;
                case POWERPC_EXCP_FP_ZX:
                case POWERPC_EXCP_FP_VXZDZ:
                    info.si_code = TARGET_FPE_FLTDIV;
                case POWERPC_EXCP_FP_XX:
                    info.si_code = TARGET_FPE_FLTRES;
                case POWERPC_EXCP_FP_VXSOFT:
                    info.si_code = TARGET_FPE_FLTINV;
                case POWERPC_EXCP_FP_VXSNAN:
                case POWERPC_EXCP_FP_VXISI:
                case POWERPC_EXCP_FP_VXIDI:
                case POWERPC_EXCP_FP_VXIMZ:
                case POWERPC_EXCP_FP_VXVC:
                case POWERPC_EXCP_FP_VXSQRT:
                case POWERPC_EXCP_FP_VXCVI:
                    info.si_code = TARGET_FPE_FLTSUB;
                    EXCP_DUMP(env, "Unknown floating point exception (%02x)\n",
            case POWERPC_EXCP_INVAL:
                EXCP_DUMP(env, "Invalid instruction\n");
                info.si_signo = TARGET_SIGILL;
                switch (env->error_code & 0xF) {
                case POWERPC_EXCP_INVAL_INVAL:
                    info.si_code = TARGET_ILL_ILLOPC;
                case POWERPC_EXCP_INVAL_LSWX:
                    info.si_code = TARGET_ILL_ILLOPN;
                case POWERPC_EXCP_INVAL_SPR:
                    info.si_code = TARGET_ILL_PRVREG;
                case POWERPC_EXCP_INVAL_FP:
                    info.si_code = TARGET_ILL_COPROC;
                    EXCP_DUMP(env, "Unknown invalid operation (%02x)\n",
                              env->error_code & 0xF);
                    info.si_code = TARGET_ILL_ILLADR;
            case POWERPC_EXCP_PRIV:
                EXCP_DUMP(env, "Privilege violation\n");
                info.si_signo = TARGET_SIGILL;
                switch (env->error_code & 0xF) {
                case POWERPC_EXCP_PRIV_OPC:
                    info.si_code = TARGET_ILL_PRVOPC;
                case POWERPC_EXCP_PRIV_REG:
                    info.si_code = TARGET_ILL_PRVREG;
                    EXCP_DUMP(env, "Unknown privilege violation (%02x)\n",
                              env->error_code & 0xF);
                    info.si_code = TARGET_ILL_PRVOPC;
            case POWERPC_EXCP_TRAP:
                cpu_abort(cs, "Tried to call a TRAP\n");
                /* Should not happen ! */
                cpu_abort(cs, "Unknown program exception (%02x)\n",
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
            EXCP_DUMP(env, "No floating point allowed\n");
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_SYSCALL: /* System call exception */
            cpu_abort(cs, "Syscall exception while in user mode. "
        case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
            EXCP_DUMP(env, "No APU instruction allowed\n");
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_DECR: /* Decrementer exception */
            cpu_abort(cs, "Decrementer interrupt while in user mode. "
        case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
            cpu_abort(cs, "Fix interval timer interrupt while in user mode. "
        case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
            cpu_abort(cs, "Watchdog timer interrupt while in user mode. "
        case POWERPC_EXCP_DTLB: /* Data TLB error */
            cpu_abort(cs, "Data TLB exception while in user mode. "
        case POWERPC_EXCP_ITLB: /* Instruction TLB error */
            cpu_abort(cs, "Instruction TLB exception while in user mode. "
        case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */
            EXCP_DUMP(env, "No SPE/floating-point instruction allowed\n");
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */
            cpu_abort(cs, "Embedded floating-point data IRQ not handled\n");
        case POWERPC_EXCP_EFPRI: /* Embedded floating-point round IRQ */
            cpu_abort(cs, "Embedded floating-point round IRQ not handled\n");
        case POWERPC_EXCP_EPERFM: /* Embedded performance monitor IRQ */
            cpu_abort(cs, "Performance monitor exception not handled\n");
        case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
            cpu_abort(cs, "Doorbell interrupt while in user mode. "
        case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
            cpu_abort(cs, "Doorbell critical interrupt while in user mode. "
        case POWERPC_EXCP_RESET: /* System reset exception */
            cpu_abort(cs, "Reset interrupt while in user mode. "
        case POWERPC_EXCP_DSEG: /* Data segment exception */
            cpu_abort(cs, "Data segment exception while in user mode. "
        case POWERPC_EXCP_ISEG: /* Instruction segment exception */
            cpu_abort(cs, "Instruction segment exception "
                      "while in user mode. Aborting\n");
        /* PowerPC 64 with hypervisor mode support */
        case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
            cpu_abort(cs, "Hypervisor decrementer interrupt "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_TRACE: /* Trace exception */
            /* we use this exception to emulate step-by-step execution mode. */
        /* PowerPC 64 with hypervisor mode support */
        case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
            cpu_abort(cs, "Hypervisor data storage exception "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_HISI: /* Hypervisor instruction storage excp */
            cpu_abort(cs, "Hypervisor instruction storage exception "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
            cpu_abort(cs, "Hypervisor data segment exception "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment excp */
            cpu_abort(cs, "Hypervisor instruction segment exception "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_VPU: /* Vector unavailable exception */
            EXCP_DUMP(env, "No Altivec instructions allowed\n");
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_COPROC;
            info._sifields._sigfault._addr = env->nip - 4;
            queue_signal(env, info.si_signo, &info);
        case POWERPC_EXCP_PIT: /* Programmable interval timer IRQ */
            cpu_abort(cs, "Programmable interval timer interrupt "
                      "while in user mode. Aborting\n");
        case POWERPC_EXCP_IO: /* IO error exception */
            cpu_abort(cs, "IO error exception while in user mode. "
        case POWERPC_EXCP_RUNM: /* Run mode exception */
            cpu_abort(cs, "Run mode exception while in user mode. "
        case POWERPC_EXCP_EMUL: /* Emulation trap exception */
            cpu_abort(cs, "Emulation trap exception not handled\n");
        case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
            cpu_abort(cs, "Instruction fetch TLB exception "
                      "while in user-mode. Aborting");
        case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
            cpu_abort(cs, "Data load TLB exception while in user-mode. "
        case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
            cpu_abort(cs, "Data store TLB exception while in user-mode. "
        case POWERPC_EXCP_FPA: /* Floating-point assist exception */
            cpu_abort(cs, "Floating-point assist exception not handled\n");
        case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
            cpu_abort(cs, "Instruction address breakpoint exception "
        case POWERPC_EXCP_SMI: /* System management interrupt */
            cpu_abort(cs, "System management interrupt while in user mode. "
        case POWERPC_EXCP_THERM: /* Thermal interrupt */
            cpu_abort(cs, "Thermal interrupt while in user mode. "
        case POWERPC_EXCP_PERFM: /* Embedded performance monitor IRQ */
            cpu_abort(cs, "Performance monitor exception not handled\n");
        case POWERPC_EXCP_VPUA: /* Vector assist exception */
            cpu_abort(cs, "Vector assist exception not handled\n");
        case POWERPC_EXCP_SOFTP: /* Soft patch exception */
            cpu_abort(cs, "Soft patch exception not handled\n");
        case POWERPC_EXCP_MAINT: /* Maintenance exception */
            cpu_abort(cs, "Maintenance exception while in user mode. "
        case POWERPC_EXCP_STOP: /* stop translation */
            /* We did invalidate the instruction cache. Go on */
        case POWERPC_EXCP_BRANCH: /* branch instruction: */
            /* We just stopped because of a branch. Go on */
        case POWERPC_EXCP_SYSCALL_USER:
            /* system call in user-mode emulation */
            /* PPC ABI uses overflow flag in cr0 to signal an error */
            env->crf[0] &= ~0x1;
            ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4],
                             env->gpr[5], env->gpr[6], env->gpr[7],
            if (ret == (target_ulong)(-TARGET_QEMU_ESIGRETURN)) {
                /* Returning from a successful sigreturn syscall.
                   Avoid corrupting register state.  */
            if (ret > (target_ulong)(-515)) {
        case POWERPC_EXCP_STCX:
            if (do_store_exclusive(env)) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->nip;
                queue_signal(env, info.si_signo, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            cpu_abort(cs, "Unknown exception 0x%x. Aborting\n", trapnr);
        process_pending_signals(env);

# ifdef TARGET_ABI_MIPSO32
# define MIPS_SYS(name, args) args,
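/* Argument count for each o32 syscall, indexed by (syscall number - 4000);
   cpu_loop() uses it to decide how many arguments to fetch, including the
   fifth and later ones that o32 passes on the user stack. */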
static const uint8_t mips_syscall_args[] = {
        MIPS_SYS(sys_syscall, 8)        /* 4000 */
        MIPS_SYS(sys_exit, 1)
        MIPS_SYS(sys_fork, 0)
        MIPS_SYS(sys_read, 3)
        MIPS_SYS(sys_write, 3)
        MIPS_SYS(sys_open, 3)           /* 4005 */
        MIPS_SYS(sys_close, 1)
        MIPS_SYS(sys_waitpid, 3)
        MIPS_SYS(sys_creat, 2)
        MIPS_SYS(sys_link, 2)
        MIPS_SYS(sys_unlink, 1)         /* 4010 */
        MIPS_SYS(sys_execve, 0)
        MIPS_SYS(sys_chdir, 1)
        MIPS_SYS(sys_time, 1)
        MIPS_SYS(sys_mknod, 3)
        MIPS_SYS(sys_chmod, 2)          /* 4015 */
        MIPS_SYS(sys_lchown, 3)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_ni_syscall, 0)     /* was sys_stat */
        MIPS_SYS(sys_lseek, 3)
        MIPS_SYS(sys_getpid, 0)         /* 4020 */
        MIPS_SYS(sys_mount, 5)
        MIPS_SYS(sys_umount, 1)
        MIPS_SYS(sys_setuid, 1)
        MIPS_SYS(sys_getuid, 0)
        MIPS_SYS(sys_stime, 1)          /* 4025 */
        MIPS_SYS(sys_ptrace, 4)
        MIPS_SYS(sys_alarm, 1)
        MIPS_SYS(sys_ni_syscall, 0)     /* was sys_fstat */
        MIPS_SYS(sys_pause, 0)
        MIPS_SYS(sys_utime, 2)          /* 4030 */
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_access, 2)
        MIPS_SYS(sys_nice, 1)
        MIPS_SYS(sys_ni_syscall, 0)     /* 4035 */
        MIPS_SYS(sys_sync, 0)
        MIPS_SYS(sys_kill, 2)
        MIPS_SYS(sys_rename, 2)
        MIPS_SYS(sys_mkdir, 2)
        MIPS_SYS(sys_rmdir, 1)          /* 4040 */
        MIPS_SYS(sys_dup, 1)
        MIPS_SYS(sys_pipe, 0)
        MIPS_SYS(sys_times, 1)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_brk, 1)            /* 4045 */
        MIPS_SYS(sys_setgid, 1)
        MIPS_SYS(sys_getgid, 0)
        MIPS_SYS(sys_ni_syscall, 0)     /* was signal(2) */
        MIPS_SYS(sys_geteuid, 0)
        MIPS_SYS(sys_getegid, 0)        /* 4050 */
        MIPS_SYS(sys_acct, 0)
        MIPS_SYS(sys_umount2, 2)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_ioctl, 3)
        MIPS_SYS(sys_fcntl, 3)          /* 4055 */
        MIPS_SYS(sys_ni_syscall, 2)
        MIPS_SYS(sys_setpgid, 2)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_olduname, 1)
        MIPS_SYS(sys_umask, 1)          /* 4060 */
        MIPS_SYS(sys_chroot, 1)
        MIPS_SYS(sys_ustat, 2)
        MIPS_SYS(sys_dup2, 2)
        MIPS_SYS(sys_getppid, 0)
        MIPS_SYS(sys_getpgrp, 0)        /* 4065 */
        MIPS_SYS(sys_setsid, 0)
        MIPS_SYS(sys_sigaction, 3)
        MIPS_SYS(sys_sgetmask, 0)
        MIPS_SYS(sys_ssetmask, 1)
        MIPS_SYS(sys_setreuid, 2)       /* 4070 */
        MIPS_SYS(sys_setregid, 2)
        MIPS_SYS(sys_sigsuspend, 0)
        MIPS_SYS(sys_sigpending, 1)
        MIPS_SYS(sys_sethostname, 2)
        MIPS_SYS(sys_setrlimit, 2)      /* 4075 */
        MIPS_SYS(sys_getrlimit, 2)
        MIPS_SYS(sys_getrusage, 2)
        MIPS_SYS(sys_gettimeofday, 2)
        MIPS_SYS(sys_settimeofday, 2)
        MIPS_SYS(sys_getgroups, 2)      /* 4080 */
        MIPS_SYS(sys_setgroups, 2)
        MIPS_SYS(sys_ni_syscall, 0)     /* old_select */
        MIPS_SYS(sys_symlink, 2)
        MIPS_SYS(sys_ni_syscall, 0)     /* was sys_lstat */
        MIPS_SYS(sys_readlink, 3)       /* 4085 */
        MIPS_SYS(sys_uselib, 1)
        MIPS_SYS(sys_swapon, 2)
        MIPS_SYS(sys_reboot, 3)
        MIPS_SYS(old_readdir, 3)
        MIPS_SYS(old_mmap, 6)           /* 4090 */
        MIPS_SYS(sys_munmap, 2)
        MIPS_SYS(sys_truncate, 2)
        MIPS_SYS(sys_ftruncate, 2)
        MIPS_SYS(sys_fchmod, 2)
        MIPS_SYS(sys_fchown, 3)         /* 4095 */
        MIPS_SYS(sys_getpriority, 2)
        MIPS_SYS(sys_setpriority, 3)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_statfs, 2)
        MIPS_SYS(sys_fstatfs, 2)        /* 4100 */
        MIPS_SYS(sys_ni_syscall, 0)     /* was ioperm(2) */
        MIPS_SYS(sys_socketcall, 2)
        MIPS_SYS(sys_syslog, 3)
        MIPS_SYS(sys_setitimer, 3)
        MIPS_SYS(sys_getitimer, 2)      /* 4105 */
        MIPS_SYS(sys_newstat, 2)
        MIPS_SYS(sys_newlstat, 2)
        MIPS_SYS(sys_newfstat, 2)
        MIPS_SYS(sys_uname, 1)
        MIPS_SYS(sys_ni_syscall, 0)     /* 4110 was iopl(2) */
        MIPS_SYS(sys_vhangup, 0)
        MIPS_SYS(sys_ni_syscall, 0)     /* was sys_idle() */
        MIPS_SYS(sys_ni_syscall, 0)     /* was sys_vm86 */
        MIPS_SYS(sys_wait4, 4)
        MIPS_SYS(sys_swapoff, 1)        /* 4115 */
        MIPS_SYS(sys_sysinfo, 1)
        MIPS_SYS(sys_ipc, 6)
        MIPS_SYS(sys_fsync, 1)
        MIPS_SYS(sys_sigreturn, 0)
        MIPS_SYS(sys_clone, 6)          /* 4120 */
        MIPS_SYS(sys_setdomainname, 2)
        MIPS_SYS(sys_newuname, 1)
        MIPS_SYS(sys_ni_syscall, 0)     /* sys_modify_ldt */
        MIPS_SYS(sys_adjtimex, 1)
        MIPS_SYS(sys_mprotect, 3)       /* 4125 */
        MIPS_SYS(sys_sigprocmask, 3)
        MIPS_SYS(sys_ni_syscall, 0)     /* was create_module */
        MIPS_SYS(sys_init_module, 5)
        MIPS_SYS(sys_delete_module, 1)
        MIPS_SYS(sys_ni_syscall, 0)     /* 4130 was get_kernel_syms */
        MIPS_SYS(sys_quotactl, 0)
        MIPS_SYS(sys_getpgid, 1)
        MIPS_SYS(sys_fchdir, 1)
        MIPS_SYS(sys_bdflush, 2)
        MIPS_SYS(sys_sysfs, 3)          /* 4135 */
        MIPS_SYS(sys_personality, 1)
        MIPS_SYS(sys_ni_syscall, 0)     /* for afs_syscall */
        MIPS_SYS(sys_setfsuid, 1)
        MIPS_SYS(sys_setfsgid, 1)
        MIPS_SYS(sys_llseek, 5)         /* 4140 */
        MIPS_SYS(sys_getdents, 3)
        MIPS_SYS(sys_select, 5)
        MIPS_SYS(sys_flock, 2)
        MIPS_SYS(sys_msync, 3)
        MIPS_SYS(sys_readv, 3)          /* 4145 */
        MIPS_SYS(sys_writev, 3)
        MIPS_SYS(sys_cacheflush, 3)
        MIPS_SYS(sys_cachectl, 3)
        MIPS_SYS(sys_sysmips, 4)
        MIPS_SYS(sys_ni_syscall, 0)     /* 4150 */
        MIPS_SYS(sys_getsid, 1)
        MIPS_SYS(sys_fdatasync, 0)
        MIPS_SYS(sys_sysctl, 1)
        MIPS_SYS(sys_mlock, 2)
        MIPS_SYS(sys_munlock, 2)        /* 4155 */
        MIPS_SYS(sys_mlockall, 1)
        MIPS_SYS(sys_munlockall, 0)
        MIPS_SYS(sys_sched_setparam, 2)
        MIPS_SYS(sys_sched_getparam, 2)
        MIPS_SYS(sys_sched_setscheduler, 3)     /* 4160 */
        MIPS_SYS(sys_sched_getscheduler, 1)
        MIPS_SYS(sys_sched_yield, 0)
        MIPS_SYS(sys_sched_get_priority_max, 1)
        MIPS_SYS(sys_sched_get_priority_min, 1)
        MIPS_SYS(sys_sched_rr_get_interval, 2)  /* 4165 */
        MIPS_SYS(sys_nanosleep, 2)
        MIPS_SYS(sys_mremap, 5)
        MIPS_SYS(sys_accept, 3)
        MIPS_SYS(sys_bind, 3)
        MIPS_SYS(sys_connect, 3)        /* 4170 */
        MIPS_SYS(sys_getpeername, 3)
        MIPS_SYS(sys_getsockname, 3)
        MIPS_SYS(sys_getsockopt, 5)
        MIPS_SYS(sys_listen, 2)
        MIPS_SYS(sys_recv, 4)           /* 4175 */
        MIPS_SYS(sys_recvfrom, 6)
        MIPS_SYS(sys_recvmsg, 3)
        MIPS_SYS(sys_send, 4)
        MIPS_SYS(sys_sendmsg, 3)
        MIPS_SYS(sys_sendto, 6)         /* 4180 */
        MIPS_SYS(sys_setsockopt, 5)
        MIPS_SYS(sys_shutdown, 2)
        MIPS_SYS(sys_socket, 3)
        MIPS_SYS(sys_socketpair, 4)
        MIPS_SYS(sys_setresuid, 3)      /* 4185 */
        MIPS_SYS(sys_getresuid, 3)
        MIPS_SYS(sys_ni_syscall, 0)     /* was sys_query_module */
        MIPS_SYS(sys_poll, 3)
        MIPS_SYS(sys_nfsservctl, 3)
        MIPS_SYS(sys_setresgid, 3)      /* 4190 */
        MIPS_SYS(sys_getresgid, 3)
        MIPS_SYS(sys_prctl, 5)
        MIPS_SYS(sys_rt_sigreturn, 0)
        MIPS_SYS(sys_rt_sigaction, 4)
        MIPS_SYS(sys_rt_sigprocmask, 4) /* 4195 */
        MIPS_SYS(sys_rt_sigpending, 2)
        MIPS_SYS(sys_rt_sigtimedwait, 4)
        MIPS_SYS(sys_rt_sigqueueinfo, 3)
        MIPS_SYS(sys_rt_sigsuspend, 0)
        MIPS_SYS(sys_pread64, 6)        /* 4200 */
        MIPS_SYS(sys_pwrite64, 6)
        MIPS_SYS(sys_chown, 3)
        MIPS_SYS(sys_getcwd, 2)
        MIPS_SYS(sys_capget, 2)
        MIPS_SYS(sys_capset, 2)         /* 4205 */
        MIPS_SYS(sys_sigaltstack, 2)
        MIPS_SYS(sys_sendfile, 4)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_mmap2, 6)          /* 4210 */
        MIPS_SYS(sys_truncate64, 4)
        MIPS_SYS(sys_ftruncate64, 4)
        MIPS_SYS(sys_stat64, 2)
        MIPS_SYS(sys_lstat64, 2)
        MIPS_SYS(sys_fstat64, 2)        /* 4215 */
        MIPS_SYS(sys_pivot_root, 2)
        MIPS_SYS(sys_mincore, 3)
        MIPS_SYS(sys_madvise, 3)
        MIPS_SYS(sys_getdents64, 3)
        MIPS_SYS(sys_fcntl64, 3)        /* 4220 */
        MIPS_SYS(sys_ni_syscall, 0)
        MIPS_SYS(sys_gettid, 0)
        MIPS_SYS(sys_readahead, 5)
        MIPS_SYS(sys_setxattr, 5)
        MIPS_SYS(sys_lsetxattr, 5)      /* 4225 */
        MIPS_SYS(sys_fsetxattr, 5)
        MIPS_SYS(sys_getxattr, 4)
        MIPS_SYS(sys_lgetxattr, 4)
        MIPS_SYS(sys_fgetxattr, 4)
        MIPS_SYS(sys_listxattr, 3)      /* 4230 */
        MIPS_SYS(sys_llistxattr, 3)
        MIPS_SYS(sys_flistxattr, 3)
        MIPS_SYS(sys_removexattr, 2)
        MIPS_SYS(sys_lremovexattr, 2)
        MIPS_SYS(sys_fremovexattr, 2)   /* 4235 */
        MIPS_SYS(sys_tkill, 2)
        MIPS_SYS(sys_sendfile64, 5)
        MIPS_SYS(sys_futex, 6)
        MIPS_SYS(sys_sched_setaffinity, 3)
        MIPS_SYS(sys_sched_getaffinity, 3)      /* 4240 */
        MIPS_SYS(sys_io_setup, 2)
        MIPS_SYS(sys_io_destroy, 1)
        MIPS_SYS(sys_io_getevents, 5)
        MIPS_SYS(sys_io_submit, 3)
        MIPS_SYS(sys_io_cancel, 3)      /* 4245 */
        MIPS_SYS(sys_exit_group, 1)
        MIPS_SYS(sys_lookup_dcookie, 3)
        MIPS_SYS(sys_epoll_create, 1)
        MIPS_SYS(sys_epoll_ctl, 4)
        MIPS_SYS(sys_epoll_wait, 3)     /* 4250 */
        MIPS_SYS(sys_remap_file_pages, 5)
        MIPS_SYS(sys_set_tid_address, 1)
        MIPS_SYS(sys_restart_syscall, 0)
        MIPS_SYS(sys_fadvise64_64, 7)
        MIPS_SYS(sys_statfs64, 3)       /* 4255 */
        MIPS_SYS(sys_fstatfs64, 2)
        MIPS_SYS(sys_timer_create, 3)
        MIPS_SYS(sys_timer_settime, 4)
        MIPS_SYS(sys_timer_gettime, 2)
        MIPS_SYS(sys_timer_getoverrun, 1)       /* 4260 */
        MIPS_SYS(sys_timer_delete, 1)
        MIPS_SYS(sys_clock_settime, 2)
        MIPS_SYS(sys_clock_gettime, 2)
        MIPS_SYS(sys_clock_getres, 2)
        MIPS_SYS(sys_clock_nanosleep, 4)        /* 4265 */
        MIPS_SYS(sys_tgkill, 3)
        MIPS_SYS(sys_utimes, 2)
        MIPS_SYS(sys_mbind, 4)
        MIPS_SYS(sys_ni_syscall, 0)     /* sys_get_mempolicy */
        MIPS_SYS(sys_ni_syscall, 0)     /* 4270 sys_set_mempolicy */
        MIPS_SYS(sys_mq_open, 4)
        MIPS_SYS(sys_mq_unlink, 1)
        MIPS_SYS(sys_mq_timedsend, 5)
        MIPS_SYS(sys_mq_timedreceive, 5)
        MIPS_SYS(sys_mq_notify, 2)      /* 4275 */
        MIPS_SYS(sys_mq_getsetattr, 3)
        MIPS_SYS(sys_ni_syscall, 0)     /* sys_vserver */
        MIPS_SYS(sys_waitid, 4)
        MIPS_SYS(sys_ni_syscall, 0)     /* available, was setaltroot */
        MIPS_SYS(sys_add_key, 5)
        MIPS_SYS(sys_request_key, 4)
        MIPS_SYS(sys_keyctl, 5)
        MIPS_SYS(sys_set_thread_area, 1)
        MIPS_SYS(sys_inotify_init, 0)
        MIPS_SYS(sys_inotify_add_watch, 3)      /* 4285 */
        MIPS_SYS(sys_inotify_rm_watch, 2)
        MIPS_SYS(sys_migrate_pages, 4)
        MIPS_SYS(sys_openat, 4)
        MIPS_SYS(sys_mkdirat, 3)
        MIPS_SYS(sys_mknodat, 4)        /* 4290 */
        MIPS_SYS(sys_fchownat, 5)
        MIPS_SYS(sys_futimesat, 3)
        MIPS_SYS(sys_fstatat64, 4)
        MIPS_SYS(sys_unlinkat, 3)
        MIPS_SYS(sys_renameat, 4)       /* 4295 */
        MIPS_SYS(sys_linkat, 5)
        MIPS_SYS(sys_symlinkat, 3)
        MIPS_SYS(sys_readlinkat, 4)
        MIPS_SYS(sys_fchmodat, 3)
        MIPS_SYS(sys_faccessat, 3)      /* 4300 */
        MIPS_SYS(sys_pselect6, 6)
        MIPS_SYS(sys_ppoll, 5)
        MIPS_SYS(sys_unshare, 1)
        MIPS_SYS(sys_splice, 6)
        MIPS_SYS(sys_sync_file_range, 7)        /* 4305 */
        MIPS_SYS(sys_tee, 4)
        MIPS_SYS(sys_vmsplice, 4)
        MIPS_SYS(sys_move_pages, 6)
        MIPS_SYS(sys_set_robust_list, 2)
        MIPS_SYS(sys_get_robust_list, 3)        /* 4310 */
        MIPS_SYS(sys_kexec_load, 4)
        MIPS_SYS(sys_getcpu, 3)
        MIPS_SYS(sys_epoll_pwait, 6)
        MIPS_SYS(sys_ioprio_set, 3)
        MIPS_SYS(sys_ioprio_get, 2)
        MIPS_SYS(sys_utimensat, 4)
        MIPS_SYS(sys_signalfd, 3)
        MIPS_SYS(sys_ni_syscall, 0)     /* was timerfd */
        MIPS_SYS(sys_eventfd, 1)
        MIPS_SYS(sys_fallocate, 6)      /* 4320 */
        MIPS_SYS(sys_timerfd_create, 2)
        MIPS_SYS(sys_timerfd_gettime, 2)
        MIPS_SYS(sys_timerfd_settime, 4)
        MIPS_SYS(sys_signalfd4, 4)
        MIPS_SYS(sys_eventfd2, 2)       /* 4325 */
        MIPS_SYS(sys_epoll_create1, 1)
        MIPS_SYS(sys_dup3, 3)
        MIPS_SYS(sys_pipe2, 2)
        MIPS_SYS(sys_inotify_init1, 1)
        MIPS_SYS(sys_preadv, 6)         /* 4330 */
        MIPS_SYS(sys_pwritev, 6)
        MIPS_SYS(sys_rt_tgsigqueueinfo, 4)
        MIPS_SYS(sys_perf_event_open, 5)
        MIPS_SYS(sys_accept4, 4)
        MIPS_SYS(sys_recvmmsg, 5)       /* 4335 */
        MIPS_SYS(sys_fanotify_init, 2)
        MIPS_SYS(sys_fanotify_mark, 6)
        MIPS_SYS(sys_prlimit64, 4)
        MIPS_SYS(sys_name_to_handle_at, 5)
        MIPS_SYS(sys_open_by_handle_at, 3)      /* 4340 */
        MIPS_SYS(sys_clock_adjtime, 2)
        MIPS_SYS(sys_syncfs, 1)
static int do_store_exclusive(CPUMIPSState *env)
    target_ulong page_addr;

    page_addr = addr & TARGET_PAGE_MASK;
    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {
        reg = env->llreg & 0x1f;
        d = (env->llreg & 0x20) != 0;
            segv = get_user_s64(val, addr);
            segv = get_user_s32(val, addr);
        if (val != env->llval) {
            env->active_tc.gpr[reg] = 0;
                segv = put_user_u64(env->llnewval, addr);
                segv = put_user_u32(env->llnewval, addr);
                env->active_tc.gpr[reg] = 1;
    env->active_tc.PC += 4;
static int do_break(CPUMIPSState *env, target_siginfo_t *info,
        info->si_signo = TARGET_SIGFPE;
        info->si_code = (code == BRK_OVERFLOW) ? FPE_INTOVF : FPE_INTDIV;
        queue_signal(env, info->si_signo, &*info);
        info->si_signo = TARGET_SIGTRAP;
        queue_signal(env, info->si_signo, &*info);
void cpu_loop(CPUMIPSState *env)
    CPUState *cs = CPU(mips_env_get_cpu(env));
    target_siginfo_t info;
# ifdef TARGET_ABI_MIPSO32
    unsigned int syscall_num;

        trapnr = cpu_mips_exec(env);
            env->active_tc.PC += 4;
# ifdef TARGET_ABI_MIPSO32
            syscall_num = env->active_tc.gpr[2] - 4000;
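            /* o32: $v0 (gpr[2]) carries the syscall number biased by 4000;
               the first four arguments arrive in $a0-$a3 (gpr[4]-gpr[7]) and
               any further ones are read from the user stack below. */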
2433 if (syscall_num
>= sizeof(mips_syscall_args
)) {
2434 ret
= -TARGET_ENOSYS
;
2438 abi_ulong arg5
= 0, arg6
= 0, arg7
= 0, arg8
= 0;
2440 nb_args
= mips_syscall_args
[syscall_num
];
2441 sp_reg
= env
->active_tc
.gpr
[29];
2443 /* these arguments are taken from the stack */
2445 if ((ret
= get_user_ual(arg8
, sp_reg
+ 28)) != 0) {
2449 if ((ret
= get_user_ual(arg7
, sp_reg
+ 24)) != 0) {
2453 if ((ret
= get_user_ual(arg6
, sp_reg
+ 20)) != 0) {
2457 if ((ret
= get_user_ual(arg5
, sp_reg
+ 16)) != 0) {
2463 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2464 env
->active_tc
.gpr
[4],
2465 env
->active_tc
.gpr
[5],
2466 env
->active_tc
.gpr
[6],
2467 env
->active_tc
.gpr
[7],
2468 arg5
, arg6
, arg7
, arg8
);
2472 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2473 env
->active_tc
.gpr
[4], env
->active_tc
.gpr
[5],
2474 env
->active_tc
.gpr
[6], env
->active_tc
.gpr
[7],
2475 env
->active_tc
.gpr
[8], env
->active_tc
.gpr
[9],
2476 env
->active_tc
.gpr
[10], env
->active_tc
.gpr
[11]);
2478 if (ret
== -TARGET_QEMU_ESIGRETURN
) {
2479 /* Returning from a successful sigreturn syscall.
2480 Avoid clobbering register state. */
2483 if ((abi_ulong
)ret
>= (abi_ulong
)-1133) {
2484 env
->active_tc
.gpr
[7] = 1; /* error flag */
2487 env
->active_tc
.gpr
[7] = 0; /* error flag */
2489 env
->active_tc
.gpr
[2] = ret
;
2495 info
.si_signo
= TARGET_SIGSEGV
;
2497 /* XXX: check env->error_code */
2498 info
.si_code
= TARGET_SEGV_MAPERR
;
2499 info
._sifields
._sigfault
._addr
= env
->CP0_BadVAddr
;
2500 queue_signal(env
, info
.si_signo
, &info
);
2504 info
.si_signo
= TARGET_SIGILL
;
2507 queue_signal(env
, info
.si_signo
, &info
);
2509 case EXCP_INTERRUPT
:
2510 /* just indicate that signals should be handled asap */
2516 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2519 info
.si_signo
= sig
;
2521 info
.si_code
= TARGET_TRAP_BRKPT
;
2522 queue_signal(env
, info
.si_signo
, &info
);
2527 if (do_store_exclusive(env
)) {
2528 info
.si_signo
= TARGET_SIGSEGV
;
2530 info
.si_code
= TARGET_SEGV_MAPERR
;
2531 info
._sifields
._sigfault
._addr
= env
->active_tc
.PC
;
2532 queue_signal(env
, info
.si_signo
, &info
);
2536 info
.si_signo
= TARGET_SIGILL
;
2538 info
.si_code
= TARGET_ILL_ILLOPC
;
2539 queue_signal(env
, info
.si_signo
, &info
);
2541 /* The code below was inspired by the MIPS Linux kernel trap
2542 * handling code in arch/mips/kernel/traps.c.
2546 abi_ulong trap_instr
;
2549 if (env
->hflags
& MIPS_HFLAG_M16
) {
2550 if (env
->insn_flags
& ASE_MICROMIPS
) {
2551 /* microMIPS mode */
2552 ret
= get_user_u16(trap_instr
, env
->active_tc
.PC
);
2557 if ((trap_instr
>> 10) == 0x11) {
2558 /* 16-bit instruction */
2559 code
= trap_instr
& 0xf;
2561 /* 32-bit instruction */
2564 ret
= get_user_u16(instr_lo
,
2565 env
->active_tc
.PC
+ 2);
2569 trap_instr
= (trap_instr
<< 16) | instr_lo
;
2570 code
= ((trap_instr
>> 6) & ((1 << 20) - 1));
2571 /* Unfortunately, microMIPS also suffers from
2572 the old assembler bug... */
2573 if (code
>= (1 << 10)) {
2579 ret
= get_user_u16(trap_instr
, env
->active_tc
.PC
);
2583 code
= (trap_instr
>> 6) & 0x3f;
2586 ret
= get_user_ual(trap_instr
, env
->active_tc
.PC
);
2591 /* As described in the original Linux kernel code, the
2592 * below checks on 'code' are to work around an old
2595 code
= ((trap_instr
>> 6) & ((1 << 20) - 1));
2596 if (code
>= (1 << 10)) {
2601 if (do_break(env
, &info
, code
) != 0) {
2608 abi_ulong trap_instr
;
2609 unsigned int code
= 0;
2611 if (env
->hflags
& MIPS_HFLAG_M16
) {
2612 /* microMIPS mode */
2615 ret
= get_user_u16(instr
[0], env
->active_tc
.PC
) ||
2616 get_user_u16(instr
[1], env
->active_tc
.PC
+ 2);
2618 trap_instr
= (instr
[0] << 16) | instr
[1];
2620 ret
= get_user_ual(trap_instr
, env
->active_tc
.PC
);
2627 /* The immediate versions don't provide a code. */
2628 if (!(trap_instr
& 0xFC000000)) {
2629 if (env
->hflags
& MIPS_HFLAG_M16
) {
2630 /* microMIPS mode */
2631 code
= ((trap_instr
>> 12) & ((1 << 4) - 1));
2633 code
= ((trap_instr
>> 6) & ((1 << 10) - 1));
2637 if (do_break(env
, &info
, code
) != 0) {
2644 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
2646 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2649 process_pending_signals(env
);
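/*
 * Editor's note: the o32 dispatch above follows the MIPS Linux syscall
 * convention as used in this loop: the number arrives in $v0 (gpr[2],
 * biased by 4000), arguments 1-4 in $a0-$a3 (gpr[4]-gpr[7]), arguments
 * 5-8 on the user stack at $sp+16..$sp+28, and on return gpr[7] acts as
 * the error flag while gpr[2] carries the result or errno.  Sketch of a
 * guest-side call (illustrative only):
 *
 *     li      $v0, 4004        # 4000 + write
 *     syscall
 *     # gpr[7] == 0  ->  gpr[2] is the byte count
 *     # gpr[7] != 0  ->  gpr[2] is the errno value
 */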
#ifdef TARGET_OPENRISC

void cpu_loop(CPUOpenRISCState *env)
    CPUState *cs = CPU(openrisc_env_get_cpu(env));
        trapnr = cpu_exec(env);
            qemu_log("\nReset request, exit, pc is %#x\n", env->pc);
            qemu_log("\nBus error, exit, pc is %#x\n", env->pc);
            cpu_dump_state(cs, stderr, fprintf, 0);
            gdbsig = TARGET_SIGSEGV;
            qemu_log("\nTick time interrupt pc is %#x\n", env->pc);
            qemu_log("\nAlignment pc is %#x\n", env->pc);
            qemu_log("\nIllegal instruction, pc is %#x\n", env->pc);
            qemu_log("\nExternal interrupt, pc is %#x\n", env->pc);
            qemu_log("\nTLB miss\n");
            qemu_log("\nRange\n");
            env->pc += 4;   /* 0xc00; */
            env->gpr[11] = do_syscall(env,
                                      env->gpr[11], /* return value */
                                      env->gpr[3],  /* r3 - r7 are params */
            qemu_log("\nFloating point error\n");
            qemu_log("\nTrap\n");
            qemu_log("\nqemu: unhandled CPU exception %#x - aborting\n",
            cpu_dump_state(cs, stderr, fprintf, 0);
            gdbsig = TARGET_SIGILL;
            gdb_handlesig(cs, gdbsig);
            if (gdbsig != TARGET_SIGTRAP) {
        process_pending_signals(env);

#endif /* TARGET_OPENRISC */
void cpu_loop(CPUSH4State *env)
    CPUState *cs = CPU(sh_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_sh4_exec(env);
            ret = do_syscall(env,
            env->gregs[0] = ret;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            info.si_signo = SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->tea;
            queue_signal(env, info.si_signo, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
void cpu_loop(CPUCRISState *env)
    CPUState *cs = CPU(cris_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_cris_exec(env);
            info.si_signo = SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->pregs[PR_EDA];
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            ret = do_syscall(env,
            env->regs[10] = ret;
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
#ifdef TARGET_MICROBLAZE
void cpu_loop(CPUMBState *env)
    CPUState *cs = CPU(mb_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_mb_exec(env);
            info.si_signo = SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            /* Return address is 4 bytes after the call. */
            env->sregs[SR_PC] = env->regs[14];
            ret = do_syscall(env,
            env->regs[17] = env->sregs[SR_PC] + 4;
            if (env->iflags & D_FLAG) {
                env->sregs[SR_ESR] |= 1 << 12;
                env->sregs[SR_PC] -= 4;
                /* FIXME: if branch was immed, replay the imm as well.  */
            env->iflags &= ~(IMM_FLAG | D_FLAG);
            switch (env->sregs[SR_ESR] & 31) {
            case ESR_EC_DIVZERO:
                info.si_signo = SIGFPE;
                info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, &info);
                info.si_signo = SIGFPE;
                if (env->sregs[SR_FSR] & FSR_IO) {
                    info.si_code = TARGET_FPE_FLTINV;
                if (env->sregs[SR_FSR] & FSR_DZ) {
                    info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, &info);
                printf("Unhandled hw-exception: 0x%x\n",
                       env->sregs[SR_ESR] & ESR_EC_MASK);
                cpu_dump_state(cs, stderr, fprintf, 0);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
void cpu_loop(CPUM68KState *env)
    CPUState *cs = CPU(m68k_env_get_cpu(env));
    target_siginfo_t info;
    TaskState *ts = cs->opaque;
        trapnr = cpu_m68k_exec(env);
            if (ts->sim_syscalls) {
                nr = lduw(env->pc + 2);
                do_m68k_simcall(env, nr);
        case EXCP_HALT_INSN:
            /* Semihosting syscall.  */
            do_m68k_semihosting(env, env->dregs[0]);
        case EXCP_UNSUPPORTED:
            info.si_signo = SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            ts->sim_syscalls = 0;
            env->dregs[0] = do_syscall(env,
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            info.si_signo = SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->mmu.ar;
            queue_signal(env, info.si_signo, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                info.si_signo = sig;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);

#endif /* TARGET_M68K */
static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)
    target_ulong addr, val, tmp;
    target_siginfo_t info;
    addr = env->lock_addr;
    tmp = env->lock_st_addr;
    env->lock_addr = -1;
    env->lock_st_addr = 0;
    if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
    if (val == env->lock_value) {
        if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {
    info.si_signo = TARGET_SIGSEGV;
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, TARGET_SIGSEGV, &info);
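/*
 * Editor's note: the Alpha variant above follows the same pattern as
 * the MIPS one: lock_addr/lock_value record the LDx_L, lock_st_addr
 * holds the value the STx_C wants to store, and the store is performed
 * only while the memory location still equals lock_value, roughly
 * (sketch, not the code used here, val32/val64 are hypothetical names):
 *
 *     success = quad ? (val64 == env->lock_value)
 *                    : ((int32_t)val32 == (int32_t)env->lock_value);
 */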
void cpu_loop(CPUAlphaState *env)
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_alpha_exec(env);
        /* All of the traps imply a transition through PALcode, which
           implies an REI instruction has been executed.  Which means
           that the intr_flag should be cleared.  */
            fprintf(stderr, "Reset requested. Exit\n");
            fprintf(stderr, "Machine check exception. Exit\n");
        case EXCP_SMP_INTERRUPT:
        case EXCP_CLK_INTERRUPT:
        case EXCP_DEV_INTERRUPT:
            fprintf(stderr, "External interrupt. Exit\n");
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
                            ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_FLTINV;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            /* No-op.  Linux simply re-enables the FPU.  */
            env->lock_addr = -1;
            switch (env->error_code) {
                info.si_signo = TARGET_SIGTRAP;
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
                info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
                trapnr = env->ir[IR_V0];
                sysret = do_syscall(env, trapnr,
                                    env->ir[IR_A0], env->ir[IR_A1],
                                    env->ir[IR_A2], env->ir[IR_A3],
                                    env->ir[IR_A4], env->ir[IR_A5],
                if (trapnr == TARGET_NR_sigreturn
                    || trapnr == TARGET_NR_rt_sigreturn) {
                /* Syscall writes 0 to V0 to bypass error check, similar
                   to how this is handled internal to Linux kernel.
                   (Ab)use trapnr temporarily as boolean indicating error.  */
                trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
                env->ir[IR_V0] = (trapnr ? -sysret : sysret);
                env->ir[IR_A3] = trapnr;
                /* ??? We can probably elide the code using page_unprotect
                   that is checking for self-modifying code.  Instead we
                   could simply call tb_flush here.  Until we work out the
                   changes required to turn off the extra write protection,
                   this can be a no-op.  */
                /* Handled in the translator for usermode.  */
                /* Handled in the translator for usermode.  */
                info.si_signo = TARGET_SIGFPE;
                switch (env->ir[IR_A0]) {
                case TARGET_GEN_INTOVF:
                    info.si_code = TARGET_FPE_INTOVF;
                case TARGET_GEN_INTDIV:
                    info.si_code = TARGET_FPE_INTDIV;
                case TARGET_GEN_FLTOVF:
                    info.si_code = TARGET_FPE_FLTOVF;
                case TARGET_GEN_FLTUND:
                    info.si_code = TARGET_FPE_FLTUND;
                case TARGET_GEN_FLTINV:
                    info.si_code = TARGET_FPE_FLTINV;
                case TARGET_GEN_FLTINE:
                    info.si_code = TARGET_FPE_FLTRES;
                case TARGET_GEN_ROPRAND:
                    info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
            info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
            if (info.si_signo) {
                env->lock_addr = -1;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);

#endif /* TARGET_ALPHA */
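/*
 * Editor's note: as the syscall case above shows, Alpha Linux reports
 * errors out of band: IR_V0 ($v0) carries the result or the positive
 * errno, IR_A3 ($a3) is set to 1 on failure and 0 on success, and
 * sigreturn/rt_sigreturn are special-cased so the registers restored
 * from the signal frame are not clobbered.
 */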
void cpu_loop(CPUS390XState *env)
    CPUState *cs = CPU(s390_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_s390x_exec(env);
        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            n = env->int_svc_code;
                /* syscalls > 255 */
            env->psw.addr += env->int_svc_ilen;
            env->regs[2] = do_syscall(env, n, env->regs[2], env->regs[3],
                                      env->regs[4], env->regs[5],
                                      env->regs[6], env->regs[7], 0, 0);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            n = TARGET_TRAP_BRKPT;
            n = env->int_pgm_code;
            case PGM_PRIVILEGED:
                n = TARGET_ILL_ILLOPC;
            case PGM_PROTECTION:
            case PGM_ADDRESSING:
                /* XXX: check env->error_code */
                n = TARGET_SEGV_MAPERR;
                addr = env->__excp_addr;
            case PGM_SPECIFICATION:
            case PGM_SPECIAL_OP:
                n = TARGET_ILL_ILLOPN;
            case PGM_FIXPT_OVERFLOW:
                n = TARGET_FPE_INTOVF;
            case PGM_FIXPT_DIVIDE:
                n = TARGET_FPE_INTDIV;
                n = (env->fpc >> 8) & 0xff;
                    /* compare-and-trap */
                    /* An IEEE exception, simulated or otherwise.  */
                    n = TARGET_FPE_FLTINV;
                } else if (n & 0x40) {
                    n = TARGET_FPE_FLTDIV;
                } else if (n & 0x20) {
                    n = TARGET_FPE_FLTOVF;
                } else if (n & 0x10) {
                    n = TARGET_FPE_FLTUND;
                } else if (n & 0x08) {
                    n = TARGET_FPE_FLTRES;
                    /* ??? Quantum exception; BFP, DFP error.  */
                fprintf(stderr, "Unhandled program exception: %#x\n", n);
                cpu_dump_state(cs, stderr, fprintf, 0);
            addr = env->psw.addr;
            info.si_signo = sig;
            info._sifields._sigfault._addr = addr;
            queue_signal(env, info.si_signo, &info);
            fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);

#endif /* TARGET_S390X */
THREAD CPUState *thread_cpu;

void task_settid(TaskState *ts)
    if (ts->ts_tid == 0) {
        ts->ts_tid = (pid_t)syscall(SYS_gettid);

void stop_all_tasks(void)
    /*
     * We trust that when using NPTL, start_exclusive()
     * handles thread stopping correctly.
     */

/* Assumes contents are already zeroed.  */
void init_task_state(TaskState *ts)
    ts->first_free = ts->sigqueue_table;
    for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
        ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1];
    ts->sigqueue_table[i].next = NULL;
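/*
 * Editor's note: init_task_state() above threads sigqueue_table[] into
 * a singly linked free list headed by first_free.  A minimal sketch of
 * how such a pool is consumed (hypothetical helper, not the allocator
 * used by the signal code):
 */
#if 0
static struct sigqueue *example_alloc_sigqueue(TaskState *ts)
{
    struct sigqueue *q = ts->first_free;

    if (q) {
        ts->first_free = q->next;   /* pop the head of the free list */
    }
    return q;                       /* NULL when the pool is exhausted */
}
#endif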
CPUArchState *cpu_copy(CPUArchState *env)
    CPUState *cpu = ENV_GET_CPU(env);
    CPUArchState *new_env = cpu_init(cpu_model);
    CPUState *new_cpu = ENV_GET_CPU(new_env);
#if defined(TARGET_HAS_ICE)
    /* Reset non arch specific state */
    memcpy(new_env, env, sizeof(CPUArchState));

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        cpu_watchpoint_insert(new_cpu, wp->vaddr, (~wp->len_mask) + 1,
static void handle_arg_help(const char *arg)

static void handle_arg_log(const char *arg)
    mask = qemu_str_to_log_mask(arg);
        qemu_print_log_usage(stdout);

static void handle_arg_log_filename(const char *arg)
    qemu_set_log_filename(arg);

static void handle_arg_set_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_setenv(envlist, token) != 0) {

static void handle_arg_unset_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_unsetenv(envlist, token) != 0) {

static void handle_arg_argv0(const char *arg)
    argv0 = strdup(arg);

static void handle_arg_stack_size(const char *arg)
    guest_stack_size = strtoul(arg, &p, 0);
    if (guest_stack_size == 0) {
        guest_stack_size *= 1024 * 1024;
    } else if (*p == 'k' || *p == 'K') {
        guest_stack_size *= 1024;
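/*
 * Editor's note: handle_arg_stack_size() accepts an optional size
 * suffix, so for example "-s 8M" (or QEMU_STACK_SIZE=8M) and
 * "-s 8388608" request the same 8 MiB guest stack, and "-s 512k"
 * requests 512 KiB; a value of zero is caught by the explicit check
 * above.
 */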
static void handle_arg_ld_prefix(const char *arg)
    interp_prefix = strdup(arg);

static void handle_arg_pagesize(const char *arg)
    qemu_host_page_size = atoi(arg);
    if (qemu_host_page_size == 0 ||
        (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
        fprintf(stderr, "page size must be a power of two\n");

static void handle_arg_gdb(const char *arg)
    gdbstub_port = atoi(arg);

static void handle_arg_uname(const char *arg)
    qemu_uname_release = strdup(arg);

static void handle_arg_cpu(const char *arg)
    cpu_model = strdup(arg);
    if (cpu_model == NULL || is_help_option(cpu_model)) {
        /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
        cpu_list(stdout, &fprintf);

#if defined(CONFIG_USE_GUEST_BASE)
static void handle_arg_guest_base(const char *arg)
    guest_base = strtol(arg, NULL, 0);
    have_guest_base = 1;

static void handle_arg_reserved_va(const char *arg)
    reserved_va = strtoul(arg, &p, 0);
        unsigned long unshifted = reserved_va;
        reserved_va <<= shift;
        if (((reserved_va >> shift) != unshifted)
#if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
            || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
            fprintf(stderr, "Reserved virtual address too big\n");
        fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
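/*
 * Editor's note: handle_arg_reserved_va() parses -R/QEMU_RESERVED_VA
 * similarly, e.g. "-R 0xf7000000"; a size suffix is also accepted (the
 * suffix-to-shift mapping is elided above), and the shifted result is
 * rejected if it overflows or, on hosts with a wider address space,
 * exceeds the target's virtual address space size.
 */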
static void handle_arg_singlestep(const char *arg)

static void handle_arg_strace(const char *arg)

static void handle_arg_version(const char *arg)
    printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION
           ", Copyright (c) 2003-2008 Fabrice Bellard\n");
struct qemu_argument {
    void (*handle_opt)(const char *arg);
    const char *example;

static const struct qemu_argument arg_table[] = {
    {"h",          "",                 false, handle_arg_help,
     "",           "print this help"},
    {"g",          "QEMU_GDB",         true,  handle_arg_gdb,
     "port",       "wait for gdb connection on 'port'"},
    {"L",          "QEMU_LD_PREFIX",   true,  handle_arg_ld_prefix,
     "path",       "set the elf interpreter prefix to 'path'"},
    {"s",          "QEMU_STACK_SIZE",  true,  handle_arg_stack_size,
     "size",       "set the stack size to 'size' bytes"},
    {"cpu",        "QEMU_CPU",         true,  handle_arg_cpu,
     "model",      "select CPU (-cpu help for list)"},
    {"E",          "QEMU_SET_ENV",     true,  handle_arg_set_env,
     "var=value",  "sets the target's environment variable (see below)"},
    {"U",          "QEMU_UNSET_ENV",   true,  handle_arg_unset_env,
     "var",        "unsets the target's environment variable (see below)"},
    {"0",          "QEMU_ARGV0",       true,  handle_arg_argv0,
     "argv0",      "forces target process argv[0] to be 'argv0'"},
    {"r",          "QEMU_UNAME",       true,  handle_arg_uname,
     "uname",      "set qemu uname release string to 'uname'"},
#if defined(CONFIG_USE_GUEST_BASE)
    {"B",          "QEMU_GUEST_BASE",  true,  handle_arg_guest_base,
     "address",    "set guest_base address to 'address'"},
    {"R",          "QEMU_RESERVED_VA", true,  handle_arg_reserved_va,
     "size",       "reserve 'size' bytes for guest virtual address space"},
    {"d",          "QEMU_LOG",         true,  handle_arg_log,
     "item[,...]", "enable logging of specified items "
                   "(use '-d help' for a list of items)"},
    {"D",          "QEMU_LOG_FILENAME", true, handle_arg_log_filename,
     "logfile",    "write logs to 'logfile' (default stderr)"},
    {"p",          "QEMU_PAGESIZE",    true,  handle_arg_pagesize,
     "pagesize",   "set the host page size to 'pagesize'"},
    {"singlestep", "QEMU_SINGLESTEP",  false, handle_arg_singlestep,
     "",           "run in singlestep mode"},
    {"strace",     "QEMU_STRACE",      false, handle_arg_strace,
     "",           "log system calls"},
    {"version",    "QEMU_VERSION",     false, handle_arg_version,
     "",           "display version information and exit"},
    {NULL, NULL, false, NULL, NULL, NULL}
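/*
 * Editor's note: every arg_table[] row pairs a command-line switch with
 * an environment variable, so (binary name and paths shown generically)
 * the two invocations below configure the emulator identically:
 *
 *     qemu-TARGET -L /usr/target-linux -E LANG=C ./a.out
 *     QEMU_LD_PREFIX=/usr/target-linux QEMU_SET_ENV=LANG=C qemu-TARGET ./a.out
 */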
static void usage(void)
    const struct qemu_argument *arginfo;

    printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n"
           "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n"
           "Options and associated environment variables:\n"

    /* Calculate column widths.  We must always have at least enough space
     * for the column header.
     */
    maxarglen = strlen("Argument");
    maxenvlen = strlen("Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        int arglen = strlen(arginfo->argv);
        if (arginfo->has_arg) {
            arglen += strlen(arginfo->example) + 1;
        if (strlen(arginfo->env) > maxenvlen) {
            maxenvlen = strlen(arginfo->env);
        if (arglen > maxarglen) {

    printf("%-*s %-*s Description\n", maxarglen + 1, "Argument",
           maxenvlen, "Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->has_arg) {
            printf("-%s %-*s %-*s %s\n", arginfo->argv,
                   (int)(maxarglen - strlen(arginfo->argv) - 1),
                   arginfo->example, maxenvlen, arginfo->env, arginfo->help);
            printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv,
                   maxenvlen, arginfo->env,
           "QEMU_LD_PREFIX  = %s\n"
           "QEMU_STACK_SIZE = %ld bytes\n",
           "You can use -E and -U options or the QEMU_SET_ENV and\n"
           "QEMU_UNSET_ENV environment variables to set and unset\n"
           "environment variables for the target process.\n"
           "It is possible to provide several variables by separating them\n"
           "by commas in getsubopt(3) style. Additionally it is possible to\n"
           "provide the -E and -U options multiple times.\n"
           "The following lines are equivalent:\n"
           " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
           " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
           " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
           "Note that if you provide several changes to a single variable\n"
           "the last change will stay in effect.\n");
static int parse_args(int argc, char **argv)
    const struct qemu_argument *arginfo;

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->env == NULL) {
        r = getenv(arginfo->env);
            arginfo->handle_opt(r);

        if (optind >= argc) {
        if (!strcmp(r, "-")) {
        for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
            if (!strcmp(r, arginfo->argv)) {
                if (arginfo->has_arg) {
                    if (optind >= argc) {
                    arginfo->handle_opt(argv[optind]);
                    arginfo->handle_opt(NULL);

        /* no option matched the current argv */
        if (arginfo->handle_opt == NULL) {

    if (optind >= argc) {

    filename = argv[optind];
    exec_path = argv[optind];
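/*
 * Editor's note: parse_args() first applies any QEMU_* environment
 * variables (the getenv() loop above) and only then walks the command
 * line, so an explicit switch is handled later and overrides its
 * environment counterpart; the first non-option argument becomes both
 * filename and exec_path.
 */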
int main(int argc, char **argv, char **envp)
    struct target_pt_regs regs1, *regs = &regs1;
    struct image_info info1, *info = &info1;
    struct linux_binprm bprm;
    char **target_environ, **wrk;

    module_call_init(MODULE_INIT_QOM);

    if ((envlist = envlist_create()) == NULL) {
        (void) fprintf(stderr, "Unable to allocate envlist\n");

    /* add current environment into the list */
    for (wrk = environ; *wrk != NULL; wrk++) {
        (void) envlist_setenv(envlist, *wrk);

    /* Read the stack limit from the kernel.  If it's "unlimited",
       then we can do little else besides use the default.  */
        if (getrlimit(RLIMIT_STACK, &lim) == 0
            && lim.rlim_cur != RLIM_INFINITY
            && lim.rlim_cur == (target_long)lim.rlim_cur) {
            guest_stack_size = lim.rlim_cur;

#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file (TBD) */

    optind = parse_args(argc, argv);

    memset(regs, 0, sizeof(struct target_pt_regs));

    /* Zero out image_info */
    memset(info, 0, sizeof(struct image_info));

    memset(&bprm, 0, sizeof(bprm));

    /* Scan interp_prefix dir for replacement files. */
    init_paths(interp_prefix);

    init_qemu_uname_release();

    if (cpu_model == NULL) {
#if defined(TARGET_I386)
#ifdef TARGET_X86_64
        cpu_model = "qemu64";
        cpu_model = "qemu32";
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_M68K)
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
        cpu_model = "TI UltraSparc II";
        cpu_model = "Fujitsu MB86904";
#elif defined(TARGET_MIPS)
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
#elif defined TARGET_OPENRISC
        cpu_model = "or1200";
#elif defined(TARGET_PPC)
# ifdef TARGET_PPC64
        cpu_model = "POWER7";
    cpu_exec_init_all();
    /* NOTE: we need to init the CPU at this stage to get
       qemu_host_page_size */
    env = cpu_init(cpu_model);
        fprintf(stderr, "Unable to find CPU definition\n");
    cpu = ENV_GET_CPU(env);

    if (getenv("QEMU_STRACE")) {

    target_environ = envlist_to_environ(envlist, NULL);
    envlist_free(envlist);

#if defined(CONFIG_USE_GUEST_BASE)
    /*
     * Now that page sizes are configured in cpu_init() we can do
     * proper page alignment for guest_base.
     */
    guest_base = HOST_PAGE_ALIGN(guest_base);

    if (reserved_va || have_guest_base) {
        guest_base = init_guest_space(guest_base, reserved_va, 0,
        if (guest_base == (unsigned long)-1) {
            fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
                    "space for use as guest address space (check your virtual "
                    "memory ulimit setting or reserve less using -R option)\n",
        mmap_next_start = reserved_va;
#endif /* CONFIG_USE_GUEST_BASE */
    /*
     * Read in mmap_min_addr kernel parameter.  This value is used
     * when loading the ELF image to determine whether guest_base
     * is needed.  It is also used in mmap_find_vma.
     */
    if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
        if (fscanf(fp, "%lu", &tmp) == 1) {
            mmap_min_addr = tmp;
            qemu_log("host mmap_min_addr=0x%lx\n", mmap_min_addr);
    /*
     * Prepare copy of argv vector for target.
     */
    target_argc = argc - optind;
    target_argv = calloc(target_argc + 1, sizeof(char *));
    if (target_argv == NULL) {
        (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");

    /*
     * If argv0 is specified (using '-0' switch) we replace
     * argv[0] pointer with the given one.
     */
    if (argv0 != NULL) {
        target_argv[i++] = strdup(argv0);
    for (; i < target_argc; i++) {
        target_argv[i] = strdup(argv[optind + i]);
    target_argv[target_argc] = NULL;

    ts = g_malloc0(sizeof(TaskState));
    init_task_state(ts);
    /* build Task State */

    execfd = qemu_getauxval(AT_EXECFD);
        execfd = open(filename, O_RDONLY);
            printf("Error while loading %s: %s\n", filename, strerror(errno));

    ret = loader_exec(execfd, filename, target_argv, target_environ, regs,
        printf("Error while loading %s: %s\n", filename, strerror(-ret));

    for (wrk = target_environ; *wrk; wrk++) {

    free(target_environ);

    if (qemu_log_enabled()) {
#if defined(CONFIG_USE_GUEST_BASE)
        qemu_log("guest_base 0x%lx\n", guest_base);
        qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
        qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code);
        qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data);
        qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk);
        qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry);

    target_set_brk(info->brk);

#if defined(CONFIG_USE_GUEST_BASE)
    /* Now that we've loaded the binary, GUEST_BASE is fixed.  Delay
       generating the prologue until now so that the prologue can take
       the real value of GUEST_BASE into account.  */
    tcg_prologue_init(&tcg_ctx);

#if defined(TARGET_I386)
    env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
    env->hflags |= HF_PE_MASK | HF_CPL_MASK;
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        env->cr[4] |= CR4_OSFXSR_MASK;
        env->hflags |= HF_OSFXSR_MASK;
#ifndef TARGET_ABI32
    /* enable 64 bit mode if possible */
    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
    env->cr[4] |= CR4_PAE_MASK;
    env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
    env->hflags |= HF_LMA_MASK;

    /* flags setup : we activate the IRQs by default as in user mode */
    env->eflags |= IF_MASK;

    /* linux register setup */
#ifndef TARGET_ABI32
    env->regs[R_EAX] = regs->rax;
    env->regs[R_EBX] = regs->rbx;
    env->regs[R_ECX] = regs->rcx;
    env->regs[R_EDX] = regs->rdx;
    env->regs[R_ESI] = regs->rsi;
    env->regs[R_EDI] = regs->rdi;
    env->regs[R_EBP] = regs->rbp;
    env->regs[R_ESP] = regs->rsp;
    env->eip = regs->rip;
    env->regs[R_EAX] = regs->eax;
    env->regs[R_EBX] = regs->ebx;
    env->regs[R_ECX] = regs->ecx;
    env->regs[R_EDX] = regs->edx;
    env->regs[R_ESI] = regs->esi;
    env->regs[R_EDI] = regs->edi;
    env->regs[R_EBP] = regs->ebp;
    env->regs[R_ESP] = regs->esp;
    env->eip = regs->eip;

    /* linux interrupt setup */
#ifndef TARGET_ABI32
    env->idt.limit = 511;
    env->idt.limit = 255;
    env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
                                PROT_READ|PROT_WRITE,
                                MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    idt_table = g2h(env->idt.base);

    /* linux segment setup */
        uint64_t *gdt_table;
        env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
        gdt_table = g2h(env->gdt.base);
        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
        /* 64 bit code segment */
        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
        write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
        cpu_x86_load_seg(env, R_CS, __USER_CS);
        cpu_x86_load_seg(env, R_SS, __USER_DS);
        cpu_x86_load_seg(env, R_DS, __USER_DS);
        cpu_x86_load_seg(env, R_ES, __USER_DS);
        cpu_x86_load_seg(env, R_FS, __USER_DS);
        cpu_x86_load_seg(env, R_GS, __USER_DS);
        /* This hack makes Wine work... */
        env->segs[R_FS].selector = 0;
        cpu_x86_load_seg(env, R_DS, 0);
        cpu_x86_load_seg(env, R_ES, 0);
        cpu_x86_load_seg(env, R_FS, 0);
        cpu_x86_load_seg(env, R_GS, 0);
#elif defined(TARGET_AARCH64)
    if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
                "The selected ARM CPU does not support 64 bit mode\n");
    for (i = 0; i < 31; i++) {
        env->xregs[i] = regs->regs[i];
    env->xregs[31] = regs->sp;
#elif defined(TARGET_ARM)
    cpsr_write(env, regs->uregs[16], 0xffffffff);
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->uregs[i];
    if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
        && (info->elf_flags & EF_ARM_BE8)) {
        env->bswap_code = 1;
#elif defined(TARGET_UNICORE32)
    cpu_asr_write(env, regs->uregs[32], 0xffffffff);
    for (i = 0; i < 32; i++) {
        env->regs[i] = regs->uregs[i];
#elif defined(TARGET_SPARC)
    env->npc = regs->npc;
    for (i = 0; i < 8; i++)
        env->gregs[i] = regs->u_regs[i];
    for (i = 0; i < 8; i++)
        env->regwptr[i] = regs->u_regs[i + 8];
#elif defined(TARGET_PPC)
#if defined(TARGET_PPC64)
#if defined(TARGET_ABI32)
    env->msr &= ~((target_ulong)1 << MSR_SF);
    env->msr |= (target_ulong)1 << MSR_SF;
    env->nip = regs->nip;
    for (i = 0; i < 32; i++) {
        env->gpr[i] = regs->gpr[i];
#elif defined(TARGET_M68K)
    env->dregs[0] = regs->d0;
    env->dregs[1] = regs->d1;
    env->dregs[2] = regs->d2;
    env->dregs[3] = regs->d3;
    env->dregs[4] = regs->d4;
    env->dregs[5] = regs->d5;
    env->dregs[6] = regs->d6;
    env->dregs[7] = regs->d7;
    env->aregs[0] = regs->a0;
    env->aregs[1] = regs->a1;
    env->aregs[2] = regs->a2;
    env->aregs[3] = regs->a3;
    env->aregs[4] = regs->a4;
    env->aregs[5] = regs->a5;
    env->aregs[6] = regs->a6;
    env->aregs[7] = regs->usp;
    ts->sim_syscalls = 1;
#elif defined(TARGET_MICROBLAZE)
    env->regs[0] = regs->r0;
    env->regs[1] = regs->r1;
    env->regs[2] = regs->r2;
    env->regs[3] = regs->r3;
    env->regs[4] = regs->r4;
    env->regs[5] = regs->r5;
    env->regs[6] = regs->r6;
    env->regs[7] = regs->r7;
    env->regs[8] = regs->r8;
    env->regs[9] = regs->r9;
    env->regs[10] = regs->r10;
    env->regs[11] = regs->r11;
    env->regs[12] = regs->r12;
    env->regs[13] = regs->r13;
    env->regs[14] = regs->r14;
    env->regs[15] = regs->r15;
    env->regs[16] = regs->r16;
    env->regs[17] = regs->r17;
    env->regs[18] = regs->r18;
    env->regs[19] = regs->r19;
    env->regs[20] = regs->r20;
    env->regs[21] = regs->r21;
    env->regs[22] = regs->r22;
    env->regs[23] = regs->r23;
    env->regs[24] = regs->r24;
    env->regs[25] = regs->r25;
    env->regs[26] = regs->r26;
    env->regs[27] = regs->r27;
    env->regs[28] = regs->r28;
    env->regs[29] = regs->r29;
    env->regs[30] = regs->r30;
    env->regs[31] = regs->r31;
    env->sregs[SR_PC] = regs->pc;
#elif defined(TARGET_MIPS)
    for (i = 0; i < 32; i++) {
        env->active_tc.gpr[i] = regs->regs[i];
    env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
    if (regs->cp0_epc & 1) {
        env->hflags |= MIPS_HFLAG_M16;
#elif defined(TARGET_OPENRISC)
    for (i = 0; i < 32; i++) {
        env->gpr[i] = regs->gpr[i];
#elif defined(TARGET_SH4)
    for (i = 0; i < 16; i++) {
        env->gregs[i] = regs->regs[i];
#elif defined(TARGET_ALPHA)
    for (i = 0; i < 28; i++) {
        env->ir[i] = ((abi_ulong *)regs)[i];
    env->ir[IR_SP] = regs->usp;
#elif defined(TARGET_CRIS)
    env->regs[0] = regs->r0;
    env->regs[1] = regs->r1;
    env->regs[2] = regs->r2;
    env->regs[3] = regs->r3;
    env->regs[4] = regs->r4;
    env->regs[5] = regs->r5;
    env->regs[6] = regs->r6;
    env->regs[7] = regs->r7;
    env->regs[8] = regs->r8;
    env->regs[9] = regs->r9;
    env->regs[10] = regs->r10;
    env->regs[11] = regs->r11;
    env->regs[12] = regs->r12;
    env->regs[13] = regs->r13;
    env->regs[14] = info->start_stack;
    env->regs[15] = regs->acr;
    env->pc = regs->erp;
#elif defined(TARGET_S390X)
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->gprs[i];
    env->psw.mask = regs->psw.mask;
    env->psw.addr = regs->psw.addr;
#error unsupported target CPU

#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call.  */

    if (gdbserver_start(gdbstub_port) < 0) {
        fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
    gdb_handlesig(cpu, 0);