4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
21 #include <sys/syscall.h>
22 #include <sys/resource.h>
25 #include "qemu-common.h"
27 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
31 #include "qemu/timer.h"
32 #include "qemu/envlist.h"
42 static const char *cpu_model
;
43 unsigned long mmap_min_addr
;
44 #if defined(CONFIG_USE_GUEST_BASE)
47 #if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
49 * When running 32-on-64 we should make sure we can fit all of the possible
50 * guest address space into a contiguous chunk of virtual host memory.
52 * This way we will never overlap with our own libraries or binaries or stack
53 * or anything else that QEMU maps.
56 /* MIPS only supports 31 bits of virtual address space for user space */
57 uintptr_t reserved_va
= 0x77000000;
59 uintptr_t reserved_va
= 0xf7000000;
62 uintptr_t reserved_va
;
66 static void usage(void);
68 static const char *interp_prefix
= CONFIG_QEMU_INTERP_PREFIX
;
69 const char *qemu_uname_release
;
71 /* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
72 we allocate a bigger stack. Need a better solution, for example
73 by remapping the process stack directly at the right place */
74 unsigned long guest_stack_size
= 8 * 1024 * 1024UL;
/* Log a printf-style message to stderr (user-mode emulation logging). */
void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}
85 #if defined(TARGET_I386)
86 int cpu_get_pic_interrupt(CPUX86State
*env
)
92 /***********************************************************/
93 /* Helper routines for implementing atomic operations. */
95 /* To implement exclusive operations we force all cpus to syncronise.
96 We don't require a full sync, only that no cpus are executing guest code.
97 The alternative is to map target atomic ops onto host equivalents,
98 which requires quite a lot of per host/target work. */
99 static pthread_mutex_t cpu_list_mutex
= PTHREAD_MUTEX_INITIALIZER
;
100 static pthread_mutex_t exclusive_lock
= PTHREAD_MUTEX_INITIALIZER
;
101 static pthread_cond_t exclusive_cond
= PTHREAD_COND_INITIALIZER
;
102 static pthread_cond_t exclusive_resume
= PTHREAD_COND_INITIALIZER
;
103 static int pending_cpus
;
105 /* Make sure everything is in a consistent state for calling fork(). */
106 void fork_start(void)
108 pthread_mutex_lock(&tcg_ctx
.tb_ctx
.tb_lock
);
109 pthread_mutex_lock(&exclusive_lock
);
113 void fork_end(int child
)
115 mmap_fork_end(child
);
117 CPUState
*cpu
, *next_cpu
;
118 /* Child processes created by fork() only have a single thread.
119 Discard information about the parent threads. */
120 CPU_FOREACH_SAFE(cpu
, next_cpu
) {
121 if (cpu
!= thread_cpu
) {
122 QTAILQ_REMOVE(&cpus
, thread_cpu
, node
);
126 pthread_mutex_init(&exclusive_lock
, NULL
);
127 pthread_mutex_init(&cpu_list_mutex
, NULL
);
128 pthread_cond_init(&exclusive_cond
, NULL
);
129 pthread_cond_init(&exclusive_resume
, NULL
);
130 pthread_mutex_init(&tcg_ctx
.tb_ctx
.tb_lock
, NULL
);
131 gdbserver_fork((CPUArchState
*)thread_cpu
->env_ptr
);
133 pthread_mutex_unlock(&exclusive_lock
);
134 pthread_mutex_unlock(&tcg_ctx
.tb_ctx
.tb_lock
);
138 /* Wait for pending exclusive operations to complete. The exclusive lock
140 static inline void exclusive_idle(void)
142 while (pending_cpus
) {
143 pthread_cond_wait(&exclusive_resume
, &exclusive_lock
);
147 /* Start an exclusive operation.
148 Must only be called from outside cpu_arm_exec. */
149 static inline void start_exclusive(void)
153 pthread_mutex_lock(&exclusive_lock
);
157 /* Make all other cpus stop executing. */
158 CPU_FOREACH(other_cpu
) {
159 if (other_cpu
->running
) {
164 if (pending_cpus
> 1) {
165 pthread_cond_wait(&exclusive_cond
, &exclusive_lock
);
169 /* Finish an exclusive operation. */
170 static inline void end_exclusive(void)
173 pthread_cond_broadcast(&exclusive_resume
);
174 pthread_mutex_unlock(&exclusive_lock
);
177 /* Wait for exclusive ops to finish, and begin cpu execution. */
178 static inline void cpu_exec_start(CPUState
*cpu
)
180 pthread_mutex_lock(&exclusive_lock
);
183 pthread_mutex_unlock(&exclusive_lock
);
186 /* Mark cpu as not executing, and release pending exclusive ops. */
187 static inline void cpu_exec_end(CPUState
*cpu
)
189 pthread_mutex_lock(&exclusive_lock
);
190 cpu
->running
= false;
191 if (pending_cpus
> 1) {
193 if (pending_cpus
== 1) {
194 pthread_cond_signal(&exclusive_cond
);
198 pthread_mutex_unlock(&exclusive_lock
);
201 void cpu_list_lock(void)
203 pthread_mutex_lock(&cpu_list_mutex
);
206 void cpu_list_unlock(void)
208 pthread_mutex_unlock(&cpu_list_mutex
);
213 /***********************************************************/
214 /* CPUX86 core interface */
216 void cpu_smm_update(CPUX86State
*env
)
220 uint64_t cpu_get_tsc(CPUX86State
*env
)
222 return cpu_get_real_ticks();
/* Encode a segment descriptor (base addr, limit, flags) into the two
   32-bit words of the descriptor-table entry at *ptr, in guest order. */
static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
                     int flags)
{
    unsigned int e1, e2;
    uint32_t *p;
    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
    e2 |= flags;
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}
238 static uint64_t *idt_table
;
/* Encode a 64-bit gate descriptor (16 bytes) at *ptr in guest order. */
static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;
    e1 = (addr & 0xffff) | (sel << 16);
    /* 0x8000 sets the Present bit. */
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
    p[2] = tswap32(addr >> 32);
    p[3] = 0;
}
252 /* only dpl matters as we do only user space emulation */
253 static void set_idt(int n
, unsigned int dpl
)
255 set_gate64(idt_table
+ n
* 2, 0, dpl
, 0, 0);
/* Encode a 32-bit gate descriptor (8 bytes) at *ptr in guest order. */
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;
    e1 = (addr & 0xffff) | (sel << 16);
    /* 0x8000 sets the Present bit. */
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}
269 /* only dpl matters as we do only user space emulation */
270 static void set_idt(int n
, unsigned int dpl
)
272 set_gate(idt_table
+ n
, 0, dpl
, 0, 0);
276 void cpu_loop(CPUX86State
*env
)
278 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
281 target_siginfo_t info
;
287 trapnr
= cpu_x86_exec(env
);
290 /* linux syscall from int $0x80 */
291 env
->regs
[R_EAX
] = do_syscall(env
,
303 /* linux syscall from syscall instruction */
304 env
->regs
[R_EAX
] = do_syscall(env
,
318 case TARGET_VSYSCALL_ADDR(__NR_vgettimeofday
):
319 syscall_num
= __NR_gettimeofday
;
321 case TARGET_VSYSCALL_ADDR(__NR_vtime
):
323 syscall_num
= __NR_time
;
325 /* XXX: not yet implemented (arm eabi host) */
326 cpu_abort(cs
, "Unimplemented vsyscall vtime");
329 case TARGET_VSYSCALL_ADDR(__NR_vgetcpu
):
330 /* XXX: not yet implemented */
331 cpu_abort(cs
, "Unimplemented vsyscall vgetcpu");
335 "Invalid vsyscall to address " TARGET_FMT_lx
"\n",
338 env
->regs
[R_EAX
] = do_syscall(env
,
348 env
->eip
= ldq(env
->regs
[R_ESP
]);
349 env
->regs
[R_ESP
] += 8;
354 info
.si_signo
= SIGBUS
;
356 info
.si_code
= TARGET_SI_KERNEL
;
357 info
._sifields
._sigfault
._addr
= 0;
358 queue_signal(env
, info
.si_signo
, &info
);
361 /* XXX: potential problem if ABI32 */
362 #ifndef TARGET_X86_64
363 if (env
->eflags
& VM_MASK
) {
364 handle_vm86_fault(env
);
368 info
.si_signo
= SIGSEGV
;
370 info
.si_code
= TARGET_SI_KERNEL
;
371 info
._sifields
._sigfault
._addr
= 0;
372 queue_signal(env
, info
.si_signo
, &info
);
376 info
.si_signo
= SIGSEGV
;
378 if (!(env
->error_code
& 1))
379 info
.si_code
= TARGET_SEGV_MAPERR
;
381 info
.si_code
= TARGET_SEGV_ACCERR
;
382 info
._sifields
._sigfault
._addr
= env
->cr
[2];
383 queue_signal(env
, info
.si_signo
, &info
);
386 #ifndef TARGET_X86_64
387 if (env
->eflags
& VM_MASK
) {
388 handle_vm86_trap(env
, trapnr
);
392 /* division by zero */
393 info
.si_signo
= SIGFPE
;
395 info
.si_code
= TARGET_FPE_INTDIV
;
396 info
._sifields
._sigfault
._addr
= env
->eip
;
397 queue_signal(env
, info
.si_signo
, &info
);
402 #ifndef TARGET_X86_64
403 if (env
->eflags
& VM_MASK
) {
404 handle_vm86_trap(env
, trapnr
);
408 info
.si_signo
= SIGTRAP
;
410 if (trapnr
== EXCP01_DB
) {
411 info
.si_code
= TARGET_TRAP_BRKPT
;
412 info
._sifields
._sigfault
._addr
= env
->eip
;
414 info
.si_code
= TARGET_SI_KERNEL
;
415 info
._sifields
._sigfault
._addr
= 0;
417 queue_signal(env
, info
.si_signo
, &info
);
422 #ifndef TARGET_X86_64
423 if (env
->eflags
& VM_MASK
) {
424 handle_vm86_trap(env
, trapnr
);
428 info
.si_signo
= SIGSEGV
;
430 info
.si_code
= TARGET_SI_KERNEL
;
431 info
._sifields
._sigfault
._addr
= 0;
432 queue_signal(env
, info
.si_signo
, &info
);
436 info
.si_signo
= SIGILL
;
438 info
.si_code
= TARGET_ILL_ILLOPN
;
439 info
._sifields
._sigfault
._addr
= env
->eip
;
440 queue_signal(env
, info
.si_signo
, &info
);
443 /* just indicate that signals should be handled asap */
449 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
454 info
.si_code
= TARGET_TRAP_BRKPT
;
455 queue_signal(env
, info
.si_signo
, &info
);
460 pc
= env
->segs
[R_CS
].base
+ env
->eip
;
461 fprintf(stderr
, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
465 process_pending_signals(env
);
472 #define get_user_code_u32(x, gaddr, env) \
473 ({ abi_long __r = get_user_u32((x), (gaddr)); \
474 if (!__r && bswap_code(arm_sctlr_b(env))) { \
480 #define get_user_code_u16(x, gaddr, env) \
481 ({ abi_long __r = get_user_u16((x), (gaddr)); \
482 if (!__r && bswap_code(arm_sctlr_b(env))) { \
488 #define get_user_data_u32(x, gaddr, env) \
489 ({ abi_long __r = get_user_u32((x), (gaddr)); \
490 if (!__r && arm_cpu_bswap_data(env)) { \
496 #define get_user_data_u16(x, gaddr, env) \
497 ({ abi_long __r = get_user_u16((x), (gaddr)); \
498 if (!__r && arm_cpu_bswap_data(env)) { \
504 #define put_user_data_u32(x, gaddr, env) \
505 ({ typeof(x) __x = (x); \
506 if (arm_cpu_bswap_data(env)) { \
507 __x = bswap32(__x); \
509 put_user_u32(__x, (gaddr)); \
512 #define put_user_data_u16(x, gaddr, env) \
513 ({ typeof(x) __x = (x); \
514 if (arm_cpu_bswap_data(env)) { \
515 __x = bswap16(__x); \
517 put_user_u16(__x, (gaddr)); \
521 /* Commpage handling -- there is no commpage for AArch64 */
524 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
526 * r0 = pointer to oldval
527 * r1 = pointer to newval
528 * r2 = pointer to target value
531 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
532 * C set if *ptr was changed, clear if no exchange happened
534 * Note segv's in kernel helpers are a bit tricky, we can set the
535 * data address sensibly but the PC address is just the entry point.
537 static void arm_kernel_cmpxchg64_helper(CPUARMState
*env
)
539 uint64_t oldval
, newval
, val
;
541 target_siginfo_t info
;
543 /* Based on the 32 bit code in do_kernel_trap */
545 /* XXX: This only works between threads, not between processes.
546 It's probably possible to implement this with native host
547 operations. However things like ldrex/strex are much harder so
548 there's not much point trying. */
550 cpsr
= cpsr_read(env
);
553 if (get_user_u64(oldval
, env
->regs
[0])) {
554 env
->exception
.vaddress
= env
->regs
[0];
558 if (get_user_u64(newval
, env
->regs
[1])) {
559 env
->exception
.vaddress
= env
->regs
[1];
563 if (get_user_u64(val
, addr
)) {
564 env
->exception
.vaddress
= addr
;
571 if (put_user_u64(val
, addr
)) {
572 env
->exception
.vaddress
= addr
;
582 cpsr_write(env
, cpsr
, CPSR_C
);
588 /* We get the PC of the entry address - which is as good as anything,
589 on a real kernel what you get depends on which mode it uses. */
590 info
.si_signo
= SIGSEGV
;
592 /* XXX: check env->error_code */
593 info
.si_code
= TARGET_SEGV_MAPERR
;
594 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
595 queue_signal(env
, info
.si_signo
, &info
);
600 /* Handle a jump to the kernel code page. */
602 do_kernel_trap(CPUARMState
*env
)
608 switch (env
->regs
[15]) {
609 case 0xffff0fa0: /* __kernel_memory_barrier */
610 /* ??? No-op. Will need to do better for SMP. */
612 case 0xffff0fc0: /* __kernel_cmpxchg */
613 /* XXX: This only works between threads, not between processes.
614 It's probably possible to implement this with native host
615 operations. However things like ldrex/strex are much harder so
616 there's not much point trying. */
618 cpsr
= cpsr_read(env
);
620 /* FIXME: This should SEGV if the access fails. */
621 if (get_user_u32(val
, addr
))
623 if (val
== env
->regs
[0]) {
625 /* FIXME: Check for segfaults. */
626 put_user_u32(val
, addr
);
633 cpsr_write(env
, cpsr
, CPSR_C
);
636 case 0xffff0fe0: /* __kernel_get_tls */
637 env
->regs
[0] = env
->cp15
.tpidrro_el0
;
639 case 0xffff0f60: /* __kernel_cmpxchg64 */
640 arm_kernel_cmpxchg64_helper(env
);
646 /* Jump back to the caller. */
647 addr
= env
->regs
[14];
652 env
->regs
[15] = addr
;
657 /* Store exclusive handling for AArch32 */
658 static int do_strex(CPUARMState
*env
)
666 if (env
->exclusive_addr
!= env
->exclusive_test
) {
669 /* We know we're always AArch32 so the address is in uint32_t range
670 * unless it was the -1 exclusive-monitor-lost value (which won't
671 * match exclusive_test above).
673 assert(extract64(env
->exclusive_addr
, 32, 32) == 0);
674 addr
= env
->exclusive_addr
;
675 size
= env
->exclusive_info
& 0xf;
678 segv
= get_user_u8(val
, addr
);
681 segv
= get_user_data_u16(val
, addr
, env
);
685 segv
= get_user_data_u32(val
, addr
, env
);
691 env
->exception
.vaddress
= addr
;
696 segv
= get_user_data_u32(valhi
, addr
+ 4, env
);
698 env
->exception
.vaddress
= addr
+ 4;
701 if (arm_cpu_bswap_data(env
)) {
702 val
= deposit64((uint64_t)valhi
, 32, 32, val
);
704 val
= deposit64(val
, 32, 32, valhi
);
707 if (val
!= env
->exclusive_val
) {
711 val
= env
->regs
[(env
->exclusive_info
>> 8) & 0xf];
714 segv
= put_user_u8(val
, addr
);
717 segv
= put_user_data_u16(val
, addr
, env
);
721 segv
= put_user_data_u32(val
, addr
, env
);
725 env
->exception
.vaddress
= addr
;
729 val
= env
->regs
[(env
->exclusive_info
>> 12) & 0xf];
730 segv
= put_user_data_u32(val
, addr
+ 4, env
);
732 env
->exception
.vaddress
= addr
+ 4;
739 env
->regs
[(env
->exclusive_info
>> 4) & 0xf] = rc
;
745 void cpu_loop(CPUARMState
*env
)
747 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
749 unsigned int n
, insn
;
750 target_siginfo_t info
;
755 trapnr
= cpu_arm_exec(env
);
760 TaskState
*ts
= cs
->opaque
;
764 /* we handle the FPU emulation here, as Linux */
765 /* we get the opcode */
766 /* FIXME - what to do if get_user() fails? */
767 get_user_code_u32(opcode
, env
->regs
[15], env
);
769 rc
= EmulateAll(opcode
, &ts
->fpa
, env
);
770 if (rc
== 0) { /* illegal instruction */
771 info
.si_signo
= SIGILL
;
773 info
.si_code
= TARGET_ILL_ILLOPN
;
774 info
._sifields
._sigfault
._addr
= env
->regs
[15];
775 queue_signal(env
, info
.si_signo
, &info
);
776 } else if (rc
< 0) { /* FP exception */
779 /* translate softfloat flags to FPSR flags */
780 if (-rc
& float_flag_invalid
)
782 if (-rc
& float_flag_divbyzero
)
784 if (-rc
& float_flag_overflow
)
786 if (-rc
& float_flag_underflow
)
788 if (-rc
& float_flag_inexact
)
791 FPSR fpsr
= ts
->fpa
.fpsr
;
792 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
794 if (fpsr
& (arm_fpe
<< 16)) { /* exception enabled? */
795 info
.si_signo
= SIGFPE
;
798 /* ordered by priority, least first */
799 if (arm_fpe
& BIT_IXC
) info
.si_code
= TARGET_FPE_FLTRES
;
800 if (arm_fpe
& BIT_UFC
) info
.si_code
= TARGET_FPE_FLTUND
;
801 if (arm_fpe
& BIT_OFC
) info
.si_code
= TARGET_FPE_FLTOVF
;
802 if (arm_fpe
& BIT_DZC
) info
.si_code
= TARGET_FPE_FLTDIV
;
803 if (arm_fpe
& BIT_IOC
) info
.si_code
= TARGET_FPE_FLTINV
;
805 info
._sifields
._sigfault
._addr
= env
->regs
[15];
806 queue_signal(env
, info
.si_signo
, &info
);
811 /* accumulate unenabled exceptions */
812 if ((!(fpsr
& BIT_IXE
)) && (arm_fpe
& BIT_IXC
))
814 if ((!(fpsr
& BIT_UFE
)) && (arm_fpe
& BIT_UFC
))
816 if ((!(fpsr
& BIT_OFE
)) && (arm_fpe
& BIT_OFC
))
818 if ((!(fpsr
& BIT_DZE
)) && (arm_fpe
& BIT_DZC
))
820 if ((!(fpsr
& BIT_IOE
)) && (arm_fpe
& BIT_IOC
))
823 } else { /* everything OK */
834 if (trapnr
== EXCP_BKPT
) {
836 /* FIXME - what to do if get_user() fails? */
837 get_user_code_u16(insn
, env
->regs
[15], env
);
841 /* FIXME - what to do if get_user() fails? */
842 get_user_code_u32(insn
, env
->regs
[15], env
);
843 n
= (insn
& 0xf) | ((insn
>> 4) & 0xff0);
848 /* FIXME - what to do if get_user() fails? */
849 get_user_code_u16(insn
, env
->regs
[15] - 2, env
);
852 /* FIXME - what to do if get_user() fails? */
853 get_user_code_u32(insn
, env
->regs
[15] - 4, env
);
858 if (n
== ARM_NR_cacheflush
) {
860 } else if (n
== ARM_NR_semihosting
861 || n
== ARM_NR_thumb_semihosting
) {
862 env
->regs
[0] = do_arm_semihosting (env
);
863 } else if (n
== 0 || n
>= ARM_SYSCALL_BASE
|| env
->thumb
) {
865 if (env
->thumb
|| n
== 0) {
868 n
-= ARM_SYSCALL_BASE
;
871 if ( n
> ARM_NR_BASE
) {
873 case ARM_NR_cacheflush
:
877 cpu_set_tls(env
, env
->regs
[0]);
880 case ARM_NR_breakpoint
:
881 env
->regs
[15] -= env
->thumb
? 2 : 4;
884 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
886 env
->regs
[0] = -TARGET_ENOSYS
;
890 env
->regs
[0] = do_syscall(env
,
906 /* just indicate that signals should be handled asap */
909 if (!do_strex(env
)) {
912 /* fall through for segv */
913 case EXCP_PREFETCH_ABORT
:
914 case EXCP_DATA_ABORT
:
915 addr
= env
->exception
.vaddress
;
917 info
.si_signo
= SIGSEGV
;
919 /* XXX: check env->error_code */
920 info
.si_code
= TARGET_SEGV_MAPERR
;
921 info
._sifields
._sigfault
._addr
= addr
;
922 queue_signal(env
, info
.si_signo
, &info
);
930 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
935 info
.si_code
= TARGET_TRAP_BRKPT
;
936 queue_signal(env
, info
.si_signo
, &info
);
940 case EXCP_KERNEL_TRAP
:
941 if (do_kernel_trap(env
))
946 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
948 cpu_dump_state(cs
, stderr
, fprintf
, 0);
951 process_pending_signals(env
);
958 * Handle AArch64 store-release exclusive
960 * rs = gets the status result of store exclusive
961 * rt = is the register that is stored
962 * rt2 = is the second register store (in STP)
965 static int do_strex_a64(CPUARMState
*env
)
976 /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
977 size
= extract32(env
->exclusive_info
, 0, 2);
978 is_pair
= extract32(env
->exclusive_info
, 2, 1);
979 rs
= extract32(env
->exclusive_info
, 4, 5);
980 rt
= extract32(env
->exclusive_info
, 9, 5);
981 rt2
= extract32(env
->exclusive_info
, 14, 5);
983 addr
= env
->exclusive_addr
;
985 if (addr
!= env
->exclusive_test
) {
991 segv
= get_user_u8(val
, addr
);
994 segv
= get_user_u16(val
, addr
);
997 segv
= get_user_u32(val
, addr
);
1000 segv
= get_user_u64(val
, addr
);
1006 env
->exception
.vaddress
= addr
;
1009 if (val
!= env
->exclusive_val
) {
1014 segv
= get_user_u32(val
, addr
+ 4);
1016 segv
= get_user_u64(val
, addr
+ 8);
1019 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
1022 if (val
!= env
->exclusive_high
) {
1026 /* handle the zero register */
1027 val
= rt
== 31 ? 0 : env
->xregs
[rt
];
1030 segv
= put_user_u8(val
, addr
);
1033 segv
= put_user_u16(val
, addr
);
1036 segv
= put_user_u32(val
, addr
);
1039 segv
= put_user_u64(val
, addr
);
1046 /* handle the zero register */
1047 val
= rt2
== 31 ? 0 : env
->xregs
[rt2
];
1049 segv
= put_user_u32(val
, addr
+ 4);
1051 segv
= put_user_u64(val
, addr
+ 8);
1054 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
1061 /* rs == 31 encodes a write to the ZR, thus throwing away
1062 * the status return. This is rather silly but valid.
1065 env
->xregs
[rs
] = rc
;
1068 /* instruction faulted, PC does not advance */
1069 /* either way a strex releases any exclusive lock we have */
1070 env
->exclusive_addr
= -1;
1075 /* AArch64 main loop */
1076 void cpu_loop(CPUARMState
*env
)
1078 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
1080 target_siginfo_t info
;
1084 trapnr
= cpu_arm_exec(env
);
1089 env
->xregs
[0] = do_syscall(env
,
1099 case EXCP_INTERRUPT
:
1100 /* just indicate that signals should be handled asap */
1103 info
.si_signo
= SIGILL
;
1105 info
.si_code
= TARGET_ILL_ILLOPN
;
1106 info
._sifields
._sigfault
._addr
= env
->pc
;
1107 queue_signal(env
, info
.si_signo
, &info
);
1110 if (!do_strex_a64(env
)) {
1113 /* fall through for segv */
1114 case EXCP_PREFETCH_ABORT
:
1115 case EXCP_DATA_ABORT
:
1116 info
.si_signo
= SIGSEGV
;
1118 /* XXX: check env->error_code */
1119 info
.si_code
= TARGET_SEGV_MAPERR
;
1120 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
1121 queue_signal(env
, info
.si_signo
, &info
);
1125 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1127 info
.si_signo
= sig
;
1129 info
.si_code
= TARGET_TRAP_BRKPT
;
1130 queue_signal(env
, info
.si_signo
, &info
);
1134 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
1136 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1139 process_pending_signals(env
);
1140 /* Exception return on AArch64 always clears the exclusive monitor,
1141 * so any return to running guest code implies this.
1142 * A strex (successful or otherwise) also clears the monitor, so
1143 * we don't need to specialcase EXCP_STREX.
1145 env
->exclusive_addr
= -1;
1148 #endif /* ndef TARGET_ABI32 */
1152 #ifdef TARGET_UNICORE32
1154 void cpu_loop(CPUUniCore32State
*env
)
1156 CPUState
*cs
= CPU(uc32_env_get_cpu(env
));
1158 unsigned int n
, insn
;
1159 target_siginfo_t info
;
1163 trapnr
= uc32_cpu_exec(env
);
1166 case UC32_EXCP_PRIV
:
1169 get_user_u32(insn
, env
->regs
[31] - 4);
1170 n
= insn
& 0xffffff;
1172 if (n
>= UC32_SYSCALL_BASE
) {
1174 n
-= UC32_SYSCALL_BASE
;
1175 if (n
== UC32_SYSCALL_NR_set_tls
) {
1176 cpu_set_tls(env
, env
->regs
[0]);
1179 env
->regs
[0] = do_syscall(env
,
1194 case UC32_EXCP_DTRAP
:
1195 case UC32_EXCP_ITRAP
:
1196 info
.si_signo
= SIGSEGV
;
1198 /* XXX: check env->error_code */
1199 info
.si_code
= TARGET_SEGV_MAPERR
;
1200 info
._sifields
._sigfault
._addr
= env
->cp0
.c4_faultaddr
;
1201 queue_signal(env
, info
.si_signo
, &info
);
1203 case EXCP_INTERRUPT
:
1204 /* just indicate that signals should be handled asap */
1210 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1212 info
.si_signo
= sig
;
1214 info
.si_code
= TARGET_TRAP_BRKPT
;
1215 queue_signal(env
, info
.si_signo
, &info
);
1222 process_pending_signals(env
);
1226 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1227 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1233 #define SPARC64_STACK_BIAS 2047
1237 /* WARNING: dealing with register windows _is_ complicated. More info
1238 can be found at http://www.sics.se/~psm/sparcstack.html */
1239 static inline int get_reg_index(CPUSPARCState
*env
, int cwp
, int index
)
1241 index
= (index
+ cwp
* 16) % (16 * env
->nwindows
);
1242 /* wrap handling : if cwp is on the last window, then we use the
1243 registers 'after' the end */
1244 if (index
< 8 && env
->cwp
== env
->nwindows
- 1)
1245 index
+= 16 * env
->nwindows
;
1249 /* save the register window 'cwp1' */
1250 static inline void save_window_offset(CPUSPARCState
*env
, int cwp1
)
1255 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1256 #ifdef TARGET_SPARC64
1258 sp_ptr
+= SPARC64_STACK_BIAS
;
1260 #if defined(DEBUG_WIN)
1261 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" save_cwp=%d\n",
1264 for(i
= 0; i
< 16; i
++) {
1265 /* FIXME - what to do if put_user() fails? */
1266 put_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1267 sp_ptr
+= sizeof(abi_ulong
);
1271 static void save_window(CPUSPARCState
*env
)
1273 #ifndef TARGET_SPARC64
1274 unsigned int new_wim
;
1275 new_wim
= ((env
->wim
>> 1) | (env
->wim
<< (env
->nwindows
- 1))) &
1276 ((1LL << env
->nwindows
) - 1);
1277 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1280 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1286 static void restore_window(CPUSPARCState
*env
)
1288 #ifndef TARGET_SPARC64
1289 unsigned int new_wim
;
1291 unsigned int i
, cwp1
;
1294 #ifndef TARGET_SPARC64
1295 new_wim
= ((env
->wim
<< 1) | (env
->wim
>> (env
->nwindows
- 1))) &
1296 ((1LL << env
->nwindows
) - 1);
1299 /* restore the invalid window */
1300 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1301 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1302 #ifdef TARGET_SPARC64
1304 sp_ptr
+= SPARC64_STACK_BIAS
;
1306 #if defined(DEBUG_WIN)
1307 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" load_cwp=%d\n",
1310 for(i
= 0; i
< 16; i
++) {
1311 /* FIXME - what to do if get_user() fails? */
1312 get_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1313 sp_ptr
+= sizeof(abi_ulong
);
1315 #ifdef TARGET_SPARC64
1317 if (env
->cleanwin
< env
->nwindows
- 1)
1325 static void flush_windows(CPUSPARCState
*env
)
1331 /* if restore would invoke restore_window(), then we can stop */
1332 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ offset
);
1333 #ifndef TARGET_SPARC64
1334 if (env
->wim
& (1 << cwp1
))
1337 if (env
->canrestore
== 0)
1342 save_window_offset(env
, cwp1
);
1345 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1346 #ifndef TARGET_SPARC64
1347 /* set wim so that restore will reload the registers */
1348 env
->wim
= 1 << cwp1
;
1350 #if defined(DEBUG_WIN)
1351 printf("flush_windows: nb=%d\n", offset
- 1);
1355 void cpu_loop (CPUSPARCState
*env
)
1357 CPUState
*cs
= CPU(sparc_env_get_cpu(env
));
1360 target_siginfo_t info
;
1363 trapnr
= cpu_sparc_exec (env
);
1365 /* Compute PSR before exposing state. */
1366 if (env
->cc_op
!= CC_OP_FLAGS
) {
1371 #ifndef TARGET_SPARC64
1378 ret
= do_syscall (env
, env
->gregs
[1],
1379 env
->regwptr
[0], env
->regwptr
[1],
1380 env
->regwptr
[2], env
->regwptr
[3],
1381 env
->regwptr
[4], env
->regwptr
[5],
1383 if ((abi_ulong
)ret
>= (abi_ulong
)(-515)) {
1384 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1385 env
->xcc
|= PSR_CARRY
;
1387 env
->psr
|= PSR_CARRY
;
1391 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1392 env
->xcc
&= ~PSR_CARRY
;
1394 env
->psr
&= ~PSR_CARRY
;
1397 env
->regwptr
[0] = ret
;
1398 /* next instruction */
1400 env
->npc
= env
->npc
+ 4;
1402 case 0x83: /* flush windows */
1407 /* next instruction */
1409 env
->npc
= env
->npc
+ 4;
1411 #ifndef TARGET_SPARC64
1412 case TT_WIN_OVF
: /* window overflow */
1415 case TT_WIN_UNF
: /* window underflow */
1416 restore_window(env
);
1421 info
.si_signo
= TARGET_SIGSEGV
;
1423 /* XXX: check env->error_code */
1424 info
.si_code
= TARGET_SEGV_MAPERR
;
1425 info
._sifields
._sigfault
._addr
= env
->mmuregs
[4];
1426 queue_signal(env
, info
.si_signo
, &info
);
1430 case TT_SPILL
: /* window overflow */
1433 case TT_FILL
: /* window underflow */
1434 restore_window(env
);
1439 info
.si_signo
= TARGET_SIGSEGV
;
1441 /* XXX: check env->error_code */
1442 info
.si_code
= TARGET_SEGV_MAPERR
;
1443 if (trapnr
== TT_DFAULT
)
1444 info
._sifields
._sigfault
._addr
= env
->dmmuregs
[4];
1446 info
._sifields
._sigfault
._addr
= cpu_tsptr(env
)->tpc
;
1447 queue_signal(env
, info
.si_signo
, &info
);
1450 #ifndef TARGET_ABI32
1453 sparc64_get_context(env
);
1457 sparc64_set_context(env
);
1461 case EXCP_INTERRUPT
:
1462 /* just indicate that signals should be handled asap */
1466 info
.si_signo
= TARGET_SIGILL
;
1468 info
.si_code
= TARGET_ILL_ILLOPC
;
1469 info
._sifields
._sigfault
._addr
= env
->pc
;
1470 queue_signal(env
, info
.si_signo
, &info
);
1477 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1480 info
.si_signo
= sig
;
1482 info
.si_code
= TARGET_TRAP_BRKPT
;
1483 queue_signal(env
, info
.si_signo
, &info
);
1488 printf ("Unhandled trap: 0x%x\n", trapnr
);
1489 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1492 process_pending_signals (env
);
1499 static inline uint64_t cpu_ppc_get_tb(CPUPPCState
*env
)
1505 uint64_t cpu_ppc_load_tbl(CPUPPCState
*env
)
1507 return cpu_ppc_get_tb(env
);
1510 uint32_t cpu_ppc_load_tbu(CPUPPCState
*env
)
1512 return cpu_ppc_get_tb(env
) >> 32;
1515 uint64_t cpu_ppc_load_atbl(CPUPPCState
*env
)
1517 return cpu_ppc_get_tb(env
);
1520 uint32_t cpu_ppc_load_atbu(CPUPPCState
*env
)
1522 return cpu_ppc_get_tb(env
) >> 32;
1525 uint32_t cpu_ppc601_load_rtcu(CPUPPCState
*env
)
1526 __attribute__ (( alias ("cpu_ppc_load_tbu") ));
1528 uint32_t cpu_ppc601_load_rtcl(CPUPPCState
*env
)
1530 return cpu_ppc_load_tbl(env
) & 0x3FFFFF80;
1533 /* XXX: to be fixed */
1534 int ppc_dcr_read (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t *valp
)
1539 int ppc_dcr_write (ppc_dcr_t
*dcr_env
, int dcrn
, uint32_t val
)
/* Dump CPU state plus a formatted message to stderr and the qemu log.
   Wrapped in do/while(0) so it behaves as a single statement. */
#define EXCP_DUMP(env, fmt, ...)                                        \
do {                                                                    \
    CPUState *cs = ENV_GET_CPU(env);                                    \
    fprintf(stderr, fmt , ## __VA_ARGS__);                              \
    cpu_dump_state(cs, stderr, fprintf, 0);                             \
    qemu_log(fmt, ## __VA_ARGS__);                                      \
    if (qemu_log_enabled()) {                                           \
        log_cpu_state(cs, 0);                                           \
    }                                                                   \
} while (0)
1555 static int do_store_exclusive(CPUPPCState
*env
)
1558 target_ulong page_addr
;
1559 target_ulong val
, val2
__attribute__((unused
)) = 0;
1563 addr
= env
->reserve_ea
;
1564 page_addr
= addr
& TARGET_PAGE_MASK
;
1567 flags
= page_get_flags(page_addr
);
1568 if ((flags
& PAGE_READ
) == 0) {
1571 int reg
= env
->reserve_info
& 0x1f;
1572 int size
= env
->reserve_info
>> 5;
1575 if (addr
== env
->reserve_addr
) {
1577 case 1: segv
= get_user_u8(val
, addr
); break;
1578 case 2: segv
= get_user_u16(val
, addr
); break;
1579 case 4: segv
= get_user_u32(val
, addr
); break;
1580 #if defined(TARGET_PPC64)
1581 case 8: segv
= get_user_u64(val
, addr
); break;
1583 segv
= get_user_u64(val
, addr
);
1585 segv
= get_user_u64(val2
, addr
+ 8);
1592 if (!segv
&& val
== env
->reserve_val
) {
1593 val
= env
->gpr
[reg
];
1595 case 1: segv
= put_user_u8(val
, addr
); break;
1596 case 2: segv
= put_user_u16(val
, addr
); break;
1597 case 4: segv
= put_user_u32(val
, addr
); break;
1598 #if defined(TARGET_PPC64)
1599 case 8: segv
= put_user_u64(val
, addr
); break;
1601 if (val2
== env
->reserve_val2
) {
1604 val
= env
->gpr
[reg
+1];
1606 val2
= env
->gpr
[reg
+1];
1608 segv
= put_user_u64(val
, addr
);
1610 segv
= put_user_u64(val2
, addr
+ 8);
1623 env
->crf
[0] = (stored
<< 1) | xer_so
;
1624 env
->reserve_addr
= (target_ulong
)-1;
1634 void cpu_loop(CPUPPCState
*env
)
1636 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
1637 target_siginfo_t info
;
1643 trapnr
= cpu_ppc_exec(env
);
1646 case POWERPC_EXCP_NONE
:
1649 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1650 cpu_abort(cs
, "Critical interrupt while in user mode. "
1653 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1654 cpu_abort(cs
, "Machine check exception while in user mode. "
1657 case POWERPC_EXCP_DSI
: /* Data storage exception */
1658 EXCP_DUMP(env
, "Invalid data memory access: 0x" TARGET_FMT_lx
"\n",
1660 /* XXX: check this. Seems bugged */
1661 switch (env
->error_code
& 0xFF000000) {
1663 info
.si_signo
= TARGET_SIGSEGV
;
1665 info
.si_code
= TARGET_SEGV_MAPERR
;
1668 info
.si_signo
= TARGET_SIGILL
;
1670 info
.si_code
= TARGET_ILL_ILLADR
;
1673 info
.si_signo
= TARGET_SIGSEGV
;
1675 info
.si_code
= TARGET_SEGV_ACCERR
;
1678 /* Let's send a regular segfault... */
1679 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1681 info
.si_signo
= TARGET_SIGSEGV
;
1683 info
.si_code
= TARGET_SEGV_MAPERR
;
1686 info
._sifields
._sigfault
._addr
= env
->nip
;
1687 queue_signal(env
, info
.si_signo
, &info
);
1689 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1690 EXCP_DUMP(env
, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1691 "\n", env
->spr
[SPR_SRR0
]);
1692 /* XXX: check this */
1693 switch (env
->error_code
& 0xFF000000) {
1695 info
.si_signo
= TARGET_SIGSEGV
;
1697 info
.si_code
= TARGET_SEGV_MAPERR
;
1701 info
.si_signo
= TARGET_SIGSEGV
;
1703 info
.si_code
= TARGET_SEGV_ACCERR
;
1706 /* Let's send a regular segfault... */
1707 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1709 info
.si_signo
= TARGET_SIGSEGV
;
1711 info
.si_code
= TARGET_SEGV_MAPERR
;
1714 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1715 queue_signal(env
, info
.si_signo
, &info
);
1717 case POWERPC_EXCP_EXTERNAL
: /* External input */
1718 cpu_abort(cs
, "External interrupt while in user mode. "
1721 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1722 EXCP_DUMP(env
, "Unaligned memory access\n");
1723 /* XXX: check this */
1724 info
.si_signo
= TARGET_SIGBUS
;
1726 info
.si_code
= TARGET_BUS_ADRALN
;
1727 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1728 queue_signal(env
, info
.si_signo
, &info
);
1730 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1731 /* XXX: check this */
1732 switch (env
->error_code
& ~0xF) {
1733 case POWERPC_EXCP_FP
:
1734 EXCP_DUMP(env
, "Floating point program exception\n");
1735 info
.si_signo
= TARGET_SIGFPE
;
1737 switch (env
->error_code
& 0xF) {
1738 case POWERPC_EXCP_FP_OX
:
1739 info
.si_code
= TARGET_FPE_FLTOVF
;
1741 case POWERPC_EXCP_FP_UX
:
1742 info
.si_code
= TARGET_FPE_FLTUND
;
1744 case POWERPC_EXCP_FP_ZX
:
1745 case POWERPC_EXCP_FP_VXZDZ
:
1746 info
.si_code
= TARGET_FPE_FLTDIV
;
1748 case POWERPC_EXCP_FP_XX
:
1749 info
.si_code
= TARGET_FPE_FLTRES
;
1751 case POWERPC_EXCP_FP_VXSOFT
:
1752 info
.si_code
= TARGET_FPE_FLTINV
;
1754 case POWERPC_EXCP_FP_VXSNAN
:
1755 case POWERPC_EXCP_FP_VXISI
:
1756 case POWERPC_EXCP_FP_VXIDI
:
1757 case POWERPC_EXCP_FP_VXIMZ
:
1758 case POWERPC_EXCP_FP_VXVC
:
1759 case POWERPC_EXCP_FP_VXSQRT
:
1760 case POWERPC_EXCP_FP_VXCVI
:
1761 info
.si_code
= TARGET_FPE_FLTSUB
;
1764 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1769 case POWERPC_EXCP_INVAL
:
1770 EXCP_DUMP(env
, "Invalid instruction\n");
1771 info
.si_signo
= TARGET_SIGILL
;
1773 switch (env
->error_code
& 0xF) {
1774 case POWERPC_EXCP_INVAL_INVAL
:
1775 info
.si_code
= TARGET_ILL_ILLOPC
;
1777 case POWERPC_EXCP_INVAL_LSWX
:
1778 info
.si_code
= TARGET_ILL_ILLOPN
;
1780 case POWERPC_EXCP_INVAL_SPR
:
1781 info
.si_code
= TARGET_ILL_PRVREG
;
1783 case POWERPC_EXCP_INVAL_FP
:
1784 info
.si_code
= TARGET_ILL_COPROC
;
1787 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1788 env
->error_code
& 0xF);
1789 info
.si_code
= TARGET_ILL_ILLADR
;
1793 case POWERPC_EXCP_PRIV
:
1794 EXCP_DUMP(env
, "Privilege violation\n");
1795 info
.si_signo
= TARGET_SIGILL
;
1797 switch (env
->error_code
& 0xF) {
1798 case POWERPC_EXCP_PRIV_OPC
:
1799 info
.si_code
= TARGET_ILL_PRVOPC
;
1801 case POWERPC_EXCP_PRIV_REG
:
1802 info
.si_code
= TARGET_ILL_PRVREG
;
1805 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1806 env
->error_code
& 0xF);
1807 info
.si_code
= TARGET_ILL_PRVOPC
;
1811 case POWERPC_EXCP_TRAP
:
1812 cpu_abort(cs
, "Tried to call a TRAP\n");
1815 /* Should not happen ! */
1816 cpu_abort(cs
, "Unknown program exception (%02x)\n",
1820 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1821 queue_signal(env
, info
.si_signo
, &info
);
1823 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1824 EXCP_DUMP(env
, "No floating point allowed\n");
1825 info
.si_signo
= TARGET_SIGILL
;
1827 info
.si_code
= TARGET_ILL_COPROC
;
1828 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1829 queue_signal(env
, info
.si_signo
, &info
);
1831 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1832 cpu_abort(cs
, "Syscall exception while in user mode. "
1835 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1836 EXCP_DUMP(env
, "No APU instruction allowed\n");
1837 info
.si_signo
= TARGET_SIGILL
;
1839 info
.si_code
= TARGET_ILL_COPROC
;
1840 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1841 queue_signal(env
, info
.si_signo
, &info
);
1843 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1844 cpu_abort(cs
, "Decrementer interrupt while in user mode. "
1847 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1848 cpu_abort(cs
, "Fix interval timer interrupt while in user mode. "
1851 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1852 cpu_abort(cs
, "Watchdog timer interrupt while in user mode. "
1855 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1856 cpu_abort(cs
, "Data TLB exception while in user mode. "
1859 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1860 cpu_abort(cs
, "Instruction TLB exception while in user mode. "
1863 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1864 EXCP_DUMP(env
, "No SPE/floating-point instruction allowed\n");
1865 info
.si_signo
= TARGET_SIGILL
;
1867 info
.si_code
= TARGET_ILL_COPROC
;
1868 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1869 queue_signal(env
, info
.si_signo
, &info
);
1871 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1872 cpu_abort(cs
, "Embedded floating-point data IRQ not handled\n");
1874 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1875 cpu_abort(cs
, "Embedded floating-point round IRQ not handled\n");
1877 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1878 cpu_abort(cs
, "Performance monitor exception not handled\n");
1880 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1881 cpu_abort(cs
, "Doorbell interrupt while in user mode. "
1884 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1885 cpu_abort(cs
, "Doorbell critical interrupt while in user mode. "
1888 case POWERPC_EXCP_RESET
: /* System reset exception */
1889 cpu_abort(cs
, "Reset interrupt while in user mode. "
1892 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1893 cpu_abort(cs
, "Data segment exception while in user mode. "
1896 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1897 cpu_abort(cs
, "Instruction segment exception "
1898 "while in user mode. Aborting\n");
1900 /* PowerPC 64 with hypervisor mode support */
1901 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1902 cpu_abort(cs
, "Hypervisor decrementer interrupt "
1903 "while in user mode. Aborting\n");
1905 case POWERPC_EXCP_TRACE
: /* Trace exception */
1907 * we use this exception to emulate step-by-step execution mode.
1910 /* PowerPC 64 with hypervisor mode support */
1911 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1912 cpu_abort(cs
, "Hypervisor data storage exception "
1913 "while in user mode. Aborting\n");
1915 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1916 cpu_abort(cs
, "Hypervisor instruction storage exception "
1917 "while in user mode. Aborting\n");
1919 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1920 cpu_abort(cs
, "Hypervisor data segment exception "
1921 "while in user mode. Aborting\n");
1923 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1924 cpu_abort(cs
, "Hypervisor instruction segment exception "
1925 "while in user mode. Aborting\n");
1927 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1928 EXCP_DUMP(env
, "No Altivec instructions allowed\n");
1929 info
.si_signo
= TARGET_SIGILL
;
1931 info
.si_code
= TARGET_ILL_COPROC
;
1932 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1933 queue_signal(env
, info
.si_signo
, &info
);
1935 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1936 cpu_abort(cs
, "Programmable interval timer interrupt "
1937 "while in user mode. Aborting\n");
1939 case POWERPC_EXCP_IO
: /* IO error exception */
1940 cpu_abort(cs
, "IO error exception while in user mode. "
1943 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1944 cpu_abort(cs
, "Run mode exception while in user mode. "
1947 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1948 cpu_abort(cs
, "Emulation trap exception not handled\n");
1950 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1951 cpu_abort(cs
, "Instruction fetch TLB exception "
1952 "while in user-mode. Aborting");
1954 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1955 cpu_abort(cs
, "Data load TLB exception while in user-mode. "
1958 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
1959 cpu_abort(cs
, "Data store TLB exception while in user-mode. "
1962 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
1963 cpu_abort(cs
, "Floating-point assist exception not handled\n");
1965 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1966 cpu_abort(cs
, "Instruction address breakpoint exception "
1969 case POWERPC_EXCP_SMI
: /* System management interrupt */
1970 cpu_abort(cs
, "System management interrupt while in user mode. "
1973 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1974 cpu_abort(cs
, "Thermal interrupt interrupt while in user mode. "
1977 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
1978 cpu_abort(cs
, "Performance monitor exception not handled\n");
1980 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1981 cpu_abort(cs
, "Vector assist exception not handled\n");
1983 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
1984 cpu_abort(cs
, "Soft patch exception not handled\n");
1986 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1987 cpu_abort(cs
, "Maintenance exception while in user mode. "
1990 case POWERPC_EXCP_STOP
: /* stop translation */
1991 /* We did invalidate the instruction cache. Go on */
1993 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
1994 /* We just stopped because of a branch. Go on */
1996 case POWERPC_EXCP_SYSCALL_USER
:
1997 /* system call in user-mode emulation */
1999 * PPC ABI uses overflow flag in cr0 to signal an error
2002 env
->crf
[0] &= ~0x1;
2003 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
2004 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
2006 if (ret
== (target_ulong
)(-TARGET_QEMU_ESIGRETURN
)) {
2007 /* Returning from a successful sigreturn syscall.
2008 Avoid corrupting register state. */
2011 if (ret
> (target_ulong
)(-515)) {
2017 case POWERPC_EXCP_STCX
:
2018 if (do_store_exclusive(env
)) {
2019 info
.si_signo
= TARGET_SIGSEGV
;
2021 info
.si_code
= TARGET_SEGV_MAPERR
;
2022 info
._sifields
._sigfault
._addr
= env
->nip
;
2023 queue_signal(env
, info
.si_signo
, &info
);
2030 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2032 info
.si_signo
= sig
;
2034 info
.si_code
= TARGET_TRAP_BRKPT
;
2035 queue_signal(env
, info
.si_signo
, &info
);
2039 case EXCP_INTERRUPT
:
2040 /* just indicate that signals should be handled asap */
2043 cpu_abort(cs
, "Unknown exception 0x%d. Aborting\n", trapnr
);
2046 process_pending_signals(env
);
2053 # ifdef TARGET_ABI_MIPSO32
2054 # define MIPS_SYS(name, args) args,
2055 static const uint8_t mips_syscall_args
[] = {
2056 MIPS_SYS(sys_syscall
, 8) /* 4000 */
2057 MIPS_SYS(sys_exit
, 1)
2058 MIPS_SYS(sys_fork
, 0)
2059 MIPS_SYS(sys_read
, 3)
2060 MIPS_SYS(sys_write
, 3)
2061 MIPS_SYS(sys_open
, 3) /* 4005 */
2062 MIPS_SYS(sys_close
, 1)
2063 MIPS_SYS(sys_waitpid
, 3)
2064 MIPS_SYS(sys_creat
, 2)
2065 MIPS_SYS(sys_link
, 2)
2066 MIPS_SYS(sys_unlink
, 1) /* 4010 */
2067 MIPS_SYS(sys_execve
, 0)
2068 MIPS_SYS(sys_chdir
, 1)
2069 MIPS_SYS(sys_time
, 1)
2070 MIPS_SYS(sys_mknod
, 3)
2071 MIPS_SYS(sys_chmod
, 2) /* 4015 */
2072 MIPS_SYS(sys_lchown
, 3)
2073 MIPS_SYS(sys_ni_syscall
, 0)
2074 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_stat */
2075 MIPS_SYS(sys_lseek
, 3)
2076 MIPS_SYS(sys_getpid
, 0) /* 4020 */
2077 MIPS_SYS(sys_mount
, 5)
2078 MIPS_SYS(sys_umount
, 1)
2079 MIPS_SYS(sys_setuid
, 1)
2080 MIPS_SYS(sys_getuid
, 0)
2081 MIPS_SYS(sys_stime
, 1) /* 4025 */
2082 MIPS_SYS(sys_ptrace
, 4)
2083 MIPS_SYS(sys_alarm
, 1)
2084 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_fstat */
2085 MIPS_SYS(sys_pause
, 0)
2086 MIPS_SYS(sys_utime
, 2) /* 4030 */
2087 MIPS_SYS(sys_ni_syscall
, 0)
2088 MIPS_SYS(sys_ni_syscall
, 0)
2089 MIPS_SYS(sys_access
, 2)
2090 MIPS_SYS(sys_nice
, 1)
2091 MIPS_SYS(sys_ni_syscall
, 0) /* 4035 */
2092 MIPS_SYS(sys_sync
, 0)
2093 MIPS_SYS(sys_kill
, 2)
2094 MIPS_SYS(sys_rename
, 2)
2095 MIPS_SYS(sys_mkdir
, 2)
2096 MIPS_SYS(sys_rmdir
, 1) /* 4040 */
2097 MIPS_SYS(sys_dup
, 1)
2098 MIPS_SYS(sys_pipe
, 0)
2099 MIPS_SYS(sys_times
, 1)
2100 MIPS_SYS(sys_ni_syscall
, 0)
2101 MIPS_SYS(sys_brk
, 1) /* 4045 */
2102 MIPS_SYS(sys_setgid
, 1)
2103 MIPS_SYS(sys_getgid
, 0)
2104 MIPS_SYS(sys_ni_syscall
, 0) /* was signal(2) */
2105 MIPS_SYS(sys_geteuid
, 0)
2106 MIPS_SYS(sys_getegid
, 0) /* 4050 */
2107 MIPS_SYS(sys_acct
, 0)
2108 MIPS_SYS(sys_umount2
, 2)
2109 MIPS_SYS(sys_ni_syscall
, 0)
2110 MIPS_SYS(sys_ioctl
, 3)
2111 MIPS_SYS(sys_fcntl
, 3) /* 4055 */
2112 MIPS_SYS(sys_ni_syscall
, 2)
2113 MIPS_SYS(sys_setpgid
, 2)
2114 MIPS_SYS(sys_ni_syscall
, 0)
2115 MIPS_SYS(sys_olduname
, 1)
2116 MIPS_SYS(sys_umask
, 1) /* 4060 */
2117 MIPS_SYS(sys_chroot
, 1)
2118 MIPS_SYS(sys_ustat
, 2)
2119 MIPS_SYS(sys_dup2
, 2)
2120 MIPS_SYS(sys_getppid
, 0)
2121 MIPS_SYS(sys_getpgrp
, 0) /* 4065 */
2122 MIPS_SYS(sys_setsid
, 0)
2123 MIPS_SYS(sys_sigaction
, 3)
2124 MIPS_SYS(sys_sgetmask
, 0)
2125 MIPS_SYS(sys_ssetmask
, 1)
2126 MIPS_SYS(sys_setreuid
, 2) /* 4070 */
2127 MIPS_SYS(sys_setregid
, 2)
2128 MIPS_SYS(sys_sigsuspend
, 0)
2129 MIPS_SYS(sys_sigpending
, 1)
2130 MIPS_SYS(sys_sethostname
, 2)
2131 MIPS_SYS(sys_setrlimit
, 2) /* 4075 */
2132 MIPS_SYS(sys_getrlimit
, 2)
2133 MIPS_SYS(sys_getrusage
, 2)
2134 MIPS_SYS(sys_gettimeofday
, 2)
2135 MIPS_SYS(sys_settimeofday
, 2)
2136 MIPS_SYS(sys_getgroups
, 2) /* 4080 */
2137 MIPS_SYS(sys_setgroups
, 2)
2138 MIPS_SYS(sys_ni_syscall
, 0) /* old_select */
2139 MIPS_SYS(sys_symlink
, 2)
2140 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_lstat */
2141 MIPS_SYS(sys_readlink
, 3) /* 4085 */
2142 MIPS_SYS(sys_uselib
, 1)
2143 MIPS_SYS(sys_swapon
, 2)
2144 MIPS_SYS(sys_reboot
, 3)
2145 MIPS_SYS(old_readdir
, 3)
2146 MIPS_SYS(old_mmap
, 6) /* 4090 */
2147 MIPS_SYS(sys_munmap
, 2)
2148 MIPS_SYS(sys_truncate
, 2)
2149 MIPS_SYS(sys_ftruncate
, 2)
2150 MIPS_SYS(sys_fchmod
, 2)
2151 MIPS_SYS(sys_fchown
, 3) /* 4095 */
2152 MIPS_SYS(sys_getpriority
, 2)
2153 MIPS_SYS(sys_setpriority
, 3)
2154 MIPS_SYS(sys_ni_syscall
, 0)
2155 MIPS_SYS(sys_statfs
, 2)
2156 MIPS_SYS(sys_fstatfs
, 2) /* 4100 */
2157 MIPS_SYS(sys_ni_syscall
, 0) /* was ioperm(2) */
2158 MIPS_SYS(sys_socketcall
, 2)
2159 MIPS_SYS(sys_syslog
, 3)
2160 MIPS_SYS(sys_setitimer
, 3)
2161 MIPS_SYS(sys_getitimer
, 2) /* 4105 */
2162 MIPS_SYS(sys_newstat
, 2)
2163 MIPS_SYS(sys_newlstat
, 2)
2164 MIPS_SYS(sys_newfstat
, 2)
2165 MIPS_SYS(sys_uname
, 1)
2166 MIPS_SYS(sys_ni_syscall
, 0) /* 4110 was iopl(2) */
2167 MIPS_SYS(sys_vhangup
, 0)
2168 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_idle() */
2169 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_vm86 */
2170 MIPS_SYS(sys_wait4
, 4)
2171 MIPS_SYS(sys_swapoff
, 1) /* 4115 */
2172 MIPS_SYS(sys_sysinfo
, 1)
2173 MIPS_SYS(sys_ipc
, 6)
2174 MIPS_SYS(sys_fsync
, 1)
2175 MIPS_SYS(sys_sigreturn
, 0)
2176 MIPS_SYS(sys_clone
, 6) /* 4120 */
2177 MIPS_SYS(sys_setdomainname
, 2)
2178 MIPS_SYS(sys_newuname
, 1)
2179 MIPS_SYS(sys_ni_syscall
, 0) /* sys_modify_ldt */
2180 MIPS_SYS(sys_adjtimex
, 1)
2181 MIPS_SYS(sys_mprotect
, 3) /* 4125 */
2182 MIPS_SYS(sys_sigprocmask
, 3)
2183 MIPS_SYS(sys_ni_syscall
, 0) /* was create_module */
2184 MIPS_SYS(sys_init_module
, 5)
2185 MIPS_SYS(sys_delete_module
, 1)
2186 MIPS_SYS(sys_ni_syscall
, 0) /* 4130 was get_kernel_syms */
2187 MIPS_SYS(sys_quotactl
, 0)
2188 MIPS_SYS(sys_getpgid
, 1)
2189 MIPS_SYS(sys_fchdir
, 1)
2190 MIPS_SYS(sys_bdflush
, 2)
2191 MIPS_SYS(sys_sysfs
, 3) /* 4135 */
2192 MIPS_SYS(sys_personality
, 1)
2193 MIPS_SYS(sys_ni_syscall
, 0) /* for afs_syscall */
2194 MIPS_SYS(sys_setfsuid
, 1)
2195 MIPS_SYS(sys_setfsgid
, 1)
2196 MIPS_SYS(sys_llseek
, 5) /* 4140 */
2197 MIPS_SYS(sys_getdents
, 3)
2198 MIPS_SYS(sys_select
, 5)
2199 MIPS_SYS(sys_flock
, 2)
2200 MIPS_SYS(sys_msync
, 3)
2201 MIPS_SYS(sys_readv
, 3) /* 4145 */
2202 MIPS_SYS(sys_writev
, 3)
2203 MIPS_SYS(sys_cacheflush
, 3)
2204 MIPS_SYS(sys_cachectl
, 3)
2205 MIPS_SYS(sys_sysmips
, 4)
2206 MIPS_SYS(sys_ni_syscall
, 0) /* 4150 */
2207 MIPS_SYS(sys_getsid
, 1)
2208 MIPS_SYS(sys_fdatasync
, 0)
2209 MIPS_SYS(sys_sysctl
, 1)
2210 MIPS_SYS(sys_mlock
, 2)
2211 MIPS_SYS(sys_munlock
, 2) /* 4155 */
2212 MIPS_SYS(sys_mlockall
, 1)
2213 MIPS_SYS(sys_munlockall
, 0)
2214 MIPS_SYS(sys_sched_setparam
, 2)
2215 MIPS_SYS(sys_sched_getparam
, 2)
2216 MIPS_SYS(sys_sched_setscheduler
, 3) /* 4160 */
2217 MIPS_SYS(sys_sched_getscheduler
, 1)
2218 MIPS_SYS(sys_sched_yield
, 0)
2219 MIPS_SYS(sys_sched_get_priority_max
, 1)
2220 MIPS_SYS(sys_sched_get_priority_min
, 1)
2221 MIPS_SYS(sys_sched_rr_get_interval
, 2) /* 4165 */
2222 MIPS_SYS(sys_nanosleep
, 2)
2223 MIPS_SYS(sys_mremap
, 5)
2224 MIPS_SYS(sys_accept
, 3)
2225 MIPS_SYS(sys_bind
, 3)
2226 MIPS_SYS(sys_connect
, 3) /* 4170 */
2227 MIPS_SYS(sys_getpeername
, 3)
2228 MIPS_SYS(sys_getsockname
, 3)
2229 MIPS_SYS(sys_getsockopt
, 5)
2230 MIPS_SYS(sys_listen
, 2)
2231 MIPS_SYS(sys_recv
, 4) /* 4175 */
2232 MIPS_SYS(sys_recvfrom
, 6)
2233 MIPS_SYS(sys_recvmsg
, 3)
2234 MIPS_SYS(sys_send
, 4)
2235 MIPS_SYS(sys_sendmsg
, 3)
2236 MIPS_SYS(sys_sendto
, 6) /* 4180 */
2237 MIPS_SYS(sys_setsockopt
, 5)
2238 MIPS_SYS(sys_shutdown
, 2)
2239 MIPS_SYS(sys_socket
, 3)
2240 MIPS_SYS(sys_socketpair
, 4)
2241 MIPS_SYS(sys_setresuid
, 3) /* 4185 */
2242 MIPS_SYS(sys_getresuid
, 3)
2243 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_query_module */
2244 MIPS_SYS(sys_poll
, 3)
2245 MIPS_SYS(sys_nfsservctl
, 3)
2246 MIPS_SYS(sys_setresgid
, 3) /* 4190 */
2247 MIPS_SYS(sys_getresgid
, 3)
2248 MIPS_SYS(sys_prctl
, 5)
2249 MIPS_SYS(sys_rt_sigreturn
, 0)
2250 MIPS_SYS(sys_rt_sigaction
, 4)
2251 MIPS_SYS(sys_rt_sigprocmask
, 4) /* 4195 */
2252 MIPS_SYS(sys_rt_sigpending
, 2)
2253 MIPS_SYS(sys_rt_sigtimedwait
, 4)
2254 MIPS_SYS(sys_rt_sigqueueinfo
, 3)
2255 MIPS_SYS(sys_rt_sigsuspend
, 0)
2256 MIPS_SYS(sys_pread64
, 6) /* 4200 */
2257 MIPS_SYS(sys_pwrite64
, 6)
2258 MIPS_SYS(sys_chown
, 3)
2259 MIPS_SYS(sys_getcwd
, 2)
2260 MIPS_SYS(sys_capget
, 2)
2261 MIPS_SYS(sys_capset
, 2) /* 4205 */
2262 MIPS_SYS(sys_sigaltstack
, 2)
2263 MIPS_SYS(sys_sendfile
, 4)
2264 MIPS_SYS(sys_ni_syscall
, 0)
2265 MIPS_SYS(sys_ni_syscall
, 0)
2266 MIPS_SYS(sys_mmap2
, 6) /* 4210 */
2267 MIPS_SYS(sys_truncate64
, 4)
2268 MIPS_SYS(sys_ftruncate64
, 4)
2269 MIPS_SYS(sys_stat64
, 2)
2270 MIPS_SYS(sys_lstat64
, 2)
2271 MIPS_SYS(sys_fstat64
, 2) /* 4215 */
2272 MIPS_SYS(sys_pivot_root
, 2)
2273 MIPS_SYS(sys_mincore
, 3)
2274 MIPS_SYS(sys_madvise
, 3)
2275 MIPS_SYS(sys_getdents64
, 3)
2276 MIPS_SYS(sys_fcntl64
, 3) /* 4220 */
2277 MIPS_SYS(sys_ni_syscall
, 0)
2278 MIPS_SYS(sys_gettid
, 0)
2279 MIPS_SYS(sys_readahead
, 5)
2280 MIPS_SYS(sys_setxattr
, 5)
2281 MIPS_SYS(sys_lsetxattr
, 5) /* 4225 */
2282 MIPS_SYS(sys_fsetxattr
, 5)
2283 MIPS_SYS(sys_getxattr
, 4)
2284 MIPS_SYS(sys_lgetxattr
, 4)
2285 MIPS_SYS(sys_fgetxattr
, 4)
2286 MIPS_SYS(sys_listxattr
, 3) /* 4230 */
2287 MIPS_SYS(sys_llistxattr
, 3)
2288 MIPS_SYS(sys_flistxattr
, 3)
2289 MIPS_SYS(sys_removexattr
, 2)
2290 MIPS_SYS(sys_lremovexattr
, 2)
2291 MIPS_SYS(sys_fremovexattr
, 2) /* 4235 */
2292 MIPS_SYS(sys_tkill
, 2)
2293 MIPS_SYS(sys_sendfile64
, 5)
2294 MIPS_SYS(sys_futex
, 6)
2295 MIPS_SYS(sys_sched_setaffinity
, 3)
2296 MIPS_SYS(sys_sched_getaffinity
, 3) /* 4240 */
2297 MIPS_SYS(sys_io_setup
, 2)
2298 MIPS_SYS(sys_io_destroy
, 1)
2299 MIPS_SYS(sys_io_getevents
, 5)
2300 MIPS_SYS(sys_io_submit
, 3)
2301 MIPS_SYS(sys_io_cancel
, 3) /* 4245 */
2302 MIPS_SYS(sys_exit_group
, 1)
2303 MIPS_SYS(sys_lookup_dcookie
, 3)
2304 MIPS_SYS(sys_epoll_create
, 1)
2305 MIPS_SYS(sys_epoll_ctl
, 4)
2306 MIPS_SYS(sys_epoll_wait
, 3) /* 4250 */
2307 MIPS_SYS(sys_remap_file_pages
, 5)
2308 MIPS_SYS(sys_set_tid_address
, 1)
2309 MIPS_SYS(sys_restart_syscall
, 0)
2310 MIPS_SYS(sys_fadvise64_64
, 7)
2311 MIPS_SYS(sys_statfs64
, 3) /* 4255 */
2312 MIPS_SYS(sys_fstatfs64
, 2)
2313 MIPS_SYS(sys_timer_create
, 3)
2314 MIPS_SYS(sys_timer_settime
, 4)
2315 MIPS_SYS(sys_timer_gettime
, 2)
2316 MIPS_SYS(sys_timer_getoverrun
, 1) /* 4260 */
2317 MIPS_SYS(sys_timer_delete
, 1)
2318 MIPS_SYS(sys_clock_settime
, 2)
2319 MIPS_SYS(sys_clock_gettime
, 2)
2320 MIPS_SYS(sys_clock_getres
, 2)
2321 MIPS_SYS(sys_clock_nanosleep
, 4) /* 4265 */
2322 MIPS_SYS(sys_tgkill
, 3)
2323 MIPS_SYS(sys_utimes
, 2)
2324 MIPS_SYS(sys_mbind
, 4)
2325 MIPS_SYS(sys_ni_syscall
, 0) /* sys_get_mempolicy */
2326 MIPS_SYS(sys_ni_syscall
, 0) /* 4270 sys_set_mempolicy */
2327 MIPS_SYS(sys_mq_open
, 4)
2328 MIPS_SYS(sys_mq_unlink
, 1)
2329 MIPS_SYS(sys_mq_timedsend
, 5)
2330 MIPS_SYS(sys_mq_timedreceive
, 5)
2331 MIPS_SYS(sys_mq_notify
, 2) /* 4275 */
2332 MIPS_SYS(sys_mq_getsetattr
, 3)
2333 MIPS_SYS(sys_ni_syscall
, 0) /* sys_vserver */
2334 MIPS_SYS(sys_waitid
, 4)
2335 MIPS_SYS(sys_ni_syscall
, 0) /* available, was setaltroot */
2336 MIPS_SYS(sys_add_key
, 5)
2337 MIPS_SYS(sys_request_key
, 4)
2338 MIPS_SYS(sys_keyctl
, 5)
2339 MIPS_SYS(sys_set_thread_area
, 1)
2340 MIPS_SYS(sys_inotify_init
, 0)
2341 MIPS_SYS(sys_inotify_add_watch
, 3) /* 4285 */
2342 MIPS_SYS(sys_inotify_rm_watch
, 2)
2343 MIPS_SYS(sys_migrate_pages
, 4)
2344 MIPS_SYS(sys_openat
, 4)
2345 MIPS_SYS(sys_mkdirat
, 3)
2346 MIPS_SYS(sys_mknodat
, 4) /* 4290 */
2347 MIPS_SYS(sys_fchownat
, 5)
2348 MIPS_SYS(sys_futimesat
, 3)
2349 MIPS_SYS(sys_fstatat64
, 4)
2350 MIPS_SYS(sys_unlinkat
, 3)
2351 MIPS_SYS(sys_renameat
, 4) /* 4295 */
2352 MIPS_SYS(sys_linkat
, 5)
2353 MIPS_SYS(sys_symlinkat
, 3)
2354 MIPS_SYS(sys_readlinkat
, 4)
2355 MIPS_SYS(sys_fchmodat
, 3)
2356 MIPS_SYS(sys_faccessat
, 3) /* 4300 */
2357 MIPS_SYS(sys_pselect6
, 6)
2358 MIPS_SYS(sys_ppoll
, 5)
2359 MIPS_SYS(sys_unshare
, 1)
2360 MIPS_SYS(sys_splice
, 6)
2361 MIPS_SYS(sys_sync_file_range
, 7) /* 4305 */
2362 MIPS_SYS(sys_tee
, 4)
2363 MIPS_SYS(sys_vmsplice
, 4)
2364 MIPS_SYS(sys_move_pages
, 6)
2365 MIPS_SYS(sys_set_robust_list
, 2)
2366 MIPS_SYS(sys_get_robust_list
, 3) /* 4310 */
2367 MIPS_SYS(sys_kexec_load
, 4)
2368 MIPS_SYS(sys_getcpu
, 3)
2369 MIPS_SYS(sys_epoll_pwait
, 6)
2370 MIPS_SYS(sys_ioprio_set
, 3)
2371 MIPS_SYS(sys_ioprio_get
, 2)
2372 MIPS_SYS(sys_utimensat
, 4)
2373 MIPS_SYS(sys_signalfd
, 3)
2374 MIPS_SYS(sys_ni_syscall
, 0) /* was timerfd */
2375 MIPS_SYS(sys_eventfd
, 1)
2376 MIPS_SYS(sys_fallocate
, 6) /* 4320 */
2377 MIPS_SYS(sys_timerfd_create
, 2)
2378 MIPS_SYS(sys_timerfd_gettime
, 2)
2379 MIPS_SYS(sys_timerfd_settime
, 4)
2380 MIPS_SYS(sys_signalfd4
, 4)
2381 MIPS_SYS(sys_eventfd2
, 2) /* 4325 */
2382 MIPS_SYS(sys_epoll_create1
, 1)
2383 MIPS_SYS(sys_dup3
, 3)
2384 MIPS_SYS(sys_pipe2
, 2)
2385 MIPS_SYS(sys_inotify_init1
, 1)
2386 MIPS_SYS(sys_preadv
, 6) /* 4330 */
2387 MIPS_SYS(sys_pwritev
, 6)
2388 MIPS_SYS(sys_rt_tgsigqueueinfo
, 4)
2389 MIPS_SYS(sys_perf_event_open
, 5)
2390 MIPS_SYS(sys_accept4
, 4)
2391 MIPS_SYS(sys_recvmmsg
, 5) /* 4335 */
2392 MIPS_SYS(sys_fanotify_init
, 2)
2393 MIPS_SYS(sys_fanotify_mark
, 6)
2394 MIPS_SYS(sys_prlimit64
, 4)
2395 MIPS_SYS(sys_name_to_handle_at
, 5)
2396 MIPS_SYS(sys_open_by_handle_at
, 3) /* 4340 */
2397 MIPS_SYS(sys_clock_adjtime
, 2)
2398 MIPS_SYS(sys_syncfs
, 1)
2403 static int do_store_exclusive(CPUMIPSState
*env
)
2406 target_ulong page_addr
;
2414 page_addr
= addr
& TARGET_PAGE_MASK
;
2417 flags
= page_get_flags(page_addr
);
2418 if ((flags
& PAGE_READ
) == 0) {
2421 reg
= env
->llreg
& 0x1f;
2422 d
= (env
->llreg
& 0x20) != 0;
2424 segv
= get_user_s64(val
, addr
);
2426 segv
= get_user_s32(val
, addr
);
2429 if (val
!= env
->llval
) {
2430 env
->active_tc
.gpr
[reg
] = 0;
2433 segv
= put_user_u64(env
->llnewval
, addr
);
2435 segv
= put_user_u32(env
->llnewval
, addr
);
2438 env
->active_tc
.gpr
[reg
] = 1;
2445 env
->active_tc
.PC
+= 4;
2458 static int do_break(CPUMIPSState
*env
, target_siginfo_t
*info
,
2466 info
->si_signo
= TARGET_SIGFPE
;
2468 info
->si_code
= (code
== BRK_OVERFLOW
) ? FPE_INTOVF
: FPE_INTDIV
;
2469 queue_signal(env
, info
->si_signo
, &*info
);
2473 info
->si_signo
= TARGET_SIGTRAP
;
2475 queue_signal(env
, info
->si_signo
, &*info
);
2483 void cpu_loop(CPUMIPSState
*env
)
2485 CPUState
*cs
= CPU(mips_env_get_cpu(env
));
2486 target_siginfo_t info
;
2489 # ifdef TARGET_ABI_MIPSO32
2490 unsigned int syscall_num
;
2495 trapnr
= cpu_mips_exec(env
);
2499 env
->active_tc
.PC
+= 4;
2500 # ifdef TARGET_ABI_MIPSO32
2501 syscall_num
= env
->active_tc
.gpr
[2] - 4000;
2502 if (syscall_num
>= sizeof(mips_syscall_args
)) {
2503 ret
= -TARGET_ENOSYS
;
2507 abi_ulong arg5
= 0, arg6
= 0, arg7
= 0, arg8
= 0;
2509 nb_args
= mips_syscall_args
[syscall_num
];
2510 sp_reg
= env
->active_tc
.gpr
[29];
2512 /* these arguments are taken from the stack */
2514 if ((ret
= get_user_ual(arg8
, sp_reg
+ 28)) != 0) {
2518 if ((ret
= get_user_ual(arg7
, sp_reg
+ 24)) != 0) {
2522 if ((ret
= get_user_ual(arg6
, sp_reg
+ 20)) != 0) {
2526 if ((ret
= get_user_ual(arg5
, sp_reg
+ 16)) != 0) {
2532 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2533 env
->active_tc
.gpr
[4],
2534 env
->active_tc
.gpr
[5],
2535 env
->active_tc
.gpr
[6],
2536 env
->active_tc
.gpr
[7],
2537 arg5
, arg6
, arg7
, arg8
);
2541 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2542 env
->active_tc
.gpr
[4], env
->active_tc
.gpr
[5],
2543 env
->active_tc
.gpr
[6], env
->active_tc
.gpr
[7],
2544 env
->active_tc
.gpr
[8], env
->active_tc
.gpr
[9],
2545 env
->active_tc
.gpr
[10], env
->active_tc
.gpr
[11]);
2547 if (ret
== -TARGET_QEMU_ESIGRETURN
) {
2548 /* Returning from a successful sigreturn syscall.
2549 Avoid clobbering register state. */
2552 if ((abi_ulong
)ret
>= (abi_ulong
)-1133) {
2553 env
->active_tc
.gpr
[7] = 1; /* error flag */
2556 env
->active_tc
.gpr
[7] = 0; /* error flag */
2558 env
->active_tc
.gpr
[2] = ret
;
2564 info
.si_signo
= TARGET_SIGSEGV
;
2566 /* XXX: check env->error_code */
2567 info
.si_code
= TARGET_SEGV_MAPERR
;
2568 info
._sifields
._sigfault
._addr
= env
->CP0_BadVAddr
;
2569 queue_signal(env
, info
.si_signo
, &info
);
2573 info
.si_signo
= TARGET_SIGILL
;
2576 queue_signal(env
, info
.si_signo
, &info
);
2578 case EXCP_INTERRUPT
:
2579 /* just indicate that signals should be handled asap */
2585 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2588 info
.si_signo
= sig
;
2590 info
.si_code
= TARGET_TRAP_BRKPT
;
2591 queue_signal(env
, info
.si_signo
, &info
);
2596 if (do_store_exclusive(env
)) {
2597 info
.si_signo
= TARGET_SIGSEGV
;
2599 info
.si_code
= TARGET_SEGV_MAPERR
;
2600 info
._sifields
._sigfault
._addr
= env
->active_tc
.PC
;
2601 queue_signal(env
, info
.si_signo
, &info
);
2605 info
.si_signo
= TARGET_SIGILL
;
2607 info
.si_code
= TARGET_ILL_ILLOPC
;
2608 queue_signal(env
, info
.si_signo
, &info
);
2610 /* The code below was inspired by the MIPS Linux kernel trap
2611 * handling code in arch/mips/kernel/traps.c.
2615 abi_ulong trap_instr
;
2618 if (env
->hflags
& MIPS_HFLAG_M16
) {
2619 if (env
->insn_flags
& ASE_MICROMIPS
) {
2620 /* microMIPS mode */
2621 ret
= get_user_u16(trap_instr
, env
->active_tc
.PC
);
2626 if ((trap_instr
>> 10) == 0x11) {
2627 /* 16-bit instruction */
2628 code
= trap_instr
& 0xf;
2630 /* 32-bit instruction */
2633 ret
= get_user_u16(instr_lo
,
2634 env
->active_tc
.PC
+ 2);
2638 trap_instr
= (trap_instr
<< 16) | instr_lo
;
2639 code
= ((trap_instr
>> 6) & ((1 << 20) - 1));
2640 /* Unfortunately, microMIPS also suffers from
2641 the old assembler bug... */
2642 if (code
>= (1 << 10)) {
2648 ret
= get_user_u16(trap_instr
, env
->active_tc
.PC
);
2652 code
= (trap_instr
>> 6) & 0x3f;
2655 ret
= get_user_ual(trap_instr
, env
->active_tc
.PC
);
2660 /* As described in the original Linux kernel code, the
2661 * below checks on 'code' are to work around an old
2664 code
= ((trap_instr
>> 6) & ((1 << 20) - 1));
2665 if (code
>= (1 << 10)) {
2670 if (do_break(env
, &info
, code
) != 0) {
2677 abi_ulong trap_instr
;
2678 unsigned int code
= 0;
2680 if (env
->hflags
& MIPS_HFLAG_M16
) {
2681 /* microMIPS mode */
2684 ret
= get_user_u16(instr
[0], env
->active_tc
.PC
) ||
2685 get_user_u16(instr
[1], env
->active_tc
.PC
+ 2);
2687 trap_instr
= (instr
[0] << 16) | instr
[1];
2689 ret
= get_user_ual(trap_instr
, env
->active_tc
.PC
);
2696 /* The immediate versions don't provide a code. */
2697 if (!(trap_instr
& 0xFC000000)) {
2698 if (env
->hflags
& MIPS_HFLAG_M16
) {
2699 /* microMIPS mode */
2700 code
= ((trap_instr
>> 12) & ((1 << 4) - 1));
2702 code
= ((trap_instr
>> 6) & ((1 << 10) - 1));
2706 if (do_break(env
, &info
, code
) != 0) {
2713 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
2715 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2718 process_pending_signals(env
);
2723 #ifdef TARGET_OPENRISC
2725 void cpu_loop(CPUOpenRISCState
*env
)
2727 CPUState
*cs
= CPU(openrisc_env_get_cpu(env
));
2731 trapnr
= cpu_exec(env
);
2736 qemu_log("\nReset request, exit, pc is %#x\n", env
->pc
);
2740 qemu_log("\nBus error, exit, pc is %#x\n", env
->pc
);
2745 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2746 gdbsig
= TARGET_SIGSEGV
;
2749 qemu_log("\nTick time interrupt pc is %#x\n", env
->pc
);
2752 qemu_log("\nAlignment pc is %#x\n", env
->pc
);
2756 qemu_log("\nIllegal instructionpc is %#x\n", env
->pc
);
2760 qemu_log("\nExternal interruptpc is %#x\n", env
->pc
);
2764 qemu_log("\nTLB miss\n");
2767 qemu_log("\nRange\n");
2771 env
->pc
+= 4; /* 0xc00; */
2772 env
->gpr
[11] = do_syscall(env
,
2773 env
->gpr
[11], /* return value */
2774 env
->gpr
[3], /* r3 - r7 are params */
2782 qemu_log("\nFloating point error\n");
2785 qemu_log("\nTrap\n");
2792 qemu_log("\nqemu: unhandled CPU exception %#x - aborting\n",
2794 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2795 gdbsig
= TARGET_SIGILL
;
2799 gdb_handlesig(cs
, gdbsig
);
2800 if (gdbsig
!= TARGET_SIGTRAP
) {
2805 process_pending_signals(env
);
2809 #endif /* TARGET_OPENRISC */
2812 void cpu_loop(CPUSH4State
*env
)
2814 CPUState
*cs
= CPU(sh_env_get_cpu(env
));
2816 target_siginfo_t info
;
2819 trapnr
= cpu_sh4_exec (env
);
2824 ret
= do_syscall(env
,
2833 env
->gregs
[0] = ret
;
2835 case EXCP_INTERRUPT
:
2836 /* just indicate that signals should be handled asap */
2842 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2845 info
.si_signo
= sig
;
2847 info
.si_code
= TARGET_TRAP_BRKPT
;
2848 queue_signal(env
, info
.si_signo
, &info
);
2854 info
.si_signo
= SIGSEGV
;
2856 info
.si_code
= TARGET_SEGV_MAPERR
;
2857 info
._sifields
._sigfault
._addr
= env
->tea
;
2858 queue_signal(env
, info
.si_signo
, &info
);
2862 printf ("Unhandled trap: 0x%x\n", trapnr
);
2863 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2866 process_pending_signals (env
);
2872 void cpu_loop(CPUCRISState
*env
)
2874 CPUState
*cs
= CPU(cris_env_get_cpu(env
));
2876 target_siginfo_t info
;
2879 trapnr
= cpu_cris_exec (env
);
2883 info
.si_signo
= SIGSEGV
;
2885 /* XXX: check env->error_code */
2886 info
.si_code
= TARGET_SEGV_MAPERR
;
2887 info
._sifields
._sigfault
._addr
= env
->pregs
[PR_EDA
];
2888 queue_signal(env
, info
.si_signo
, &info
);
2891 case EXCP_INTERRUPT
:
2892 /* just indicate that signals should be handled asap */
2895 ret
= do_syscall(env
,
2904 env
->regs
[10] = ret
;
2910 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2913 info
.si_signo
= sig
;
2915 info
.si_code
= TARGET_TRAP_BRKPT
;
2916 queue_signal(env
, info
.si_signo
, &info
);
2921 printf ("Unhandled trap: 0x%x\n", trapnr
);
2922 cpu_dump_state(cs
, stderr
, fprintf
, 0);
2925 process_pending_signals (env
);
2930 #ifdef TARGET_MICROBLAZE
2931 void cpu_loop(CPUMBState
*env
)
2933 CPUState
*cs
= CPU(mb_env_get_cpu(env
));
2935 target_siginfo_t info
;
2938 trapnr
= cpu_mb_exec (env
);
2942 info
.si_signo
= SIGSEGV
;
2944 /* XXX: check env->error_code */
2945 info
.si_code
= TARGET_SEGV_MAPERR
;
2946 info
._sifields
._sigfault
._addr
= 0;
2947 queue_signal(env
, info
.si_signo
, &info
);
2950 case EXCP_INTERRUPT
:
2951 /* just indicate that signals should be handled asap */
2954 /* Return address is 4 bytes after the call. */
2956 env
->sregs
[SR_PC
] = env
->regs
[14];
2957 ret
= do_syscall(env
,
2969 env
->regs
[17] = env
->sregs
[SR_PC
] + 4;
2970 if (env
->iflags
& D_FLAG
) {
2971 env
->sregs
[SR_ESR
] |= 1 << 12;
2972 env
->sregs
[SR_PC
] -= 4;
2973 /* FIXME: if branch was immed, replay the imm as well. */
2976 env
->iflags
&= ~(IMM_FLAG
| D_FLAG
);
2978 switch (env
->sregs
[SR_ESR
] & 31) {
2979 case ESR_EC_DIVZERO
:
2980 info
.si_signo
= SIGFPE
;
2982 info
.si_code
= TARGET_FPE_FLTDIV
;
2983 info
._sifields
._sigfault
._addr
= 0;
2984 queue_signal(env
, info
.si_signo
, &info
);
2987 info
.si_signo
= SIGFPE
;
2989 if (env
->sregs
[SR_FSR
] & FSR_IO
) {
2990 info
.si_code
= TARGET_FPE_FLTINV
;
2992 if (env
->sregs
[SR_FSR
] & FSR_DZ
) {
2993 info
.si_code
= TARGET_FPE_FLTDIV
;
2995 info
._sifields
._sigfault
._addr
= 0;
2996 queue_signal(env
, info
.si_signo
, &info
);
2999 printf ("Unhandled hw-exception: 0x%x\n",
3000 env
->sregs
[SR_ESR
] & ESR_EC_MASK
);
3001 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3010 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3013 info
.si_signo
= sig
;
3015 info
.si_code
= TARGET_TRAP_BRKPT
;
3016 queue_signal(env
, info
.si_signo
, &info
);
3021 printf ("Unhandled trap: 0x%x\n", trapnr
);
3022 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3025 process_pending_signals (env
);
3032 void cpu_loop(CPUM68KState
*env
)
3034 CPUState
*cs
= CPU(m68k_env_get_cpu(env
));
3037 target_siginfo_t info
;
3038 TaskState
*ts
= cs
->opaque
;
3041 trapnr
= cpu_m68k_exec(env
);
3045 if (ts
->sim_syscalls
) {
3047 nr
= lduw(env
->pc
+ 2);
3049 do_m68k_simcall(env
, nr
);
3055 case EXCP_HALT_INSN
:
3056 /* Semihosing syscall. */
3058 do_m68k_semihosting(env
, env
->dregs
[0]);
3062 case EXCP_UNSUPPORTED
:
3064 info
.si_signo
= SIGILL
;
3066 info
.si_code
= TARGET_ILL_ILLOPN
;
3067 info
._sifields
._sigfault
._addr
= env
->pc
;
3068 queue_signal(env
, info
.si_signo
, &info
);
3072 ts
->sim_syscalls
= 0;
3075 env
->dregs
[0] = do_syscall(env
,
3086 case EXCP_INTERRUPT
:
3087 /* just indicate that signals should be handled asap */
3091 info
.si_signo
= SIGSEGV
;
3093 /* XXX: check env->error_code */
3094 info
.si_code
= TARGET_SEGV_MAPERR
;
3095 info
._sifields
._sigfault
._addr
= env
->mmu
.ar
;
3096 queue_signal(env
, info
.si_signo
, &info
);
3103 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3106 info
.si_signo
= sig
;
3108 info
.si_code
= TARGET_TRAP_BRKPT
;
3109 queue_signal(env
, info
.si_signo
, &info
);
3114 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
3116 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3119 process_pending_signals(env
);
3122 #endif /* TARGET_M68K */
3125 static void do_store_exclusive(CPUAlphaState
*env
, int reg
, int quad
)
3127 target_ulong addr
, val
, tmp
;
3128 target_siginfo_t info
;
3131 addr
= env
->lock_addr
;
3132 tmp
= env
->lock_st_addr
;
3133 env
->lock_addr
= -1;
3134 env
->lock_st_addr
= 0;
3140 if (quad
? get_user_s64(val
, addr
) : get_user_s32(val
, addr
)) {
3144 if (val
== env
->lock_value
) {
3146 if (quad
? put_user_u64(tmp
, addr
) : put_user_u32(tmp
, addr
)) {
3163 info
.si_signo
= TARGET_SIGSEGV
;
3165 info
.si_code
= TARGET_SEGV_MAPERR
;
3166 info
._sifields
._sigfault
._addr
= addr
;
3167 queue_signal(env
, TARGET_SIGSEGV
, &info
);
3170 void cpu_loop(CPUAlphaState
*env
)
3172 CPUState
*cs
= CPU(alpha_env_get_cpu(env
));
3174 target_siginfo_t info
;
3178 trapnr
= cpu_alpha_exec (env
);
3180 /* All of the traps imply a transition through PALcode, which
3181 implies an REI instruction has been executed. Which means
3182 that the intr_flag should be cleared. */
3187 fprintf(stderr
, "Reset requested. Exit\n");
3191 fprintf(stderr
, "Machine check exception. Exit\n");
3194 case EXCP_SMP_INTERRUPT
:
3195 case EXCP_CLK_INTERRUPT
:
3196 case EXCP_DEV_INTERRUPT
:
3197 fprintf(stderr
, "External interrupt. Exit\n");
3201 env
->lock_addr
= -1;
3202 info
.si_signo
= TARGET_SIGSEGV
;
3204 info
.si_code
= (page_get_flags(env
->trap_arg0
) & PAGE_VALID
3205 ? TARGET_SEGV_ACCERR
: TARGET_SEGV_MAPERR
);
3206 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
3207 queue_signal(env
, info
.si_signo
, &info
);
3210 env
->lock_addr
= -1;
3211 info
.si_signo
= TARGET_SIGBUS
;
3213 info
.si_code
= TARGET_BUS_ADRALN
;
3214 info
._sifields
._sigfault
._addr
= env
->trap_arg0
;
3215 queue_signal(env
, info
.si_signo
, &info
);
3219 env
->lock_addr
= -1;
3220 info
.si_signo
= TARGET_SIGILL
;
3222 info
.si_code
= TARGET_ILL_ILLOPC
;
3223 info
._sifields
._sigfault
._addr
= env
->pc
;
3224 queue_signal(env
, info
.si_signo
, &info
);
3227 env
->lock_addr
= -1;
3228 info
.si_signo
= TARGET_SIGFPE
;
3230 info
.si_code
= TARGET_FPE_FLTINV
;
3231 info
._sifields
._sigfault
._addr
= env
->pc
;
3232 queue_signal(env
, info
.si_signo
, &info
);
3235 /* No-op. Linux simply re-enables the FPU. */
3238 env
->lock_addr
= -1;
3239 switch (env
->error_code
) {
3242 info
.si_signo
= TARGET_SIGTRAP
;
3244 info
.si_code
= TARGET_TRAP_BRKPT
;
3245 info
._sifields
._sigfault
._addr
= env
->pc
;
3246 queue_signal(env
, info
.si_signo
, &info
);
3250 info
.si_signo
= TARGET_SIGTRAP
;
3253 info
._sifields
._sigfault
._addr
= env
->pc
;
3254 queue_signal(env
, info
.si_signo
, &info
);
3258 trapnr
= env
->ir
[IR_V0
];
3259 sysret
= do_syscall(env
, trapnr
,
3260 env
->ir
[IR_A0
], env
->ir
[IR_A1
],
3261 env
->ir
[IR_A2
], env
->ir
[IR_A3
],
3262 env
->ir
[IR_A4
], env
->ir
[IR_A5
],
3264 if (trapnr
== TARGET_NR_sigreturn
3265 || trapnr
== TARGET_NR_rt_sigreturn
) {
3268 /* Syscall writes 0 to V0 to bypass error check, similar
3269 to how this is handled internal to Linux kernel.
3270 (Ab)use trapnr temporarily as boolean indicating error. */
3271 trapnr
= (env
->ir
[IR_V0
] != 0 && sysret
< 0);
3272 env
->ir
[IR_V0
] = (trapnr
? -sysret
: sysret
);
3273 env
->ir
[IR_A3
] = trapnr
;
3277 /* ??? We can probably elide the code using page_unprotect
3278 that is checking for self-modifying code. Instead we
3279 could simply call tb_flush here. Until we work out the
3280 changes required to turn off the extra write protection,
3281 this can be a no-op. */
3285 /* Handled in the translator for usermode. */
3289 /* Handled in the translator for usermode. */
3293 info
.si_signo
= TARGET_SIGFPE
;
3294 switch (env
->ir
[IR_A0
]) {
3295 case TARGET_GEN_INTOVF
:
3296 info
.si_code
= TARGET_FPE_INTOVF
;
3298 case TARGET_GEN_INTDIV
:
3299 info
.si_code
= TARGET_FPE_INTDIV
;
3301 case TARGET_GEN_FLTOVF
:
3302 info
.si_code
= TARGET_FPE_FLTOVF
;
3304 case TARGET_GEN_FLTUND
:
3305 info
.si_code
= TARGET_FPE_FLTUND
;
3307 case TARGET_GEN_FLTINV
:
3308 info
.si_code
= TARGET_FPE_FLTINV
;
3310 case TARGET_GEN_FLTINE
:
3311 info
.si_code
= TARGET_FPE_FLTRES
;
3313 case TARGET_GEN_ROPRAND
:
3317 info
.si_signo
= TARGET_SIGTRAP
;
3322 info
._sifields
._sigfault
._addr
= env
->pc
;
3323 queue_signal(env
, info
.si_signo
, &info
);
3330 info
.si_signo
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3331 if (info
.si_signo
) {
3332 env
->lock_addr
= -1;
3334 info
.si_code
= TARGET_TRAP_BRKPT
;
3335 queue_signal(env
, info
.si_signo
, &info
);
3340 do_store_exclusive(env
, env
->error_code
, trapnr
- EXCP_STL_C
);
3342 case EXCP_INTERRUPT
:
3343 /* Just indicate that signals should be handled asap. */
3346 printf ("Unhandled trap: 0x%x\n", trapnr
);
3347 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3350 process_pending_signals (env
);
3353 #endif /* TARGET_ALPHA */
3356 void cpu_loop(CPUS390XState
*env
)
3358 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
3360 target_siginfo_t info
;
3364 trapnr
= cpu_s390x_exec(env
);
3366 case EXCP_INTERRUPT
:
3367 /* Just indicate that signals should be handled asap. */
3371 n
= env
->int_svc_code
;
3373 /* syscalls > 255 */
3376 env
->psw
.addr
+= env
->int_svc_ilen
;
3377 env
->regs
[2] = do_syscall(env
, n
, env
->regs
[2], env
->regs
[3],
3378 env
->regs
[4], env
->regs
[5],
3379 env
->regs
[6], env
->regs
[7], 0, 0);
3383 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
3385 n
= TARGET_TRAP_BRKPT
;
3390 n
= env
->int_pgm_code
;
3393 case PGM_PRIVILEGED
:
3395 n
= TARGET_ILL_ILLOPC
;
3397 case PGM_PROTECTION
:
3398 case PGM_ADDRESSING
:
3400 /* XXX: check env->error_code */
3401 n
= TARGET_SEGV_MAPERR
;
3402 addr
= env
->__excp_addr
;
3405 case PGM_SPECIFICATION
:
3406 case PGM_SPECIAL_OP
:
3410 n
= TARGET_ILL_ILLOPN
;
3413 case PGM_FIXPT_OVERFLOW
:
3415 n
= TARGET_FPE_INTOVF
;
3417 case PGM_FIXPT_DIVIDE
:
3419 n
= TARGET_FPE_INTDIV
;
3423 n
= (env
->fpc
>> 8) & 0xff;
3425 /* compare-and-trap */
3428 /* An IEEE exception, simulated or otherwise. */
3430 n
= TARGET_FPE_FLTINV
;
3431 } else if (n
& 0x40) {
3432 n
= TARGET_FPE_FLTDIV
;
3433 } else if (n
& 0x20) {
3434 n
= TARGET_FPE_FLTOVF
;
3435 } else if (n
& 0x10) {
3436 n
= TARGET_FPE_FLTUND
;
3437 } else if (n
& 0x08) {
3438 n
= TARGET_FPE_FLTRES
;
3440 /* ??? Quantum exception; BFP, DFP error. */
3448 fprintf(stderr
, "Unhandled program exception: %#x\n", n
);
3449 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3455 addr
= env
->psw
.addr
;
3457 info
.si_signo
= sig
;
3460 info
._sifields
._sigfault
._addr
= addr
;
3461 queue_signal(env
, info
.si_signo
, &info
);
3465 fprintf(stderr
, "Unhandled trap: 0x%x\n", trapnr
);
3466 cpu_dump_state(cs
, stderr
, fprintf
, 0);
3469 process_pending_signals (env
);
3473 #endif /* TARGET_S390X */
3475 THREAD CPUState
*thread_cpu
;
3477 void task_settid(TaskState
*ts
)
3479 if (ts
->ts_tid
== 0) {
3480 ts
->ts_tid
= (pid_t
)syscall(SYS_gettid
);
3484 void stop_all_tasks(void)
3487 * We trust that when using NPTL, start_exclusive()
3488 * handles thread stopping correctly.
3493 /* Assumes contents are already zeroed. */
3494 void init_task_state(TaskState
*ts
)
3499 ts
->first_free
= ts
->sigqueue_table
;
3500 for (i
= 0; i
< MAX_SIGQUEUE_SIZE
- 1; i
++) {
3501 ts
->sigqueue_table
[i
].next
= &ts
->sigqueue_table
[i
+ 1];
3503 ts
->sigqueue_table
[i
].next
= NULL
;
3506 CPUArchState
*cpu_copy(CPUArchState
*env
)
3508 CPUState
*cpu
= ENV_GET_CPU(env
);
3509 CPUArchState
*new_env
= cpu_init(cpu_model
);
3510 CPUState
*new_cpu
= ENV_GET_CPU(new_env
);
3511 #if defined(TARGET_HAS_ICE)
3516 /* Reset non arch specific state */
3519 memcpy(new_env
, env
, sizeof(CPUArchState
));
3521 /* Clone all break/watchpoints.
3522 Note: Once we support ptrace with hw-debug register access, make sure
3523 BP_CPU break/watchpoints are handled correctly on clone. */
3524 QTAILQ_INIT(&cpu
->breakpoints
);
3525 QTAILQ_INIT(&cpu
->watchpoints
);
3526 #if defined(TARGET_HAS_ICE)
3527 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
3528 cpu_breakpoint_insert(new_cpu
, bp
->pc
, bp
->flags
, NULL
);
3530 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
3531 cpu_watchpoint_insert(new_cpu
, wp
->vaddr
, wp
->len
, wp
->flags
, NULL
);
3538 static void handle_arg_help(const char *arg
)
3543 static void handle_arg_log(const char *arg
)
3547 mask
= qemu_str_to_log_mask(arg
);
3549 qemu_print_log_usage(stdout
);
/* -D option: redirect the qemu log from stderr to the named file. */
static void handle_arg_log_filename(const char *arg)
{
    qemu_set_log_filename(arg);
}
3560 static void handle_arg_set_env(const char *arg
)
3562 char *r
, *p
, *token
;
3563 r
= p
= strdup(arg
);
3564 while ((token
= strsep(&p
, ",")) != NULL
) {
3565 if (envlist_setenv(envlist
, token
) != 0) {
3572 static void handle_arg_unset_env(const char *arg
)
3574 char *r
, *p
, *token
;
3575 r
= p
= strdup(arg
);
3576 while ((token
= strsep(&p
, ",")) != NULL
) {
3577 if (envlist_unsetenv(envlist
, token
) != 0) {
3584 static void handle_arg_argv0(const char *arg
)
3586 argv0
= strdup(arg
);
3589 static void handle_arg_stack_size(const char *arg
)
3592 guest_stack_size
= strtoul(arg
, &p
, 0);
3593 if (guest_stack_size
== 0) {
3598 guest_stack_size
*= 1024 * 1024;
3599 } else if (*p
== 'k' || *p
== 'K') {
3600 guest_stack_size
*= 1024;
3604 static void handle_arg_ld_prefix(const char *arg
)
3606 interp_prefix
= strdup(arg
);
3609 static void handle_arg_pagesize(const char *arg
)
3611 qemu_host_page_size
= atoi(arg
);
3612 if (qemu_host_page_size
== 0 ||
3613 (qemu_host_page_size
& (qemu_host_page_size
- 1)) != 0) {
3614 fprintf(stderr
, "page size must be a power of two\n");
3619 static void handle_arg_randseed(const char *arg
)
3621 unsigned long long seed
;
3623 if (parse_uint_full(arg
, &seed
, 0) != 0 || seed
> UINT_MAX
) {
3624 fprintf(stderr
, "Invalid seed number: %s\n", arg
);
3630 static void handle_arg_gdb(const char *arg
)
3632 gdbstub_port
= atoi(arg
);
3635 static void handle_arg_uname(const char *arg
)
3637 qemu_uname_release
= strdup(arg
);
3640 static void handle_arg_cpu(const char *arg
)
3642 cpu_model
= strdup(arg
);
3643 if (cpu_model
== NULL
|| is_help_option(cpu_model
)) {
3644 /* XXX: implement xxx_cpu_list for targets that still miss it */
3645 #if defined(cpu_list_id)
3646 cpu_list_id(stdout
, &fprintf
, "");
3647 #elif defined(cpu_list)
3648 cpu_list(stdout
, &fprintf
); /* deprecated */
3650 /* TODO: add cpu selection for alpha, microblaze, unicore32, s390x. */
3651 printf("Target ignores cpu selection\n");
3657 #if defined(CONFIG_USE_GUEST_BASE)
3658 static void handle_arg_guest_base(const char *arg
)
3660 guest_base
= strtol(arg
, NULL
, 0);
3661 have_guest_base
= 1;
3664 static void handle_arg_reserved_va(const char *arg
)
3668 reserved_va
= strtoul(arg
, &p
, 0);
3682 unsigned long unshifted
= reserved_va
;
3684 reserved_va
<<= shift
;
3685 if (((reserved_va
>> shift
) != unshifted
)
3686 #if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
3687 || (reserved_va
> (1ul << TARGET_VIRT_ADDR_SPACE_BITS
))
3690 fprintf(stderr
, "Reserved virtual address too big\n");
3695 fprintf(stderr
, "Unrecognised -R size suffix '%s'\n", p
);
3701 static void handle_arg_singlestep(const char *arg
)
3706 static void handle_arg_strace(const char *arg
)
3711 static void handle_arg_version(const char *arg
)
3713 printf("qemu-" TARGET_NAME
" version " QEMU_VERSION QEMU_PKGVERSION
3714 ", Copyright (c) 2003-2008 Fabrice Bellard\n");
3718 struct qemu_argument
{
3722 void (*handle_opt
)(const char *arg
);
3723 const char *example
;
3727 static const struct qemu_argument arg_table
[] = {
3728 {"h", "", false, handle_arg_help
,
3729 "", "print this help"},
3730 {"g", "QEMU_GDB", true, handle_arg_gdb
,
3731 "port", "wait gdb connection to 'port'"},
3732 {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix
,
3733 "path", "set the elf interpreter prefix to 'path'"},
3734 {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size
,
3735 "size", "set the stack size to 'size' bytes"},
3736 {"cpu", "QEMU_CPU", true, handle_arg_cpu
,
3737 "model", "select CPU (-cpu help for list)"},
3738 {"E", "QEMU_SET_ENV", true, handle_arg_set_env
,
3739 "var=value", "sets targets environment variable (see below)"},
3740 {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env
,
3741 "var", "unsets targets environment variable (see below)"},
3742 {"0", "QEMU_ARGV0", true, handle_arg_argv0
,
3743 "argv0", "forces target process argv[0] to be 'argv0'"},
3744 {"r", "QEMU_UNAME", true, handle_arg_uname
,
3745 "uname", "set qemu uname release string to 'uname'"},
3746 #if defined(CONFIG_USE_GUEST_BASE)
3747 {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base
,
3748 "address", "set guest_base address to 'address'"},
3749 {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va
,
3750 "size", "reserve 'size' bytes for guest virtual address space"},
3752 {"d", "QEMU_LOG", true, handle_arg_log
,
3753 "item[,...]", "enable logging of specified items "
3754 "(use '-d help' for a list of items)"},
3755 {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename
,
3756 "logfile", "write logs to 'logfile' (default stderr)"},
3757 {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize
,
3758 "pagesize", "set the host page size to 'pagesize'"},
3759 {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep
,
3760 "", "run in singlestep mode"},
3761 {"strace", "QEMU_STRACE", false, handle_arg_strace
,
3762 "", "log system calls"},
3763 {"seed", "QEMU_RAND_SEED", true, handle_arg_randseed
,
3764 "", "Seed for pseudo-random number generator"},
3765 {"version", "QEMU_VERSION", false, handle_arg_version
,
3766 "", "display version information and exit"},
3767 {NULL
, NULL
, false, NULL
, NULL
, NULL
}
3770 static void QEMU_NORETURN
usage(void)
3772 const struct qemu_argument
*arginfo
;
3776 printf("usage: qemu-" TARGET_NAME
" [options] program [arguments...]\n"
3777 "Linux CPU emulator (compiled for " TARGET_NAME
" emulation)\n"
3779 "Options and associated environment variables:\n"
3782 /* Calculate column widths. We must always have at least enough space
3783 * for the column header.
3785 maxarglen
= strlen("Argument");
3786 maxenvlen
= strlen("Env-variable");
3788 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
3789 int arglen
= strlen(arginfo
->argv
);
3790 if (arginfo
->has_arg
) {
3791 arglen
+= strlen(arginfo
->example
) + 1;
3793 if (strlen(arginfo
->env
) > maxenvlen
) {
3794 maxenvlen
= strlen(arginfo
->env
);
3796 if (arglen
> maxarglen
) {
3801 printf("%-*s %-*s Description\n", maxarglen
+1, "Argument",
3802 maxenvlen
, "Env-variable");
3804 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
3805 if (arginfo
->has_arg
) {
3806 printf("-%s %-*s %-*s %s\n", arginfo
->argv
,
3807 (int)(maxarglen
- strlen(arginfo
->argv
) - 1),
3808 arginfo
->example
, maxenvlen
, arginfo
->env
, arginfo
->help
);
3810 printf("-%-*s %-*s %s\n", maxarglen
, arginfo
->argv
,
3811 maxenvlen
, arginfo
->env
,
3818 "QEMU_LD_PREFIX = %s\n"
3819 "QEMU_STACK_SIZE = %ld byte\n",
3824 "You can use -E and -U options or the QEMU_SET_ENV and\n"
3825 "QEMU_UNSET_ENV environment variables to set and unset\n"
3826 "environment variables for the target process.\n"
3827 "It is possible to provide several variables by separating them\n"
3828 "by commas in getsubopt(3) style. Additionally it is possible to\n"
3829 "provide the -E and -U options multiple times.\n"
3830 "The following lines are equivalent:\n"
3831 " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
3832 " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
3833 " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
3834 "Note that if you provide several changes to a single variable\n"
3835 "the last change will stay in effect.\n");
3840 static int parse_args(int argc
, char **argv
)
3844 const struct qemu_argument
*arginfo
;
3846 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
3847 if (arginfo
->env
== NULL
) {
3851 r
= getenv(arginfo
->env
);
3853 arginfo
->handle_opt(r
);
3859 if (optind
>= argc
) {
3868 if (!strcmp(r
, "-")) {
3872 for (arginfo
= arg_table
; arginfo
->handle_opt
!= NULL
; arginfo
++) {
3873 if (!strcmp(r
, arginfo
->argv
)) {
3874 if (arginfo
->has_arg
) {
3875 if (optind
>= argc
) {
3878 arginfo
->handle_opt(argv
[optind
]);
3881 arginfo
->handle_opt(NULL
);
3887 /* no option matched the current argv */
3888 if (arginfo
->handle_opt
== NULL
) {
3893 if (optind
>= argc
) {
3897 filename
= argv
[optind
];
3898 exec_path
= argv
[optind
];
3903 int main(int argc
, char **argv
)
3905 struct target_pt_regs regs1
, *regs
= ®s1
;
3906 struct image_info info1
, *info
= &info1
;
3907 struct linux_binprm bprm
;
3912 char **target_environ
, **wrk
;
3919 module_call_init(MODULE_INIT_QOM
);
3921 if ((envlist
= envlist_create()) == NULL
) {
3922 (void) fprintf(stderr
, "Unable to allocate envlist\n");
3926 /* add current environment into the list */
3927 for (wrk
= environ
; *wrk
!= NULL
; wrk
++) {
3928 (void) envlist_setenv(envlist
, *wrk
);
3931 /* Read the stack limit from the kernel. If it's "unlimited",
3932 then we can do little else besides use the default. */
3935 if (getrlimit(RLIMIT_STACK
, &lim
) == 0
3936 && lim
.rlim_cur
!= RLIM_INFINITY
3937 && lim
.rlim_cur
== (target_long
)lim
.rlim_cur
) {
3938 guest_stack_size
= lim
.rlim_cur
;
3943 #if defined(cpudef_setup)
3944 cpudef_setup(); /* parse cpu definitions in target config file (TBD) */
3949 optind
= parse_args(argc
, argv
);
3952 memset(regs
, 0, sizeof(struct target_pt_regs
));
3954 /* Zero out image_info */
3955 memset(info
, 0, sizeof(struct image_info
));
3957 memset(&bprm
, 0, sizeof (bprm
));
3959 /* Scan interp_prefix dir for replacement files. */
3960 init_paths(interp_prefix
);
3962 init_qemu_uname_release();
3964 if (cpu_model
== NULL
) {
3965 #if defined(TARGET_I386)
3966 #ifdef TARGET_X86_64
3967 cpu_model
= "qemu64";
3969 cpu_model
= "qemu32";
3971 #elif defined(TARGET_ARM)
3973 #elif defined(TARGET_UNICORE32)
3975 #elif defined(TARGET_M68K)
3977 #elif defined(TARGET_SPARC)
3978 #ifdef TARGET_SPARC64
3979 cpu_model
= "TI UltraSparc II";
3981 cpu_model
= "Fujitsu MB86904";
3983 #elif defined(TARGET_MIPS)
3984 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
3989 #elif defined TARGET_OPENRISC
3990 cpu_model
= "or1200";
3991 #elif defined(TARGET_PPC)
3992 # ifdef TARGET_PPC64
3993 cpu_model
= "POWER7";
4002 cpu_exec_init_all();
4003 /* NOTE: we need to init the CPU at this stage to get
4004 qemu_host_page_size */
4005 env
= cpu_init(cpu_model
);
4007 fprintf(stderr
, "Unable to find CPU definition\n");
4010 cpu
= ENV_GET_CPU(env
);
4015 if (getenv("QEMU_STRACE")) {
4019 if (getenv("QEMU_RAND_SEED")) {
4020 handle_arg_randseed(getenv("QEMU_RAND_SEED"));
4023 target_environ
= envlist_to_environ(envlist
, NULL
);
4024 envlist_free(envlist
);
4026 #if defined(CONFIG_USE_GUEST_BASE)
4028 * Now that page sizes are configured in cpu_init() we can do
4029 * proper page alignment for guest_base.
4031 guest_base
= HOST_PAGE_ALIGN(guest_base
);
4033 if (reserved_va
|| have_guest_base
) {
4034 guest_base
= init_guest_space(guest_base
, reserved_va
, 0,
4036 if (guest_base
== (unsigned long)-1) {
4037 fprintf(stderr
, "Unable to reserve 0x%lx bytes of virtual address "
4038 "space for use as guest address space (check your virtual "
4039 "memory ulimit setting or reserve less using -R option)\n",
4045 mmap_next_start
= reserved_va
;
4048 #endif /* CONFIG_USE_GUEST_BASE */
4051 * Read in mmap_min_addr kernel parameter. This value is used
4052 * When loading the ELF image to determine whether guest_base
4053 * is needed. It is also used in mmap_find_vma.
4058 if ((fp
= fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL
) {
4060 if (fscanf(fp
, "%lu", &tmp
) == 1) {
4061 mmap_min_addr
= tmp
;
4062 qemu_log("host mmap_min_addr=0x%lx\n", mmap_min_addr
);
4069 * Prepare copy of argv vector for target.
4071 target_argc
= argc
- optind
;
4072 target_argv
= calloc(target_argc
+ 1, sizeof (char *));
4073 if (target_argv
== NULL
) {
4074 (void) fprintf(stderr
, "Unable to allocate memory for target_argv\n");
4079 * If argv0 is specified (using '-0' switch) we replace
4080 * argv[0] pointer with the given one.
4083 if (argv0
!= NULL
) {
4084 target_argv
[i
++] = strdup(argv0
);
4086 for (; i
< target_argc
; i
++) {
4087 target_argv
[i
] = strdup(argv
[optind
+ i
]);
4089 target_argv
[target_argc
] = NULL
;
4091 ts
= g_malloc0 (sizeof(TaskState
));
4092 init_task_state(ts
);
4093 /* build Task State */
4099 execfd
= qemu_getauxval(AT_EXECFD
);
4101 execfd
= open(filename
, O_RDONLY
);
4103 printf("Error while loading %s: %s\n", filename
, strerror(errno
));
4108 ret
= loader_exec(execfd
, filename
, target_argv
, target_environ
, regs
,
4111 printf("Error while loading %s: %s\n", filename
, strerror(-ret
));
4115 for (wrk
= target_environ
; *wrk
; wrk
++) {
4119 free(target_environ
);
4121 if (qemu_log_enabled()) {
4122 #if defined(CONFIG_USE_GUEST_BASE)
4123 qemu_log("guest_base 0x%" PRIxPTR
"\n", guest_base
);
4127 qemu_log("start_brk 0x" TARGET_ABI_FMT_lx
"\n", info
->start_brk
);
4128 qemu_log("end_code 0x" TARGET_ABI_FMT_lx
"\n", info
->end_code
);
4129 qemu_log("start_code 0x" TARGET_ABI_FMT_lx
"\n",
4131 qemu_log("start_data 0x" TARGET_ABI_FMT_lx
"\n",
4133 qemu_log("end_data 0x" TARGET_ABI_FMT_lx
"\n", info
->end_data
);
4134 qemu_log("start_stack 0x" TARGET_ABI_FMT_lx
"\n",
4136 qemu_log("brk 0x" TARGET_ABI_FMT_lx
"\n", info
->brk
);
4137 qemu_log("entry 0x" TARGET_ABI_FMT_lx
"\n", info
->entry
);
4140 target_set_brk(info
->brk
);
4144 #if defined(CONFIG_USE_GUEST_BASE)
4145 /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
4146 generating the prologue until now so that the prologue can take
4147 the real value of GUEST_BASE into account. */
4148 tcg_prologue_init(&tcg_ctx
);
4151 #if defined(TARGET_I386)
4152 env
->cr
[0] = CR0_PG_MASK
| CR0_WP_MASK
| CR0_PE_MASK
;
4153 env
->hflags
|= HF_PE_MASK
| HF_CPL_MASK
;
4154 if (env
->features
[FEAT_1_EDX
] & CPUID_SSE
) {
4155 env
->cr
[4] |= CR4_OSFXSR_MASK
;
4156 env
->hflags
|= HF_OSFXSR_MASK
;
4158 #ifndef TARGET_ABI32
4159 /* enable 64 bit mode if possible */
4160 if (!(env
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_LM
)) {
4161 fprintf(stderr
, "The selected x86 CPU does not support 64 bit mode\n");
4164 env
->cr
[4] |= CR4_PAE_MASK
;
4165 env
->efer
|= MSR_EFER_LMA
| MSR_EFER_LME
;
4166 env
->hflags
|= HF_LMA_MASK
;
4169 /* flags setup : we activate the IRQs by default as in user mode */
4170 env
->eflags
|= IF_MASK
;
4172 /* linux register setup */
4173 #ifndef TARGET_ABI32
4174 env
->regs
[R_EAX
] = regs
->rax
;
4175 env
->regs
[R_EBX
] = regs
->rbx
;
4176 env
->regs
[R_ECX
] = regs
->rcx
;
4177 env
->regs
[R_EDX
] = regs
->rdx
;
4178 env
->regs
[R_ESI
] = regs
->rsi
;
4179 env
->regs
[R_EDI
] = regs
->rdi
;
4180 env
->regs
[R_EBP
] = regs
->rbp
;
4181 env
->regs
[R_ESP
] = regs
->rsp
;
4182 env
->eip
= regs
->rip
;
4184 env
->regs
[R_EAX
] = regs
->eax
;
4185 env
->regs
[R_EBX
] = regs
->ebx
;
4186 env
->regs
[R_ECX
] = regs
->ecx
;
4187 env
->regs
[R_EDX
] = regs
->edx
;
4188 env
->regs
[R_ESI
] = regs
->esi
;
4189 env
->regs
[R_EDI
] = regs
->edi
;
4190 env
->regs
[R_EBP
] = regs
->ebp
;
4191 env
->regs
[R_ESP
] = regs
->esp
;
4192 env
->eip
= regs
->eip
;
4195 /* linux interrupt setup */
4196 #ifndef TARGET_ABI32
4197 env
->idt
.limit
= 511;
4199 env
->idt
.limit
= 255;
4201 env
->idt
.base
= target_mmap(0, sizeof(uint64_t) * (env
->idt
.limit
+ 1),
4202 PROT_READ
|PROT_WRITE
,
4203 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4204 idt_table
= g2h(env
->idt
.base
);
4227 /* linux segment setup */
4229 uint64_t *gdt_table
;
4230 env
->gdt
.base
= target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES
,
4231 PROT_READ
|PROT_WRITE
,
4232 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4233 env
->gdt
.limit
= sizeof(uint64_t) * TARGET_GDT_ENTRIES
- 1;
4234 gdt_table
= g2h(env
->gdt
.base
);
4236 write_dt(&gdt_table
[__USER_CS
>> 3], 0, 0xfffff,
4237 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4238 (3 << DESC_DPL_SHIFT
) | (0xa << DESC_TYPE_SHIFT
));
4240 /* 64 bit code segment */
4241 write_dt(&gdt_table
[__USER_CS
>> 3], 0, 0xfffff,
4242 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4244 (3 << DESC_DPL_SHIFT
) | (0xa << DESC_TYPE_SHIFT
));
4246 write_dt(&gdt_table
[__USER_DS
>> 3], 0, 0xfffff,
4247 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
| DESC_S_MASK
|
4248 (3 << DESC_DPL_SHIFT
) | (0x2 << DESC_TYPE_SHIFT
));
4250 cpu_x86_load_seg(env
, R_CS
, __USER_CS
);
4251 cpu_x86_load_seg(env
, R_SS
, __USER_DS
);
4253 cpu_x86_load_seg(env
, R_DS
, __USER_DS
);
4254 cpu_x86_load_seg(env
, R_ES
, __USER_DS
);
4255 cpu_x86_load_seg(env
, R_FS
, __USER_DS
);
4256 cpu_x86_load_seg(env
, R_GS
, __USER_DS
);
4257 /* This hack makes Wine work... */
4258 env
->segs
[R_FS
].selector
= 0;
4260 cpu_x86_load_seg(env
, R_DS
, 0);
4261 cpu_x86_load_seg(env
, R_ES
, 0);
4262 cpu_x86_load_seg(env
, R_FS
, 0);
4263 cpu_x86_load_seg(env
, R_GS
, 0);
4265 #elif defined(TARGET_AARCH64)
4269 if (!(arm_feature(env
, ARM_FEATURE_AARCH64
))) {
4271 "The selected ARM CPU does not support 64 bit mode\n");
4275 for (i
= 0; i
< 31; i
++) {
4276 env
->xregs
[i
] = regs
->regs
[i
];
4279 env
->xregs
[31] = regs
->sp
;
4281 #elif defined(TARGET_ARM)
4284 cpsr_write(env
, regs
->uregs
[16], 0xffffffff);
4285 for(i
= 0; i
< 16; i
++) {
4286 env
->regs
[i
] = regs
->uregs
[i
];
4288 #ifdef TARGET_WORDS_BIGENDIAN
4290 if (EF_ARM_EABI_VERSION(info
->elf_flags
) >= EF_ARM_EABI_VER4
4291 && (info
->elf_flags
& EF_ARM_BE8
)) {
4292 env
->uncached_cpsr
|= CPSR_E
;
4293 env
->signal_cpsr_e
= CPSR_E
;
4295 if (arm_feature(env
, ARM_FEATURE_V7
)) {
4296 fprintf(stderr
, "BE32 binaries only supported until ARMv6\n");
4299 env
->cp15
.c1_sys
|= SCTLR_B
;
4303 #elif defined(TARGET_UNICORE32)
4306 cpu_asr_write(env
, regs
->uregs
[32], 0xffffffff);
4307 for (i
= 0; i
< 32; i
++) {
4308 env
->regs
[i
] = regs
->uregs
[i
];
4311 #elif defined(TARGET_SPARC)
4315 env
->npc
= regs
->npc
;
4317 for(i
= 0; i
< 8; i
++)
4318 env
->gregs
[i
] = regs
->u_regs
[i
];
4319 for(i
= 0; i
< 8; i
++)
4320 env
->regwptr
[i
] = regs
->u_regs
[i
+ 8];
4322 #elif defined(TARGET_PPC)
4326 #if defined(TARGET_PPC64)
4327 #if defined(TARGET_ABI32)
4328 env
->msr
&= ~((target_ulong
)1 << MSR_SF
);
4330 env
->msr
|= (target_ulong
)1 << MSR_SF
;
4333 env
->nip
= regs
->nip
;
4334 for(i
= 0; i
< 32; i
++) {
4335 env
->gpr
[i
] = regs
->gpr
[i
];
4338 #elif defined(TARGET_M68K)
4341 env
->dregs
[0] = regs
->d0
;
4342 env
->dregs
[1] = regs
->d1
;
4343 env
->dregs
[2] = regs
->d2
;
4344 env
->dregs
[3] = regs
->d3
;
4345 env
->dregs
[4] = regs
->d4
;
4346 env
->dregs
[5] = regs
->d5
;
4347 env
->dregs
[6] = regs
->d6
;
4348 env
->dregs
[7] = regs
->d7
;
4349 env
->aregs
[0] = regs
->a0
;
4350 env
->aregs
[1] = regs
->a1
;
4351 env
->aregs
[2] = regs
->a2
;
4352 env
->aregs
[3] = regs
->a3
;
4353 env
->aregs
[4] = regs
->a4
;
4354 env
->aregs
[5] = regs
->a5
;
4355 env
->aregs
[6] = regs
->a6
;
4356 env
->aregs
[7] = regs
->usp
;
4358 ts
->sim_syscalls
= 1;
4360 #elif defined(TARGET_MICROBLAZE)
4362 env
->regs
[0] = regs
->r0
;
4363 env
->regs
[1] = regs
->r1
;
4364 env
->regs
[2] = regs
->r2
;
4365 env
->regs
[3] = regs
->r3
;
4366 env
->regs
[4] = regs
->r4
;
4367 env
->regs
[5] = regs
->r5
;
4368 env
->regs
[6] = regs
->r6
;
4369 env
->regs
[7] = regs
->r7
;
4370 env
->regs
[8] = regs
->r8
;
4371 env
->regs
[9] = regs
->r9
;
4372 env
->regs
[10] = regs
->r10
;
4373 env
->regs
[11] = regs
->r11
;
4374 env
->regs
[12] = regs
->r12
;
4375 env
->regs
[13] = regs
->r13
;
4376 env
->regs
[14] = regs
->r14
;
4377 env
->regs
[15] = regs
->r15
;
4378 env
->regs
[16] = regs
->r16
;
4379 env
->regs
[17] = regs
->r17
;
4380 env
->regs
[18] = regs
->r18
;
4381 env
->regs
[19] = regs
->r19
;
4382 env
->regs
[20] = regs
->r20
;
4383 env
->regs
[21] = regs
->r21
;
4384 env
->regs
[22] = regs
->r22
;
4385 env
->regs
[23] = regs
->r23
;
4386 env
->regs
[24] = regs
->r24
;
4387 env
->regs
[25] = regs
->r25
;
4388 env
->regs
[26] = regs
->r26
;
4389 env
->regs
[27] = regs
->r27
;
4390 env
->regs
[28] = regs
->r28
;
4391 env
->regs
[29] = regs
->r29
;
4392 env
->regs
[30] = regs
->r30
;
4393 env
->regs
[31] = regs
->r31
;
4394 env
->sregs
[SR_PC
] = regs
->pc
;
4396 #elif defined(TARGET_MIPS)
4400 for(i
= 0; i
< 32; i
++) {
4401 env
->active_tc
.gpr
[i
] = regs
->regs
[i
];
4403 env
->active_tc
.PC
= regs
->cp0_epc
& ~(target_ulong
)1;
4404 if (regs
->cp0_epc
& 1) {
4405 env
->hflags
|= MIPS_HFLAG_M16
;
4408 #elif defined(TARGET_OPENRISC)
4412 for (i
= 0; i
< 32; i
++) {
4413 env
->gpr
[i
] = regs
->gpr
[i
];
4419 #elif defined(TARGET_SH4)
4423 for(i
= 0; i
< 16; i
++) {
4424 env
->gregs
[i
] = regs
->regs
[i
];
4428 #elif defined(TARGET_ALPHA)
4432 for(i
= 0; i
< 28; i
++) {
4433 env
->ir
[i
] = ((abi_ulong
*)regs
)[i
];
4435 env
->ir
[IR_SP
] = regs
->usp
;
4438 #elif defined(TARGET_CRIS)
4440 env
->regs
[0] = regs
->r0
;
4441 env
->regs
[1] = regs
->r1
;
4442 env
->regs
[2] = regs
->r2
;
4443 env
->regs
[3] = regs
->r3
;
4444 env
->regs
[4] = regs
->r4
;
4445 env
->regs
[5] = regs
->r5
;
4446 env
->regs
[6] = regs
->r6
;
4447 env
->regs
[7] = regs
->r7
;
4448 env
->regs
[8] = regs
->r8
;
4449 env
->regs
[9] = regs
->r9
;
4450 env
->regs
[10] = regs
->r10
;
4451 env
->regs
[11] = regs
->r11
;
4452 env
->regs
[12] = regs
->r12
;
4453 env
->regs
[13] = regs
->r13
;
4454 env
->regs
[14] = info
->start_stack
;
4455 env
->regs
[15] = regs
->acr
;
4456 env
->pc
= regs
->erp
;
4458 #elif defined(TARGET_S390X)
4461 for (i
= 0; i
< 16; i
++) {
4462 env
->regs
[i
] = regs
->gprs
[i
];
4464 env
->psw
.mask
= regs
->psw
.mask
;
4465 env
->psw
.addr
= regs
->psw
.addr
;
4468 #error unsupported target CPU
4471 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4472 ts
->stack_base
= info
->start_stack
;
4473 ts
->heap_base
= info
->brk
;
4474 /* This will be filled in on the first SYS_HEAPINFO call. */
4479 if (gdbserver_start(gdbstub_port
) < 0) {
4480 fprintf(stderr
, "qemu: could not open gdbserver on port %d\n",
4484 gdb_handlesig(cpu
, 0);