 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
#include <sys/syscall.h>
#include <sys/resource.h>

#include "qemu-common.h"
#include "cache-utils.h"
#include "qemu-timer.h"

#define DEBUG_LOGFILE "/tmp/qemu.log"
const char *cpu_model;
unsigned long mmap_min_addr;
#if defined(CONFIG_USE_GUEST_BASE)
unsigned long guest_base;
unsigned long reserved_va;

static void usage(void);

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release = CONFIG_UNAME_RELEASE;

/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
   we allocate a bigger stack.  Need a better solution, for example
   by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;
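/*
 * Illustrative sketch (not part of the original file): guest_stack_size
 * above reserves a full 8 MB region up front because relying on the host's
 * MAP_GROWSDOWN behaviour is problematic, as the comment notes.  A fixed
 * reservation can be made with a plain anonymous mapping; demo_alloc_stack()
 * is an invented name used only for this example.
 */
#include <sys/mman.h>

static void *demo_alloc_stack(unsigned long size)
{
    /* Reserve the whole stack at once; the guest's SP starts at the top. */
    void *base = mmap(NULL, size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return base == MAP_FAILED ? NULL : (char *)base + size;
}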
void gemu_log(const char *fmt, ...)
    vfprintf(stderr, fmt, ap);

#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)

/* timers for rdtsc */

static uint64_t emu_time;

int64_t cpu_get_real_ticks(void)
#if defined(CONFIG_USE_NPTL)
/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work. */
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
static int pending_cpus;

/* Make sure everything is in a consistent state for calling fork().  */
void fork_start(void)
    pthread_mutex_lock(&tb_lock);
    pthread_mutex_lock(&exclusive_lock);

void fork_end(int child)
    mmap_fork_end(child);
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads.  */
        first_cpu = thread_env;
        thread_env->next_cpu = NULL;
        pthread_mutex_init(&exclusive_lock, NULL);
        pthread_mutex_init(&cpu_list_mutex, NULL);
        pthread_cond_init(&exclusive_cond, NULL);
        pthread_cond_init(&exclusive_resume, NULL);
        pthread_mutex_init(&tb_lock, NULL);
        gdbserver_fork(thread_env);
        pthread_mutex_unlock(&exclusive_lock);
        pthread_mutex_unlock(&tb_lock);

/* Wait for pending exclusive operations to complete.  The exclusive lock
   must be held.  */
static inline void exclusive_idle(void)
    while (pending_cpus) {
        pthread_cond_wait(&exclusive_resume, &exclusive_lock);

/* Start an exclusive operation.
   Must only be called from outside cpu_arm_exec.  */
static inline void start_exclusive(void)
    pthread_mutex_lock(&exclusive_lock);
    /* Make all other cpus stop executing.  */
    for (other = first_cpu; other; other = other->next_cpu) {
        if (other->running) {
    if (pending_cpus > 1) {
        pthread_cond_wait(&exclusive_cond, &exclusive_lock);

/* Finish an exclusive operation.  */
static inline void end_exclusive(void)
    pthread_cond_broadcast(&exclusive_resume);
    pthread_mutex_unlock(&exclusive_lock);

/* Wait for exclusive ops to finish, and begin cpu execution.  */
static inline void cpu_exec_start(CPUArchState *env)
    pthread_mutex_lock(&exclusive_lock);
    pthread_mutex_unlock(&exclusive_lock);

/* Mark cpu as not executing, and release pending exclusive ops.  */
static inline void cpu_exec_end(CPUArchState *env)
    pthread_mutex_lock(&exclusive_lock);
    if (pending_cpus > 1) {
    if (pending_cpus == 1) {
        pthread_cond_signal(&exclusive_cond);
    pthread_mutex_unlock(&exclusive_lock);

void cpu_list_lock(void)
    pthread_mutex_lock(&cpu_list_mutex);

void cpu_list_unlock(void)
    pthread_mutex_unlock(&cpu_list_mutex);
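/*
 * Illustrative sketch (not part of the original file): a minimal,
 * self-contained model of the "stop the world" handshake used by the
 * exclusive-operation helpers above.  The demo_* names are invented;
 * only the locking pattern mirrors the real code, which additionally
 * has to kick the other vCPU threads out of guest code.
 */
#include <pthread.h>
#include <stdbool.h>

typedef struct demo_cpu {
    bool running;                 /* true while executing guest code */
    struct demo_cpu *next;
} demo_cpu_t;

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t demo_all_stopped = PTHREAD_COND_INITIALIZER;
static pthread_cond_t demo_resume = PTHREAD_COND_INITIALIZER;
static demo_cpu_t *demo_first_cpu;
static int demo_pending;          /* 1 + number of cpus still to stop */

static void demo_start_exclusive(void)
{
    demo_cpu_t *cpu;

    pthread_mutex_lock(&demo_lock);
    while (demo_pending) {                        /* wait out a previous section */
        pthread_cond_wait(&demo_resume, &demo_lock);
    }
    demo_pending = 1;
    for (cpu = demo_first_cpu; cpu; cpu = cpu->next) {
        if (cpu->running) {
            demo_pending++;                       /* this cpu must stop first */
        }
    }
    while (demo_pending > 1) {                    /* cpus decrement as they stop */
        pthread_cond_wait(&demo_all_stopped, &demo_lock);
    }
    /* ...perform the guest atomic operation here, lock still held... */
}

static void demo_end_exclusive(void)
{
    demo_pending = 0;
    pthread_cond_broadcast(&demo_resume);
    pthread_mutex_unlock(&demo_lock);
}

/* Called by a cpu thread when it leaves guest code (cf. cpu_exec_end). */
static void demo_cpu_exec_end(demo_cpu_t *cpu)
{
    pthread_mutex_lock(&demo_lock);
    cpu->running = false;
    if (demo_pending > 1) {
        demo_pending--;
        if (demo_pending == 1) {
            pthread_cond_signal(&demo_all_stopped);  /* last one wakes the requester */
        }
    }
    pthread_mutex_unlock(&demo_lock);
}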
#else /* if !CONFIG_USE_NPTL */
/* These are no-ops because we are not threadsafe.  */
static inline void cpu_exec_start(CPUArchState *env)
static inline void cpu_exec_end(CPUArchState *env)
static inline void start_exclusive(void)
static inline void end_exclusive(void)
void fork_start(void)
void fork_end(int child)
    gdbserver_fork(thread_env);
void cpu_list_lock(void)
void cpu_list_unlock(void)
/***********************************************************/
/* CPUX86 core interface */

void cpu_smm_update(CPUX86State *env)

uint64_t cpu_get_tsc(CPUX86State *env)
    return cpu_get_real_ticks();

static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);

static uint64_t *idt_table;

static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p[2] = tswap32(addr >> 32);

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);

static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
    set_gate(idt_table + n, 0, dpl, 0, 0);
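/*
 * Illustrative sketch (not from the original file): how the e1/e2 words
 * built by set_gate() above encode an i386 gate descriptor, and how the
 * fields can be pulled back out.  demo_gate_t and the helper names are
 * invented for this example; the bit positions follow the shifts used
 * in set_gate() (offset split across the two words, selector in e1,
 * present bit 0x8000, DPL at bits 13-14, type at bits 8-11 of e2).
 */
#include <stdint.h>

typedef struct {
    unsigned int type, dpl, sel;
    uint32_t addr;
} demo_gate_t;

static void demo_pack_gate(uint32_t *e1, uint32_t *e2, const demo_gate_t *g)
{
    *e1 = (g->addr & 0xffff) | (g->sel << 16);
    *e2 = (g->addr & 0xffff0000) | 0x8000 | (g->dpl << 13) | (g->type << 8);
}

static demo_gate_t demo_unpack_gate(uint32_t e1, uint32_t e2)
{
    demo_gate_t g;
    g.addr = (e2 & 0xffff0000) | (e1 & 0xffff);
    g.sel  = e1 >> 16;
    g.dpl  = (e2 >> 13) & 0x3;
    g.type = (e2 >> 8) & 0xf;
    return g;
}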
void cpu_loop(CPUX86State *env)
    target_siginfo_t info;

    trapnr = cpu_x86_exec(env);
    /* linux syscall from int $0x80 */
    env->regs[R_EAX] = do_syscall(env,
    /* linux syscall from syscall instruction */
    env->regs[R_EAX] = do_syscall(env,
    env->eip = env->exception_next_eip;
    info.si_signo = SIGBUS;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._sigfault._addr = 0;
    queue_signal(env, info.si_signo, &info);
    /* XXX: potential problem if ABI32 */
#ifndef TARGET_X86_64
    if (env->eflags & VM_MASK) {
        handle_vm86_fault(env);
    info.si_signo = SIGSEGV;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._sigfault._addr = 0;
    queue_signal(env, info.si_signo, &info);
    info.si_signo = SIGSEGV;
    if (!(env->error_code & 1))
        info.si_code = TARGET_SEGV_MAPERR;
        info.si_code = TARGET_SEGV_ACCERR;
    info._sifields._sigfault._addr = env->cr[2];
    queue_signal(env, info.si_signo, &info);
#ifndef TARGET_X86_64
    if (env->eflags & VM_MASK) {
        handle_vm86_trap(env, trapnr);
    /* division by zero */
    info.si_signo = SIGFPE;
    info.si_code = TARGET_FPE_INTDIV;
    info._sifields._sigfault._addr = env->eip;
    queue_signal(env, info.si_signo, &info);
#ifndef TARGET_X86_64
    if (env->eflags & VM_MASK) {
        handle_vm86_trap(env, trapnr);
    info.si_signo = SIGTRAP;
    if (trapnr == EXCP01_DB) {
        info.si_code = TARGET_TRAP_BRKPT;
        info._sifields._sigfault._addr = env->eip;
        info.si_code = TARGET_SI_KERNEL;
        info._sifields._sigfault._addr = 0;
    queue_signal(env, info.si_signo, &info);
#ifndef TARGET_X86_64
    if (env->eflags & VM_MASK) {
        handle_vm86_trap(env, trapnr);
    info.si_signo = SIGSEGV;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._sigfault._addr = 0;
    queue_signal(env, info.si_signo, &info);
    info.si_signo = SIGILL;
    info.si_code = TARGET_ILL_ILLOPN;
    info._sifields._sigfault._addr = env->eip;
    queue_signal(env, info.si_signo, &info);
    /* just indicate that signals should be handled asap */
    sig = gdb_handlesig(env, TARGET_SIGTRAP);
    info.si_code = TARGET_TRAP_BRKPT;
    queue_signal(env, info.si_signo, &info);
    pc = env->segs[R_CS].base + env->eip;
    fprintf(stderr, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
    process_pending_signals(env);
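/*
 * Illustrative sketch (not from the original file): the recurring
 * trap-to-signal pattern used by the cpu_loop() functions in this file,
 * modelled with invented demo_* names so it stands alone.  The real code
 * fills a target_siginfo_t and passes it to queue_signal(env, sig, &info).
 */
#include <signal.h>

typedef struct {
    int si_signo;
    int si_code;
    unsigned long fault_addr;
} demo_siginfo_t;

/* Translate a faulting guest access into a SIGSEGV description. */
static demo_siginfo_t demo_segv_info(unsigned long fault_addr, int page_mapped)
{
    demo_siginfo_t info;

    info.si_signo = SIGSEGV;
    /* MAPERR: no mapping at all; ACCERR: mapped but wrong permissions. */
    info.si_code = page_mapped ? SEGV_ACCERR : SEGV_MAPERR;
    info.fault_addr = fault_addr;
    return info;
}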
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.

static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
    uint64_t oldval, newval, val;
    target_siginfo_t info;

    /* Based on the 32 bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations.  However things like ldrex/strex are much harder so
       there's not much point trying.  */
    cpsr = cpsr_read(env);
    if (get_user_u64(oldval, env->regs[0])) {
        env->cp15.c6_data = env->regs[0];
    if (get_user_u64(newval, env->regs[1])) {
        env->cp15.c6_data = env->regs[1];
    if (get_user_u64(val, addr)) {
        env->cp15.c6_data = addr;
    if (put_user_u64(val, addr)) {
        env->cp15.c6_data = addr;
    cpsr_write(env, cpsr, CPSR_C);

    /* We get the PC of the entry address - which is as good as anything,
       on a real kernel what you get depends on which mode it uses. */
    info.si_signo = SIGSEGV;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->cp15.c6_data;
    queue_signal(env, info.si_signo, &info);
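/*
 * Illustrative sketch (not from the original file): the semantics the
 * __kernel_cmpxchg64 helper above implements, written as ordinary C with
 * invented demo_* names.  The real helper additionally serialises the
 * guest CPUs (cf. start_exclusive()), sets the C flag in the CPSR, and
 * has to deliver SIGSEGV when a guest access faults.
 */
#include <stdint.h>

/* Returns 0 and stores *newval into *target if *target == *oldval,
   otherwise returns non-zero and leaves *target unchanged. */
static int demo_kernel_cmpxchg64(const uint64_t *oldval,
                                 const uint64_t *newval,
                                 uint64_t *target)
{
    if (*target == *oldval) {
        *target = *newval;
        return 0;           /* exchange happened: r0 = 0, CPSR.C set */
    }
    return 1;               /* no exchange: r0 != 0, CPSR.C clear */
}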
/* Handle a jump to the kernel code page.  */
do_kernel_trap(CPUARMState *env)
    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        /* ??? No-op.  Will need to do better for SMP. */
    case 0xffff0fc0: /* __kernel_cmpxchg */
        /* XXX: This only works between threads, not between processes.
           It's probably possible to implement this with native host
           operations.  However things like ldrex/strex are much harder so
           there's not much point trying.  */
        cpsr = cpsr_read(env);
        /* FIXME: This should SEGV if the access fails.  */
        if (get_user_u32(val, addr))
        if (val == env->regs[0]) {
            /* FIXME: Check for segfaults.  */
            put_user_u32(val, addr);
        cpsr_write(env, cpsr, CPSR_C);
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = env->cp15.c13_tls2;
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);

    /* Jump back to the caller.  */
    addr = env->regs[14];
    env->regs[15] = addr;

static int do_strex(CPUARMState *env)
    addr = env->exclusive_addr;
    if (addr != env->exclusive_test) {
    size = env->exclusive_info & 0xf;
        segv = get_user_u8(val, addr);
        segv = get_user_u16(val, addr);
        segv = get_user_u32(val, addr);
        env->cp15.c6_data = addr;
    if (val != env->exclusive_val) {
        segv = get_user_u32(val, addr + 4);
        env->cp15.c6_data = addr + 4;
    if (val != env->exclusive_high) {
    val = env->regs[(env->exclusive_info >> 8) & 0xf];
        segv = put_user_u8(val, addr);
        segv = put_user_u16(val, addr);
        segv = put_user_u32(val, addr);
        env->cp15.c6_data = addr;
    val = env->regs[(env->exclusive_info >> 12) & 0xf];
    segv = put_user_u32(val, addr + 4);
        env->cp15.c6_data = addr + 4;
    env->regs[(env->exclusive_info >> 4) & 0xf] = rc;
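/*
 * Illustrative sketch (not from the original file): how do_strex() above
 * appears to unpack env->exclusive_info.  The field layout below is
 * inferred from the shifts and masks in the code (size in bits [3:0],
 * result register in [7:4], value register in [11:8], second value
 * register for the 64-bit case in [15:12]); treat it as an assumption,
 * not a documented format.  The demo_* names are invented.
 */
#include <stdint.h>

typedef struct {
    unsigned size;      /* access size selector used by the switch above */
    unsigned rd;        /* register receiving the success/failure code */
    unsigned rt;        /* register holding the value to store */
    unsigned rt2;       /* high-word register for the 64-bit strexd case */
} demo_strex_info_t;

static demo_strex_info_t demo_decode_exclusive_info(uint32_t exclusive_info)
{
    demo_strex_info_t d;
    d.size = exclusive_info & 0xf;
    d.rd   = (exclusive_info >> 4) & 0xf;
    d.rt   = (exclusive_info >> 8) & 0xf;
    d.rt2  = (exclusive_info >> 12) & 0xf;
    return d;
}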
void cpu_loop(CPUARMState *env)
    unsigned int n, insn;
    target_siginfo_t info;

    trapnr = cpu_arm_exec(env);
        TaskState *ts = env->opaque;
        /* we handle the FPU emulation here, as Linux */
        /* we get the opcode */
        /* FIXME - what to do if get_user() fails? */
        get_user_u32(opcode, env->regs[15]);
        rc = EmulateAll(opcode, &ts->fpa, env);
        if (rc == 0) { /* illegal instruction */
            info.si_signo = SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->regs[15];
            queue_signal(env, info.si_signo, &info);
        } else if (rc < 0) { /* FP exception */
            /* translate softfloat flags to FPSR flags */
            if (-rc & float_flag_invalid)
            if (-rc & float_flag_divbyzero)
            if (-rc & float_flag_overflow)
            if (-rc & float_flag_underflow)
            if (-rc & float_flag_inexact)
            FPSR fpsr = ts->fpa.fpsr;
            //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
            if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
                info.si_signo = SIGFPE;
                /* ordered by priority, least first */
                if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES;
                if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND;
                if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF;
                if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV;
                if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV;
                info._sifields._sigfault._addr = env->regs[15];
                queue_signal(env, info.si_signo, &info);
                /* accumulate unenabled exceptions */
                if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC))
                if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC))
                if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC))
                if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC))
                if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC))
        } else { /* everything OK */
        if (trapnr == EXCP_BKPT) {
            /* FIXME - what to do if get_user() fails? */
            get_user_u16(insn, env->regs[15]);
            /* FIXME - what to do if get_user() fails? */
            get_user_u32(insn, env->regs[15]);
            n = (insn & 0xf) | ((insn >> 4) & 0xff0);
            /* FIXME - what to do if get_user() fails? */
            get_user_u16(insn, env->regs[15] - 2);
            /* FIXME - what to do if get_user() fails? */
            get_user_u32(insn, env->regs[15] - 4);
        if (n == ARM_NR_cacheflush) {
        } else if (n == ARM_NR_semihosting
                   || n == ARM_NR_thumb_semihosting) {
            env->regs[0] = do_arm_semihosting(env);
        } else if (n == 0 || n >= ARM_SYSCALL_BASE
                   || (env->thumb && n == ARM_THUMB_SYSCALL)) {
            if (env->thumb || n == 0) {
                n -= ARM_SYSCALL_BASE;
            if (n > ARM_NR_BASE) {
                case ARM_NR_cacheflush:
                    cpu_set_tls(env, env->regs[0]);
                    gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
                    env->regs[0] = -TARGET_ENOSYS;
                env->regs[0] = do_syscall(env,
        /* just indicate that signals should be handled asap */
    case EXCP_PREFETCH_ABORT:
        addr = env->cp15.c6_insn;
    case EXCP_DATA_ABORT:
        addr = env->cp15.c6_data;
        info.si_signo = SIGSEGV;
        /* XXX: check env->error_code */
        info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = addr;
        queue_signal(env, info.si_signo, &info);
        sig = gdb_handlesig(env, TARGET_SIGTRAP);
        info.si_code = TARGET_TRAP_BRKPT;
        queue_signal(env, info.si_signo, &info);
    case EXCP_KERNEL_TRAP:
        if (do_kernel_trap(env))
        addr = env->cp15.c6_data;
    fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
    cpu_dump_state(env, stderr, fprintf, 0);
    process_pending_signals(env);
#ifdef TARGET_UNICORE32

void cpu_loop(CPUUniCore32State *env)
    unsigned int n, insn;
    target_siginfo_t info;

    trapnr = uc32_cpu_exec(env);
        get_user_u32(insn, env->regs[31] - 4);
        if (n >= UC32_SYSCALL_BASE) {
            n -= UC32_SYSCALL_BASE;
            if (n == UC32_SYSCALL_NR_set_tls) {
                cpu_set_tls(env, env->regs[0]);
            env->regs[0] = do_syscall(env,
        info.si_signo = SIGSEGV;
        /* XXX: check env->error_code */
        info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = env->cp0.c4_faultaddr;
        queue_signal(env, info.si_signo, &info);
        /* just indicate that signals should be handled asap */
        sig = gdb_handlesig(env, TARGET_SIGTRAP);
        info.si_code = TARGET_TRAP_BRKPT;
        queue_signal(env, info.si_signo, &info);
    process_pending_signals(env);

    fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
    cpu_dump_state(env, stderr, fprintf, 0);
#define SPARC64_STACK_BIAS 2047

/* WARNING: dealing with register windows _is_ complicated.  More info
   can be found at http://www.sics.se/~psm/sparcstack.html */
static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
    index = (index + cwp * 16) % (16 * env->nwindows);
    /* wrap handling : if cwp is on the last window, then we use the
       registers 'after' the end */
    if (index < 8 && env->cwp == env->nwindows - 1)
        index += 16 * env->nwindows;
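/*
 * Illustrative sketch (not from the original file): the index arithmetic
 * performed by get_reg_index() above, with invented demo_* names so it
 * can be read in isolation.  The register file is a flat array of
 * 16 * nwindows registers; cur_cwp stands in for the CPU's current
 * window pointer that the original reads from env->cwp.
 */
static int demo_window_reg_index(int cwp, int index, int cur_cwp, int nwindows)
{
    int i = (index + cwp * 16) % (16 * nwindows);

    /* wrap handling, as above: when the current window is the last one,
       the low registers alias past the end of the array */
    if (i < 8 && cur_cwp == nwindows - 1) {
        i += 16 * nwindows;
    }
    return i;
}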
/* save the register window 'cwp1' */
static inline void save_window_offset(CPUSPARCState *env, int cwp1)
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
    sp_ptr += SPARC64_STACK_BIAS;
#if defined(DEBUG_WIN)
    printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",
    for (i = 0; i < 16; i++) {
        /* FIXME - what to do if put_user() fails? */
        put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);

static void save_window(CPUSPARCState *env)
#ifndef TARGET_SPARC64
    unsigned int new_wim;
    new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));

static void restore_window(CPUSPARCState *env)
#ifndef TARGET_SPARC64
    unsigned int new_wim;
    unsigned int i, cwp1;
#ifndef TARGET_SPARC64
    new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) &
        ((1LL << env->nwindows) - 1);
    /* restore the invalid window */
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#ifdef TARGET_SPARC64
    sp_ptr += SPARC64_STACK_BIAS;
#if defined(DEBUG_WIN)
    printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n",
    for (i = 0; i < 16; i++) {
        /* FIXME - what to do if get_user() fails? */
        get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
        sp_ptr += sizeof(abi_ulong);
#ifdef TARGET_SPARC64
    if (env->cleanwin < env->nwindows - 1)

static void flush_windows(CPUSPARCState *env)
    /* if restore would invoke restore_window(), then we can stop */
    cwp1 = cpu_cwp_inc(env, env->cwp + offset);
#ifndef TARGET_SPARC64
    if (env->wim & (1 << cwp1))
    if (env->canrestore == 0)
    save_window_offset(env, cwp1);
    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
#ifndef TARGET_SPARC64
    /* set wim so that restore will reload the registers */
    env->wim = 1 << cwp1;
#if defined(DEBUG_WIN)
    printf("flush_windows: nb=%d\n", offset - 1);
void cpu_loop(CPUSPARCState *env)
    target_siginfo_t info;

    trapnr = cpu_sparc_exec(env);
#ifndef TARGET_SPARC64
        ret = do_syscall(env, env->gregs[1],
                         env->regwptr[0], env->regwptr[1],
                         env->regwptr[2], env->regwptr[3],
                         env->regwptr[4], env->regwptr[5],
        if ((abi_ulong)ret >= (abi_ulong)(-515)) {
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
            env->xcc |= PSR_CARRY;
            env->psr |= PSR_CARRY;
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
            env->xcc &= ~PSR_CARRY;
            env->psr &= ~PSR_CARRY;
        env->regwptr[0] = ret;
        /* next instruction */
        env->npc = env->npc + 4;
    case 0x83: /* flush windows */
        /* next instruction */
        env->npc = env->npc + 4;
#ifndef TARGET_SPARC64
    case TT_WIN_OVF: /* window overflow */
    case TT_WIN_UNF: /* window underflow */
        restore_window(env);
        info.si_signo = TARGET_SIGSEGV;
        /* XXX: check env->error_code */
        info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = env->mmuregs[4];
        queue_signal(env, info.si_signo, &info);
    case TT_SPILL: /* window overflow */
    case TT_FILL: /* window underflow */
        restore_window(env);
        info.si_signo = TARGET_SIGSEGV;
        /* XXX: check env->error_code */
        info.si_code = TARGET_SEGV_MAPERR;
        if (trapnr == TT_DFAULT)
            info._sifields._sigfault._addr = env->dmmuregs[4];
            info._sifields._sigfault._addr = cpu_tsptr(env)->tpc;
        queue_signal(env, info.si_signo, &info);
#ifndef TARGET_ABI32
        sparc64_get_context(env);
        sparc64_set_context(env);
    case EXCP_INTERRUPT:
        /* just indicate that signals should be handled asap */
        info.si_signo = TARGET_SIGILL;
        info.si_code = TARGET_ILL_ILLOPC;
        info._sifields._sigfault._addr = env->pc;
        queue_signal(env, info.si_signo, &info);
        sig = gdb_handlesig(env, TARGET_SIGTRAP);
        info.si_signo = sig;
        info.si_code = TARGET_TRAP_BRKPT;
        queue_signal(env, info.si_signo, &info);
        printf("Unhandled trap: 0x%x\n", trapnr);
        cpu_dump_state(env, stderr, fprintf, 0);
    process_pending_signals(env);
static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env)

uint64_t cpu_ppc_load_tbl(CPUPPCState *env)
    return cpu_ppc_get_tb(env);

uint32_t cpu_ppc_load_tbu(CPUPPCState *env)
    return cpu_ppc_get_tb(env) >> 32;

uint64_t cpu_ppc_load_atbl(CPUPPCState *env)
    return cpu_ppc_get_tb(env);

uint32_t cpu_ppc_load_atbu(CPUPPCState *env)
    return cpu_ppc_get_tb(env) >> 32;

uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env)
    __attribute__ (( alias ("cpu_ppc_load_tbu") ));

uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env)
    return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
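/*
 * Illustrative sketch (not from the original file): the accessors above
 * expose one 64-bit timebase as a TBU/TBL pair (upper and lower 32 bits).
 * A 32-bit guest reading the halves separately classically re-reads TBU
 * to detect a carry between the two reads; demo_read_tb64() models that
 * loop.  The demo_* names are invented, and demo_tb stands in for
 * whatever cpu_ppc_get_tb() returns.
 */
#include <stdint.h>

static uint64_t demo_tb;

static uint32_t demo_load_tbu(void) { return demo_tb >> 32; }
static uint32_t demo_load_tbl(void) { return (uint32_t)demo_tb; }

static uint64_t demo_read_tb64(void)
{
    uint32_t hi, lo;

    do {
        hi = demo_load_tbu();
        lo = demo_load_tbl();
    } while (demo_load_tbu() != hi);    /* retry if TBL wrapped into TBU */
    return ((uint64_t)hi << 32) | lo;
}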
/* XXX: to be fixed */
int ppc_dcr_read(ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)

int ppc_dcr_write(ppc_dcr_t *dcr_env, int dcrn, uint32_t val)

#define EXCP_DUMP(env, fmt, ...)                                        \
    fprintf(stderr, fmt, ## __VA_ARGS__);                               \
    cpu_dump_state(env, stderr, fprintf, 0);                            \
    qemu_log(fmt, ## __VA_ARGS__);                                      \
    log_cpu_state(env, 0);                                              \

static int do_store_exclusive(CPUPPCState *env)
    target_ulong page_addr;

    addr = env->reserve_ea;
    page_addr = addr & TARGET_PAGE_MASK;
    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {
        int reg = env->reserve_info & 0x1f;
        int size = (env->reserve_info >> 5) & 0xf;
        if (addr == env->reserve_addr) {
            case 1: segv = get_user_u8(val, addr); break;
            case 2: segv = get_user_u16(val, addr); break;
            case 4: segv = get_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
            case 8: segv = get_user_u64(val, addr); break;
            if (!segv && val == env->reserve_val) {
                val = env->gpr[reg];
                case 1: segv = put_user_u8(val, addr); break;
                case 2: segv = put_user_u16(val, addr); break;
                case 4: segv = put_user_u32(val, addr); break;
#if defined(TARGET_PPC64)
                case 8: segv = put_user_u64(val, addr); break;
    env->crf[0] = (stored << 1) | xer_so;
    env->reserve_addr = (target_ulong)-1;
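/*
 * Illustrative sketch (not from the original file): the load-reserve /
 * store-conditional emulation that do_store_exclusive() above performs,
 * reduced to plain C with invented demo_* names.  The conditional store
 * only goes through if the reserved address still holds the value seen
 * at reservation time; the success bit is what the real code folds into
 * CR0 (shifted left by 1, with XER.SO in bit 0).
 */
#include <stddef.h>
#include <stdint.h>

typedef struct {
    uint32_t *reserve_addr;     /* address of the lwarx */
    uint32_t reserve_val;       /* value observed by the lwarx */
} demo_reservation_t;

static void demo_lwarx(demo_reservation_t *r, uint32_t *addr)
{
    r->reserve_addr = addr;
    r->reserve_val = *addr;
}

/* Returns 1 if the conditional store succeeded, 0 otherwise. */
static int demo_stwcx(demo_reservation_t *r, uint32_t *addr, uint32_t val)
{
    int stored = 0;

    if (addr == r->reserve_addr && *addr == r->reserve_val) {
        *addr = val;
        stored = 1;
    }
    r->reserve_addr = NULL;     /* the reservation is consumed either way */
    return stored;
}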
void cpu_loop(CPUPPCState *env)
    target_siginfo_t info;

    cpu_exec_start(env);
    trapnr = cpu_ppc_exec(env);
    case POWERPC_EXCP_NONE:
    case POWERPC_EXCP_CRITICAL: /* Critical input */
        cpu_abort(env, "Critical interrupt while in user mode. "
    case POWERPC_EXCP_MCHECK: /* Machine check exception */
        cpu_abort(env, "Machine check exception while in user mode. "
    case POWERPC_EXCP_DSI: /* Data storage exception */
        EXCP_DUMP(env, "Invalid data memory access: 0x" TARGET_FMT_lx "\n",
        /* XXX: check this.  Seems bugged */
        switch (env->error_code & 0xFF000000) {
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLADR;
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_ACCERR;
            /* Let's send a regular segfault... */
            EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = env->nip;
        queue_signal(env, info.si_signo, &info);
    case POWERPC_EXCP_ISI: /* Instruction storage exception */
        EXCP_DUMP(env, "Invalid instruction fetch: 0x" TARGET_FMT_lx
                  "\n", env->spr[SPR_SRR0]);
        /* XXX: check this */
        switch (env->error_code & 0xFF000000) {
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_ACCERR;
            /* Let's send a regular segfault... */
            EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = env->nip - 4;
        queue_signal(env, info.si_signo, &info);
    case POWERPC_EXCP_EXTERNAL: /* External input */
        cpu_abort(env, "External interrupt while in user mode. "
    case POWERPC_EXCP_ALIGN: /* Alignment exception */
        EXCP_DUMP(env, "Unaligned memory access\n");
        /* XXX: check this */
        info.si_signo = TARGET_SIGBUS;
        info.si_code = TARGET_BUS_ADRALN;
        info._sifields._sigfault._addr = env->nip - 4;
        queue_signal(env, info.si_signo, &info);
    case POWERPC_EXCP_PROGRAM: /* Program exception */
        /* XXX: check this */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            EXCP_DUMP(env, "Floating point program exception\n");
            info.si_signo = TARGET_SIGFPE;
            switch (env->error_code & 0xF) {
            case POWERPC_EXCP_FP_OX:
                info.si_code = TARGET_FPE_FLTOVF;
            case POWERPC_EXCP_FP_UX:
                info.si_code = TARGET_FPE_FLTUND;
            case POWERPC_EXCP_FP_ZX:
            case POWERPC_EXCP_FP_VXZDZ:
                info.si_code = TARGET_FPE_FLTDIV;
            case POWERPC_EXCP_FP_XX:
                info.si_code = TARGET_FPE_FLTRES;
            case POWERPC_EXCP_FP_VXSOFT:
                info.si_code = TARGET_FPE_FLTINV;
            case POWERPC_EXCP_FP_VXSNAN:
            case POWERPC_EXCP_FP_VXISI:
            case POWERPC_EXCP_FP_VXIDI:
            case POWERPC_EXCP_FP_VXIMZ:
            case POWERPC_EXCP_FP_VXVC:
            case POWERPC_EXCP_FP_VXSQRT:
            case POWERPC_EXCP_FP_VXCVI:
                info.si_code = TARGET_FPE_FLTSUB;
                EXCP_DUMP(env, "Unknown floating point exception (%02x)\n",
        case POWERPC_EXCP_INVAL:
            EXCP_DUMP(env, "Invalid instruction\n");
            info.si_signo = TARGET_SIGILL;
            switch (env->error_code & 0xF) {
            case POWERPC_EXCP_INVAL_INVAL:
                info.si_code = TARGET_ILL_ILLOPC;
            case POWERPC_EXCP_INVAL_LSWX:
                info.si_code = TARGET_ILL_ILLOPN;
            case POWERPC_EXCP_INVAL_SPR:
                info.si_code = TARGET_ILL_PRVREG;
            case POWERPC_EXCP_INVAL_FP:
                info.si_code = TARGET_ILL_COPROC;
                EXCP_DUMP(env, "Unknown invalid operation (%02x)\n",
                          env->error_code & 0xF);
                info.si_code = TARGET_ILL_ILLADR;
        case POWERPC_EXCP_PRIV:
            EXCP_DUMP(env, "Privilege violation\n");
            info.si_signo = TARGET_SIGILL;
            switch (env->error_code & 0xF) {
            case POWERPC_EXCP_PRIV_OPC:
                info.si_code = TARGET_ILL_PRVOPC;
            case POWERPC_EXCP_PRIV_REG:
                info.si_code = TARGET_ILL_PRVREG;
                EXCP_DUMP(env, "Unknown privilege violation (%02x)\n",
                          env->error_code & 0xF);
                info.si_code = TARGET_ILL_PRVOPC;
        case POWERPC_EXCP_TRAP:
            cpu_abort(env, "Tried to call a TRAP\n");
            /* Should not happen ! */
            cpu_abort(env, "Unknown program exception (%02x)\n",
        info._sifields._sigfault._addr = env->nip - 4;
        queue_signal(env, info.si_signo, &info);
    case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
        EXCP_DUMP(env, "No floating point allowed\n");
        info.si_signo = TARGET_SIGILL;
        info.si_code = TARGET_ILL_COPROC;
        info._sifields._sigfault._addr = env->nip - 4;
        queue_signal(env, info.si_signo, &info);
    case POWERPC_EXCP_SYSCALL: /* System call exception */
        cpu_abort(env, "Syscall exception while in user mode. "
    case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
        EXCP_DUMP(env, "No APU instruction allowed\n");
        info.si_signo = TARGET_SIGILL;
        info.si_code = TARGET_ILL_COPROC;
        info._sifields._sigfault._addr = env->nip - 4;
        queue_signal(env, info.si_signo, &info);
    case POWERPC_EXCP_DECR: /* Decrementer exception */
        cpu_abort(env, "Decrementer interrupt while in user mode. "
    case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
        cpu_abort(env, "Fixed-interval timer interrupt while in user mode. "
    case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
        cpu_abort(env, "Watchdog timer interrupt while in user mode. "
    case POWERPC_EXCP_DTLB: /* Data TLB error */
        cpu_abort(env, "Data TLB exception while in user mode. "
    case POWERPC_EXCP_ITLB: /* Instruction TLB error */
        cpu_abort(env, "Instruction TLB exception while in user mode. "
    case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */
        EXCP_DUMP(env, "No SPE/floating-point instruction allowed\n");
        info.si_signo = TARGET_SIGILL;
        info.si_code = TARGET_ILL_COPROC;
        info._sifields._sigfault._addr = env->nip - 4;
        queue_signal(env, info.si_signo, &info);
    case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */
        cpu_abort(env, "Embedded floating-point data IRQ not handled\n");
    case POWERPC_EXCP_EFPRI: /* Embedded floating-point round IRQ */
        cpu_abort(env, "Embedded floating-point round IRQ not handled\n");
    case POWERPC_EXCP_EPERFM: /* Embedded performance monitor IRQ */
        cpu_abort(env, "Performance monitor exception not handled\n");
    case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
        cpu_abort(env, "Doorbell interrupt while in user mode. "
    case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
        cpu_abort(env, "Doorbell critical interrupt while in user mode. "
    case POWERPC_EXCP_RESET: /* System reset exception */
        cpu_abort(env, "Reset interrupt while in user mode. "
    case POWERPC_EXCP_DSEG: /* Data segment exception */
        cpu_abort(env, "Data segment exception while in user mode. "
    case POWERPC_EXCP_ISEG: /* Instruction segment exception */
        cpu_abort(env, "Instruction segment exception "
                  "while in user mode. Aborting\n");
    /* PowerPC 64 with hypervisor mode support */
    case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
        cpu_abort(env, "Hypervisor decrementer interrupt "
                  "while in user mode. Aborting\n");
    case POWERPC_EXCP_TRACE: /* Trace exception */
        /* we use this exception to emulate step-by-step execution mode. */
    /* PowerPC 64 with hypervisor mode support */
    case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
        cpu_abort(env, "Hypervisor data storage exception "
                  "while in user mode. Aborting\n");
    case POWERPC_EXCP_HISI: /* Hypervisor instruction storage excp */
        cpu_abort(env, "Hypervisor instruction storage exception "
                  "while in user mode. Aborting\n");
    case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
        cpu_abort(env, "Hypervisor data segment exception "
                  "while in user mode. Aborting\n");
    case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment excp */
        cpu_abort(env, "Hypervisor instruction segment exception "
                  "while in user mode. Aborting\n");
    case POWERPC_EXCP_VPU: /* Vector unavailable exception */
        EXCP_DUMP(env, "No Altivec instructions allowed\n");
        info.si_signo = TARGET_SIGILL;
        info.si_code = TARGET_ILL_COPROC;
        info._sifields._sigfault._addr = env->nip - 4;
        queue_signal(env, info.si_signo, &info);
    case POWERPC_EXCP_PIT: /* Programmable interval timer IRQ */
        cpu_abort(env, "Programmable interval timer interrupt "
                  "while in user mode. Aborting\n");
    case POWERPC_EXCP_IO: /* IO error exception */
        cpu_abort(env, "IO error exception while in user mode. "
    case POWERPC_EXCP_RUNM: /* Run mode exception */
        cpu_abort(env, "Run mode exception while in user mode. "
    case POWERPC_EXCP_EMUL: /* Emulation trap exception */
        cpu_abort(env, "Emulation trap exception not handled\n");
    case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
        cpu_abort(env, "Instruction fetch TLB exception "
                  "while in user-mode. Aborting");
    case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
        cpu_abort(env, "Data load TLB exception while in user-mode. "
    case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
        cpu_abort(env, "Data store TLB exception while in user-mode. "
    case POWERPC_EXCP_FPA: /* Floating-point assist exception */
        cpu_abort(env, "Floating-point assist exception not handled\n");
    case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
        cpu_abort(env, "Instruction address breakpoint exception "
    case POWERPC_EXCP_SMI: /* System management interrupt */
        cpu_abort(env, "System management interrupt while in user mode. "
    case POWERPC_EXCP_THERM: /* Thermal interrupt */
        cpu_abort(env, "Thermal interrupt while in user mode. "
    case POWERPC_EXCP_PERFM: /* Embedded performance monitor IRQ */
        cpu_abort(env, "Performance monitor exception not handled\n");
    case POWERPC_EXCP_VPUA: /* Vector assist exception */
        cpu_abort(env, "Vector assist exception not handled\n");
    case POWERPC_EXCP_SOFTP: /* Soft patch exception */
        cpu_abort(env, "Soft patch exception not handled\n");
    case POWERPC_EXCP_MAINT: /* Maintenance exception */
        cpu_abort(env, "Maintenance exception while in user mode. "
    case POWERPC_EXCP_STOP: /* stop translation */
        /* We did invalidate the instruction cache.  Go on */
    case POWERPC_EXCP_BRANCH: /* branch instruction: */
        /* We just stopped because of a branch.  Go on */
    case POWERPC_EXCP_SYSCALL_USER:
        /* system call in user-mode emulation */
        /* PPC ABI uses overflow flag in cr0 to signal an error */
        env->crf[0] &= ~0x1;
        ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4],
                         env->gpr[5], env->gpr[6], env->gpr[7],
        if (ret == (target_ulong)(-TARGET_QEMU_ESIGRETURN)) {
            /* Returning from a successful sigreturn syscall.
               Avoid corrupting register state.  */
        if (ret > (target_ulong)(-515)) {
    case POWERPC_EXCP_STCX:
        if (do_store_exclusive(env)) {
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->nip;
            queue_signal(env, info.si_signo, &info);
        sig = gdb_handlesig(env, TARGET_SIGTRAP);
        info.si_signo = sig;
        info.si_code = TARGET_TRAP_BRKPT;
        queue_signal(env, info.si_signo, &info);
    case EXCP_INTERRUPT:
        /* just indicate that signals should be handled asap */
        cpu_abort(env, "Unknown exception 0x%x. Aborting\n", trapnr);
    process_pending_signals(env);
#define MIPS_SYS(name, args) args,

static const uint8_t mips_syscall_args[] = {
    MIPS_SYS(sys_syscall, 8) /* 4000 */
    MIPS_SYS(sys_exit, 1)
    MIPS_SYS(sys_fork, 0)
    MIPS_SYS(sys_read, 3)
    MIPS_SYS(sys_write, 3)
    MIPS_SYS(sys_open, 3) /* 4005 */
    MIPS_SYS(sys_close, 1)
    MIPS_SYS(sys_waitpid, 3)
    MIPS_SYS(sys_creat, 2)
    MIPS_SYS(sys_link, 2)
    MIPS_SYS(sys_unlink, 1) /* 4010 */
    MIPS_SYS(sys_execve, 0)
    MIPS_SYS(sys_chdir, 1)
    MIPS_SYS(sys_time, 1)
    MIPS_SYS(sys_mknod, 3)
    MIPS_SYS(sys_chmod, 2) /* 4015 */
    MIPS_SYS(sys_lchown, 3)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_ni_syscall, 0) /* was sys_stat */
    MIPS_SYS(sys_lseek, 3)
    MIPS_SYS(sys_getpid, 0) /* 4020 */
    MIPS_SYS(sys_mount, 5)
    MIPS_SYS(sys_oldumount, 1)
    MIPS_SYS(sys_setuid, 1)
    MIPS_SYS(sys_getuid, 0)
    MIPS_SYS(sys_stime, 1) /* 4025 */
    MIPS_SYS(sys_ptrace, 4)
    MIPS_SYS(sys_alarm, 1)
    MIPS_SYS(sys_ni_syscall, 0) /* was sys_fstat */
    MIPS_SYS(sys_pause, 0)
    MIPS_SYS(sys_utime, 2) /* 4030 */
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_access, 2)
    MIPS_SYS(sys_nice, 1)
    MIPS_SYS(sys_ni_syscall, 0) /* 4035 */
    MIPS_SYS(sys_sync, 0)
    MIPS_SYS(sys_kill, 2)
    MIPS_SYS(sys_rename, 2)
    MIPS_SYS(sys_mkdir, 2)
    MIPS_SYS(sys_rmdir, 1) /* 4040 */
    MIPS_SYS(sys_dup, 1)
    MIPS_SYS(sys_pipe, 0)
    MIPS_SYS(sys_times, 1)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_brk, 1) /* 4045 */
    MIPS_SYS(sys_setgid, 1)
    MIPS_SYS(sys_getgid, 0)
    MIPS_SYS(sys_ni_syscall, 0) /* was signal(2) */
    MIPS_SYS(sys_geteuid, 0)
    MIPS_SYS(sys_getegid, 0) /* 4050 */
    MIPS_SYS(sys_acct, 0)
    MIPS_SYS(sys_umount, 2)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_ioctl, 3)
    MIPS_SYS(sys_fcntl, 3) /* 4055 */
    MIPS_SYS(sys_ni_syscall, 2)
    MIPS_SYS(sys_setpgid, 2)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_olduname, 1)
    MIPS_SYS(sys_umask, 1) /* 4060 */
    MIPS_SYS(sys_chroot, 1)
    MIPS_SYS(sys_ustat, 2)
    MIPS_SYS(sys_dup2, 2)
    MIPS_SYS(sys_getppid, 0)
    MIPS_SYS(sys_getpgrp, 0) /* 4065 */
    MIPS_SYS(sys_setsid, 0)
    MIPS_SYS(sys_sigaction, 3)
    MIPS_SYS(sys_sgetmask, 0)
    MIPS_SYS(sys_ssetmask, 1)
    MIPS_SYS(sys_setreuid, 2) /* 4070 */
    MIPS_SYS(sys_setregid, 2)
    MIPS_SYS(sys_sigsuspend, 0)
    MIPS_SYS(sys_sigpending, 1)
    MIPS_SYS(sys_sethostname, 2)
    MIPS_SYS(sys_setrlimit, 2) /* 4075 */
    MIPS_SYS(sys_getrlimit, 2)
    MIPS_SYS(sys_getrusage, 2)
    MIPS_SYS(sys_gettimeofday, 2)
    MIPS_SYS(sys_settimeofday, 2)
    MIPS_SYS(sys_getgroups, 2) /* 4080 */
    MIPS_SYS(sys_setgroups, 2)
    MIPS_SYS(sys_ni_syscall, 0) /* old_select */
    MIPS_SYS(sys_symlink, 2)
    MIPS_SYS(sys_ni_syscall, 0) /* was sys_lstat */
    MIPS_SYS(sys_readlink, 3) /* 4085 */
    MIPS_SYS(sys_uselib, 1)
    MIPS_SYS(sys_swapon, 2)
    MIPS_SYS(sys_reboot, 3)
    MIPS_SYS(old_readdir, 3)
    MIPS_SYS(old_mmap, 6) /* 4090 */
    MIPS_SYS(sys_munmap, 2)
    MIPS_SYS(sys_truncate, 2)
    MIPS_SYS(sys_ftruncate, 2)
    MIPS_SYS(sys_fchmod, 2)
    MIPS_SYS(sys_fchown, 3) /* 4095 */
    MIPS_SYS(sys_getpriority, 2)
    MIPS_SYS(sys_setpriority, 3)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_statfs, 2)
    MIPS_SYS(sys_fstatfs, 2) /* 4100 */
    MIPS_SYS(sys_ni_syscall, 0) /* was ioperm(2) */
    MIPS_SYS(sys_socketcall, 2)
    MIPS_SYS(sys_syslog, 3)
    MIPS_SYS(sys_setitimer, 3)
    MIPS_SYS(sys_getitimer, 2) /* 4105 */
    MIPS_SYS(sys_newstat, 2)
    MIPS_SYS(sys_newlstat, 2)
    MIPS_SYS(sys_newfstat, 2)
    MIPS_SYS(sys_uname, 1)
    MIPS_SYS(sys_ni_syscall, 0) /* 4110 was iopl(2) */
    MIPS_SYS(sys_vhangup, 0)
    MIPS_SYS(sys_ni_syscall, 0) /* was sys_idle() */
    MIPS_SYS(sys_ni_syscall, 0) /* was sys_vm86 */
    MIPS_SYS(sys_wait4, 4)
    MIPS_SYS(sys_swapoff, 1) /* 4115 */
    MIPS_SYS(sys_sysinfo, 1)
    MIPS_SYS(sys_ipc, 6)
    MIPS_SYS(sys_fsync, 1)
    MIPS_SYS(sys_sigreturn, 0)
    MIPS_SYS(sys_clone, 6) /* 4120 */
    MIPS_SYS(sys_setdomainname, 2)
    MIPS_SYS(sys_newuname, 1)
    MIPS_SYS(sys_ni_syscall, 0) /* sys_modify_ldt */
    MIPS_SYS(sys_adjtimex, 1)
    MIPS_SYS(sys_mprotect, 3) /* 4125 */
    MIPS_SYS(sys_sigprocmask, 3)
    MIPS_SYS(sys_ni_syscall, 0) /* was create_module */
    MIPS_SYS(sys_init_module, 5)
    MIPS_SYS(sys_delete_module, 1)
    MIPS_SYS(sys_ni_syscall, 0) /* 4130 was get_kernel_syms */
    MIPS_SYS(sys_quotactl, 0)
    MIPS_SYS(sys_getpgid, 1)
    MIPS_SYS(sys_fchdir, 1)
    MIPS_SYS(sys_bdflush, 2)
    MIPS_SYS(sys_sysfs, 3) /* 4135 */
    MIPS_SYS(sys_personality, 1)
    MIPS_SYS(sys_ni_syscall, 0) /* for afs_syscall */
    MIPS_SYS(sys_setfsuid, 1)
    MIPS_SYS(sys_setfsgid, 1)
    MIPS_SYS(sys_llseek, 5) /* 4140 */
    MIPS_SYS(sys_getdents, 3)
    MIPS_SYS(sys_select, 5)
    MIPS_SYS(sys_flock, 2)
    MIPS_SYS(sys_msync, 3)
    MIPS_SYS(sys_readv, 3) /* 4145 */
    MIPS_SYS(sys_writev, 3)
    MIPS_SYS(sys_cacheflush, 3)
    MIPS_SYS(sys_cachectl, 3)
    MIPS_SYS(sys_sysmips, 4)
    MIPS_SYS(sys_ni_syscall, 0) /* 4150 */
    MIPS_SYS(sys_getsid, 1)
    MIPS_SYS(sys_fdatasync, 0)
    MIPS_SYS(sys_sysctl, 1)
    MIPS_SYS(sys_mlock, 2)
    MIPS_SYS(sys_munlock, 2) /* 4155 */
    MIPS_SYS(sys_mlockall, 1)
    MIPS_SYS(sys_munlockall, 0)
    MIPS_SYS(sys_sched_setparam, 2)
    MIPS_SYS(sys_sched_getparam, 2)
    MIPS_SYS(sys_sched_setscheduler, 3) /* 4160 */
    MIPS_SYS(sys_sched_getscheduler, 1)
    MIPS_SYS(sys_sched_yield, 0)
    MIPS_SYS(sys_sched_get_priority_max, 1)
    MIPS_SYS(sys_sched_get_priority_min, 1)
    MIPS_SYS(sys_sched_rr_get_interval, 2) /* 4165 */
    MIPS_SYS(sys_nanosleep, 2)
    MIPS_SYS(sys_mremap, 4)
    MIPS_SYS(sys_accept, 3)
    MIPS_SYS(sys_bind, 3)
    MIPS_SYS(sys_connect, 3) /* 4170 */
    MIPS_SYS(sys_getpeername, 3)
    MIPS_SYS(sys_getsockname, 3)
    MIPS_SYS(sys_getsockopt, 5)
    MIPS_SYS(sys_listen, 2)
    MIPS_SYS(sys_recv, 4) /* 4175 */
    MIPS_SYS(sys_recvfrom, 6)
    MIPS_SYS(sys_recvmsg, 3)
    MIPS_SYS(sys_send, 4)
    MIPS_SYS(sys_sendmsg, 3)
    MIPS_SYS(sys_sendto, 6) /* 4180 */
    MIPS_SYS(sys_setsockopt, 5)
    MIPS_SYS(sys_shutdown, 2)
    MIPS_SYS(sys_socket, 3)
    MIPS_SYS(sys_socketpair, 4)
    MIPS_SYS(sys_setresuid, 3) /* 4185 */
    MIPS_SYS(sys_getresuid, 3)
    MIPS_SYS(sys_ni_syscall, 0) /* was sys_query_module */
    MIPS_SYS(sys_poll, 3)
    MIPS_SYS(sys_nfsservctl, 3)
    MIPS_SYS(sys_setresgid, 3) /* 4190 */
    MIPS_SYS(sys_getresgid, 3)
    MIPS_SYS(sys_prctl, 5)
    MIPS_SYS(sys_rt_sigreturn, 0)
    MIPS_SYS(sys_rt_sigaction, 4)
    MIPS_SYS(sys_rt_sigprocmask, 4) /* 4195 */
    MIPS_SYS(sys_rt_sigpending, 2)
    MIPS_SYS(sys_rt_sigtimedwait, 4)
    MIPS_SYS(sys_rt_sigqueueinfo, 3)
    MIPS_SYS(sys_rt_sigsuspend, 0)
    MIPS_SYS(sys_pread64, 6) /* 4200 */
    MIPS_SYS(sys_pwrite64, 6)
    MIPS_SYS(sys_chown, 3)
    MIPS_SYS(sys_getcwd, 2)
    MIPS_SYS(sys_capget, 2)
    MIPS_SYS(sys_capset, 2) /* 4205 */
    MIPS_SYS(sys_sigaltstack, 2)
    MIPS_SYS(sys_sendfile, 4)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_mmap2, 6) /* 4210 */
    MIPS_SYS(sys_truncate64, 4)
    MIPS_SYS(sys_ftruncate64, 4)
    MIPS_SYS(sys_stat64, 2)
    MIPS_SYS(sys_lstat64, 2)
    MIPS_SYS(sys_fstat64, 2) /* 4215 */
    MIPS_SYS(sys_pivot_root, 2)
    MIPS_SYS(sys_mincore, 3)
    MIPS_SYS(sys_madvise, 3)
    MIPS_SYS(sys_getdents64, 3)
    MIPS_SYS(sys_fcntl64, 3) /* 4220 */
    MIPS_SYS(sys_ni_syscall, 0)
    MIPS_SYS(sys_gettid, 0)
    MIPS_SYS(sys_readahead, 5)
    MIPS_SYS(sys_setxattr, 5)
    MIPS_SYS(sys_lsetxattr, 5) /* 4225 */
    MIPS_SYS(sys_fsetxattr, 5)
    MIPS_SYS(sys_getxattr, 4)
    MIPS_SYS(sys_lgetxattr, 4)
    MIPS_SYS(sys_fgetxattr, 4)
    MIPS_SYS(sys_listxattr, 3) /* 4230 */
    MIPS_SYS(sys_llistxattr, 3)
    MIPS_SYS(sys_flistxattr, 3)
    MIPS_SYS(sys_removexattr, 2)
    MIPS_SYS(sys_lremovexattr, 2)
    MIPS_SYS(sys_fremovexattr, 2) /* 4235 */
    MIPS_SYS(sys_tkill, 2)
    MIPS_SYS(sys_sendfile64, 5)
    MIPS_SYS(sys_futex, 2)
    MIPS_SYS(sys_sched_setaffinity, 3)
    MIPS_SYS(sys_sched_getaffinity, 3) /* 4240 */
    MIPS_SYS(sys_io_setup, 2)
    MIPS_SYS(sys_io_destroy, 1)
    MIPS_SYS(sys_io_getevents, 5)
    MIPS_SYS(sys_io_submit, 3)
    MIPS_SYS(sys_io_cancel, 3) /* 4245 */
    MIPS_SYS(sys_exit_group, 1)
    MIPS_SYS(sys_lookup_dcookie, 3)
    MIPS_SYS(sys_epoll_create, 1)
    MIPS_SYS(sys_epoll_ctl, 4)
    MIPS_SYS(sys_epoll_wait, 3) /* 4250 */
    MIPS_SYS(sys_remap_file_pages, 5)
    MIPS_SYS(sys_set_tid_address, 1)
    MIPS_SYS(sys_restart_syscall, 0)
    MIPS_SYS(sys_fadvise64_64, 7)
    MIPS_SYS(sys_statfs64, 3) /* 4255 */
    MIPS_SYS(sys_fstatfs64, 2)
    MIPS_SYS(sys_timer_create, 3)
    MIPS_SYS(sys_timer_settime, 4)
    MIPS_SYS(sys_timer_gettime, 2)
    MIPS_SYS(sys_timer_getoverrun, 1) /* 4260 */
    MIPS_SYS(sys_timer_delete, 1)
    MIPS_SYS(sys_clock_settime, 2)
    MIPS_SYS(sys_clock_gettime, 2)
    MIPS_SYS(sys_clock_getres, 2)
    MIPS_SYS(sys_clock_nanosleep, 4) /* 4265 */
    MIPS_SYS(sys_tgkill, 3)
    MIPS_SYS(sys_utimes, 2)
    MIPS_SYS(sys_mbind, 4)
    MIPS_SYS(sys_ni_syscall, 0) /* sys_get_mempolicy */
    MIPS_SYS(sys_ni_syscall, 0) /* 4270 sys_set_mempolicy */
    MIPS_SYS(sys_mq_open, 4)
    MIPS_SYS(sys_mq_unlink, 1)
    MIPS_SYS(sys_mq_timedsend, 5)
    MIPS_SYS(sys_mq_timedreceive, 5)
    MIPS_SYS(sys_mq_notify, 2) /* 4275 */
    MIPS_SYS(sys_mq_getsetattr, 3)
    MIPS_SYS(sys_ni_syscall, 0) /* sys_vserver */
    MIPS_SYS(sys_waitid, 4)
    MIPS_SYS(sys_ni_syscall, 0) /* available, was setaltroot */
    MIPS_SYS(sys_add_key, 5)
    MIPS_SYS(sys_request_key, 4)
    MIPS_SYS(sys_keyctl, 5)
    MIPS_SYS(sys_set_thread_area, 1)
    MIPS_SYS(sys_inotify_init, 0)
    MIPS_SYS(sys_inotify_add_watch, 3) /* 4285 */
    MIPS_SYS(sys_inotify_rm_watch, 2)
    MIPS_SYS(sys_migrate_pages, 4)
    MIPS_SYS(sys_openat, 4)
    MIPS_SYS(sys_mkdirat, 3)
    MIPS_SYS(sys_mknodat, 4) /* 4290 */
    MIPS_SYS(sys_fchownat, 5)
    MIPS_SYS(sys_futimesat, 3)
    MIPS_SYS(sys_fstatat64, 4)
    MIPS_SYS(sys_unlinkat, 3)
    MIPS_SYS(sys_renameat, 4) /* 4295 */
    MIPS_SYS(sys_linkat, 5)
    MIPS_SYS(sys_symlinkat, 3)
    MIPS_SYS(sys_readlinkat, 4)
    MIPS_SYS(sys_fchmodat, 3)
    MIPS_SYS(sys_faccessat, 3) /* 4300 */
    MIPS_SYS(sys_pselect6, 6)
    MIPS_SYS(sys_ppoll, 5)
    MIPS_SYS(sys_unshare, 1)
    MIPS_SYS(sys_splice, 4)
    MIPS_SYS(sys_sync_file_range, 7) /* 4305 */
    MIPS_SYS(sys_tee, 4)
    MIPS_SYS(sys_vmsplice, 4)
    MIPS_SYS(sys_move_pages, 6)
    MIPS_SYS(sys_set_robust_list, 2)
    MIPS_SYS(sys_get_robust_list, 3) /* 4310 */
    MIPS_SYS(sys_kexec_load, 4)
    MIPS_SYS(sys_getcpu, 3)
    MIPS_SYS(sys_epoll_pwait, 6)
    MIPS_SYS(sys_ioprio_set, 3)
    MIPS_SYS(sys_ioprio_get, 2)
    MIPS_SYS(sys_utimensat, 4)
    MIPS_SYS(sys_signalfd, 3)
    MIPS_SYS(sys_ni_syscall, 0) /* was timerfd */
    MIPS_SYS(sys_eventfd, 1)
    MIPS_SYS(sys_fallocate, 6) /* 4320 */
    MIPS_SYS(sys_timerfd_create, 2)
    MIPS_SYS(sys_timerfd_gettime, 2)
    MIPS_SYS(sys_timerfd_settime, 4)
    MIPS_SYS(sys_signalfd4, 4)
    MIPS_SYS(sys_eventfd2, 2) /* 4325 */
    MIPS_SYS(sys_epoll_create1, 1)
    MIPS_SYS(sys_dup3, 3)
    MIPS_SYS(sys_pipe2, 2)
    MIPS_SYS(sys_inotify_init1, 1)
    MIPS_SYS(sys_preadv, 6) /* 4330 */
    MIPS_SYS(sys_pwritev, 6)
    MIPS_SYS(sys_rt_tgsigqueueinfo, 4)
    MIPS_SYS(sys_perf_event_open, 5)
    MIPS_SYS(sys_accept4, 4)
    MIPS_SYS(sys_recvmmsg, 5) /* 4335 */
    MIPS_SYS(sys_fanotify_init, 2)
    MIPS_SYS(sys_fanotify_mark, 6)
    MIPS_SYS(sys_prlimit64, 4)
    MIPS_SYS(sys_name_to_handle_at, 5)
    MIPS_SYS(sys_open_by_handle_at, 3) /* 4340 */
    MIPS_SYS(sys_clock_adjtime, 2)
    MIPS_SYS(sys_syncfs, 1)
static int do_store_exclusive(CPUMIPSState *env)
    target_ulong page_addr;

    page_addr = addr & TARGET_PAGE_MASK;
    flags = page_get_flags(page_addr);
    if ((flags & PAGE_READ) == 0) {
        reg = env->llreg & 0x1f;
        d = (env->llreg & 0x20) != 0;
            segv = get_user_s64(val, addr);
            segv = get_user_s32(val, addr);
        if (val != env->llval) {
            env->active_tc.gpr[reg] = 0;
                segv = put_user_u64(env->llnewval, addr);
                segv = put_user_u32(env->llnewval, addr);
                env->active_tc.gpr[reg] = 1;
    env->active_tc.PC += 4;

void cpu_loop(CPUMIPSState *env)
    target_siginfo_t info;
    unsigned int syscall_num;

    cpu_exec_start(env);
    trapnr = cpu_mips_exec(env);
        syscall_num = env->active_tc.gpr[2] - 4000;
        env->active_tc.PC += 4;
        if (syscall_num >= sizeof(mips_syscall_args)) {
            ret = -TARGET_ENOSYS;
            abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;
            nb_args = mips_syscall_args[syscall_num];
            sp_reg = env->active_tc.gpr[29];
            /* these arguments are taken from the stack */
            if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) {
            if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) {
            if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) {
            if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) {
            ret = do_syscall(env, env->active_tc.gpr[2],
                             env->active_tc.gpr[4],
                             env->active_tc.gpr[5],
                             env->active_tc.gpr[6],
                             env->active_tc.gpr[7],
                             arg5, arg6, arg7, arg8);
        if (ret == -TARGET_QEMU_ESIGRETURN) {
            /* Returning from a successful sigreturn syscall.
               Avoid clobbering register state.  */
        if ((unsigned int)ret >= (unsigned int)(-1133)) {
            env->active_tc.gpr[7] = 1; /* error flag */
            env->active_tc.gpr[7] = 0; /* error flag */
        env->active_tc.gpr[2] = ret;
        info.si_signo = TARGET_SIGSEGV;
        /* XXX: check env->error_code */
        info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = env->CP0_BadVAddr;
        queue_signal(env, info.si_signo, &info);
        info.si_signo = TARGET_SIGILL;
        queue_signal(env, info.si_signo, &info);
    case EXCP_INTERRUPT:
        /* just indicate that signals should be handled asap */
        sig = gdb_handlesig(env, TARGET_SIGTRAP);
        info.si_signo = sig;
        info.si_code = TARGET_TRAP_BRKPT;
        queue_signal(env, info.si_signo, &info);
        if (do_store_exclusive(env)) {
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->active_tc.PC;
            queue_signal(env, info.si_signo, &info);
        fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
        cpu_dump_state(env, stderr, fprintf, 0);
    process_pending_signals(env);
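/*
 * Illustrative sketch (not from the original file): the o32 syscall
 * convention that the MIPS cpu_loop() above implements.  The syscall
 * number arrives in $v0 (gpr[2], biased by 4000 when indexing
 * mips_syscall_args), arguments 1-4 in $a0-$a3, arguments 5-8 on the
 * stack at sp+16..sp+28, and on return $a3 (gpr[7]) is an error flag
 * while $v0 carries the result, or a positive errno on error.  The
 * demo_* names are invented.
 */
static void demo_mips_o32_return(long host_ret,
                                 unsigned long *v0, unsigned long *a3)
{
    /* Mirrors the "(unsigned int)ret >= (unsigned int)(-1133)" test above:
       values in the errno range are reported as failures. */
    if ((unsigned long)host_ret >= (unsigned long)-1133) {
        *a3 = 1;                 /* error flag set */
        *v0 = -host_ret;         /* positive errno in $v0 */
    } else {
        *a3 = 0;                 /* success */
        *v0 = host_ret;
    }
}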
void cpu_loop(CPUSH4State *env)
    target_siginfo_t info;

    trapnr = cpu_sh4_exec(env);
        ret = do_syscall(env,
        env->gregs[0] = ret;
    case EXCP_INTERRUPT:
        /* just indicate that signals should be handled asap */
        sig = gdb_handlesig(env, TARGET_SIGTRAP);
        info.si_signo = sig;
        info.si_code = TARGET_TRAP_BRKPT;
        queue_signal(env, info.si_signo, &info);
        info.si_signo = SIGSEGV;
        info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = env->tea;
        queue_signal(env, info.si_signo, &info);
        printf("Unhandled trap: 0x%x\n", trapnr);
        cpu_dump_state(env, stderr, fprintf, 0);
    process_pending_signals(env);
void cpu_loop(CPUCRISState *env)
    target_siginfo_t info;

    trapnr = cpu_cris_exec(env);
        info.si_signo = SIGSEGV;
        /* XXX: check env->error_code */
        info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = env->pregs[PR_EDA];
        queue_signal(env, info.si_signo, &info);
    case EXCP_INTERRUPT:
        /* just indicate that signals should be handled asap */
        ret = do_syscall(env,
        env->regs[10] = ret;
        sig = gdb_handlesig(env, TARGET_SIGTRAP);
        info.si_signo = sig;
        info.si_code = TARGET_TRAP_BRKPT;
        queue_signal(env, info.si_signo, &info);
        printf("Unhandled trap: 0x%x\n", trapnr);
        cpu_dump_state(env, stderr, fprintf, 0);
    process_pending_signals(env);
#ifdef TARGET_MICROBLAZE
void cpu_loop(CPUMBState *env)
    target_siginfo_t info;

    trapnr = cpu_mb_exec(env);
        info.si_signo = SIGSEGV;
        /* XXX: check env->error_code */
        info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = 0;
        queue_signal(env, info.si_signo, &info);
    case EXCP_INTERRUPT:
        /* just indicate that signals should be handled asap */
        /* Return address is 4 bytes after the call. */
        ret = do_syscall(env,
        env->sregs[SR_PC] = env->regs[14];
        env->regs[17] = env->sregs[SR_PC] + 4;
        if (env->iflags & D_FLAG) {
            env->sregs[SR_ESR] |= 1 << 12;
            env->sregs[SR_PC] -= 4;
            /* FIXME: if branch was immed, replay the imm as well. */
        env->iflags &= ~(IMM_FLAG | D_FLAG);
        switch (env->sregs[SR_ESR] & 31) {
        case ESR_EC_DIVZERO:
            info.si_signo = SIGFPE;
            info.si_code = TARGET_FPE_FLTDIV;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            info.si_signo = SIGFPE;
            if (env->sregs[SR_FSR] & FSR_IO) {
                info.si_code = TARGET_FPE_FLTINV;
            if (env->sregs[SR_FSR] & FSR_DZ) {
                info.si_code = TARGET_FPE_FLTDIV;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
            printf("Unhandled hw-exception: 0x%x\n",
                   env->sregs[SR_ESR] & ESR_EC_MASK);
            cpu_dump_state(env, stderr, fprintf, 0);
        sig = gdb_handlesig(env, TARGET_SIGTRAP);
        info.si_signo = sig;
        info.si_code = TARGET_TRAP_BRKPT;
        queue_signal(env, info.si_signo, &info);
        printf("Unhandled trap: 0x%x\n", trapnr);
        cpu_dump_state(env, stderr, fprintf, 0);
    process_pending_signals(env);
void cpu_loop(CPUM68KState *env)
    target_siginfo_t info;
    TaskState *ts = env->opaque;

    trapnr = cpu_m68k_exec(env);
        if (ts->sim_syscalls) {
            nr = lduw(env->pc + 2);
            do_m68k_simcall(env, nr);
    case EXCP_HALT_INSN:
        /* Semihosting syscall.  */
        do_m68k_semihosting(env, env->dregs[0]);
    case EXCP_UNSUPPORTED:
        info.si_signo = SIGILL;
        info.si_code = TARGET_ILL_ILLOPN;
        info._sifields._sigfault._addr = env->pc;
        queue_signal(env, info.si_signo, &info);
        ts->sim_syscalls = 0;
        env->dregs[0] = do_syscall(env,
    case EXCP_INTERRUPT:
        /* just indicate that signals should be handled asap */
        info.si_signo = SIGSEGV;
        /* XXX: check env->error_code */
        info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = env->mmu.ar;
        queue_signal(env, info.si_signo, &info);
        sig = gdb_handlesig(env, TARGET_SIGTRAP);
        info.si_signo = sig;
        info.si_code = TARGET_TRAP_BRKPT;
        queue_signal(env, info.si_signo, &info);
    fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
    cpu_dump_state(env, stderr, fprintf, 0);
    process_pending_signals(env);
#endif /* TARGET_M68K */
2588 static void do_store_exclusive(CPUAlphaState
*env
, int reg
, int quad
)
2590 target_ulong addr
, val
, tmp
;
2591 target_siginfo_t info
;
2594 addr
= env
->lock_addr
;
2595 tmp
= env
->lock_st_addr
;
2596 env
->lock_addr
= -1;
2597 env
->lock_st_addr
= 0;
2603 if (quad
? get_user_s64(val
, addr
) : get_user_s32(val
, addr
)) {
2607 if (val
== env
->lock_value
) {
2609 if (quad
? put_user_u64(tmp
, addr
) : put_user_u32(tmp
, addr
)) {
2626 info
.si_signo
= TARGET_SIGSEGV
;
2628 info
.si_code
= TARGET_SEGV_MAPERR
;
2629 info
._sifields
._sigfault
._addr
= addr
;
2630 queue_signal(env
, TARGET_SIGSEGV
, &info
);
void cpu_loop(CPUAlphaState *env)
    target_siginfo_t info;

        trapnr = cpu_alpha_exec (env);

        /* All of the traps imply a transition through PALcode, which
           implies an REI instruction has been executed.  Which means
           that the intr_flag should be cleared.  */

            fprintf(stderr, "Reset requested. Exit\n");
            fprintf(stderr, "Machine check exception. Exit\n");
        case EXCP_SMP_INTERRUPT:
        case EXCP_CLK_INTERRUPT:
        case EXCP_DEV_INTERRUPT:
            fprintf(stderr, "External interrupt. Exit\n");
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
                            ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_FLTINV;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            /* No-op.  Linux simply re-enables the FPU.  */
            env->lock_addr = -1;
            switch (env->error_code) {
                info.si_signo = TARGET_SIGTRAP;
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
                info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
                trapnr = env->ir[IR_V0];
                sysret = do_syscall(env, trapnr,
                                    env->ir[IR_A0], env->ir[IR_A1],
                                    env->ir[IR_A2], env->ir[IR_A3],
                                    env->ir[IR_A4], env->ir[IR_A5],
                if (trapnr == TARGET_NR_sigreturn
                    || trapnr == TARGET_NR_rt_sigreturn) {
                /* Syscall writes 0 to V0 to bypass error check, similar
                   to how this is handled internal to Linux kernel.  */
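                /* Alpha/Linux convention: on failure the positive errno is
                   returned in v0 and a3 is set to 1; on success a3 is 0.
                   That is what the (sysret < 0) handling below implements. */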
                if (env->ir[IR_V0] == 0) {
                    env->ir[IR_V0] = sysret;
                    env->ir[IR_V0] = (sysret < 0 ? -sysret : sysret);
                    env->ir[IR_A3] = (sysret < 0);
                /* ??? We can probably elide the code using page_unprotect
                   that is checking for self-modifying code.  Instead we
                   could simply call tb_flush here.  Until we work out the
                   changes required to turn off the extra write protection,
                   this can be a no-op.  */
                /* Handled in the translator for usermode.  */
                /* Handled in the translator for usermode.  */
                info.si_signo = TARGET_SIGFPE;
                switch (env->ir[IR_A0]) {
                case TARGET_GEN_INTOVF:
                    info.si_code = TARGET_FPE_INTOVF;
                case TARGET_GEN_INTDIV:
                    info.si_code = TARGET_FPE_INTDIV;
                case TARGET_GEN_FLTOVF:
                    info.si_code = TARGET_FPE_FLTOVF;
                case TARGET_GEN_FLTUND:
                    info.si_code = TARGET_FPE_FLTUND;
                case TARGET_GEN_FLTINV:
                    info.si_code = TARGET_FPE_FLTINV;
                case TARGET_GEN_FLTINE:
                    info.si_code = TARGET_FPE_FLTRES;
                case TARGET_GEN_ROPRAND:
                    info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
            info.si_signo = gdb_handlesig (env, TARGET_SIGTRAP);
            if (info.si_signo) {
                env->lock_addr = -1;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
            printf ("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(env, stderr, fprintf, 0);
        process_pending_signals (env);
#endif /* TARGET_ALPHA */
void cpu_loop(CPUS390XState *env)
    target_siginfo_t info;

        trapnr = cpu_s390x_exec (env);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig (env, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, &info);
            int n = env->int_svc_code;
            /* syscalls > 255 */
            env->psw.addr += env->int_svc_ilc;
            env->regs[2] = do_syscall(env, n,
            info.si_signo = SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->__excp_addr;
            queue_signal(env, info.si_signo, &info);
            fprintf(stderr, "specification exception insn 0x%08x%04x\n",
                    ldl(env->psw.addr), lduw(env->psw.addr + 4));
            info.si_signo = SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->__excp_addr;
            queue_signal(env, info.si_signo, &info);
            printf ("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(env, stderr, fprintf, 0);
        process_pending_signals (env);
#endif /* TARGET_S390X */
THREAD CPUArchState *thread_env;

void task_settid(TaskState *ts)
    if (ts->ts_tid == 0) {
#ifdef CONFIG_USE_NPTL
        ts->ts_tid = (pid_t)syscall(SYS_gettid);
        /* when no threads are used, tid becomes pid */
        ts->ts_tid = getpid();

void stop_all_tasks(void)
    /*
     * We trust that when using NPTL, start_exclusive()
     * handles thread stopping correctly.
     */
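
/* init_task_state() below links the entries of sigqueue_table into a
   singly linked free list, with first_free pointing at the head. */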
/* Assumes contents are already zeroed.  */
void init_task_state(TaskState *ts)
    ts->first_free = ts->sigqueue_table;
    for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
        ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1];
    ts->sigqueue_table[i].next = NULL;
static void handle_arg_help(const char *arg)

static void handle_arg_log(const char *arg)
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(arg);
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);

static void handle_arg_log_filename(const char *arg)
    cpu_set_log_filename(arg);

static void handle_arg_set_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_setenv(envlist, token) != 0) {

static void handle_arg_unset_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_unsetenv(envlist, token) != 0) {

static void handle_arg_argv0(const char *arg)
    argv0 = strdup(arg);

static void handle_arg_stack_size(const char *arg)
    guest_stack_size = strtoul(arg, &p, 0);
    if (guest_stack_size == 0) {
        guest_stack_size *= 1024 * 1024;
    } else if (*p == 'k' || *p == 'K') {
        guest_stack_size *= 1024;

static void handle_arg_ld_prefix(const char *arg)
    interp_prefix = strdup(arg);

static void handle_arg_pagesize(const char *arg)
    qemu_host_page_size = atoi(arg);
    if (qemu_host_page_size == 0 ||
        (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
        fprintf(stderr, "page size must be a power of two\n");

static void handle_arg_gdb(const char *arg)
    gdbstub_port = atoi(arg);

static void handle_arg_uname(const char *arg)
    qemu_uname_release = strdup(arg);

static void handle_arg_cpu(const char *arg)
    cpu_model = strdup(arg);
    if (cpu_model == NULL || strcmp(cpu_model, "?") == 0) {
        /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
        cpu_list_id(stdout, &fprintf, "");
#elif defined(cpu_list)
        cpu_list(stdout, &fprintf); /* deprecated */

#if defined(CONFIG_USE_GUEST_BASE)
static void handle_arg_guest_base(const char *arg)
    guest_base = strtol(arg, NULL, 0);
    have_guest_base = 1;
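
/* -R: reserve a block of host virtual address space for the guest.  The
   (elided) suffix parsing accepts a trailing k/M/G on the size, which is
   applied via 'shift' below. */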
static void handle_arg_reserved_va(const char *arg)
    reserved_va = strtoul(arg, &p, 0);
        unsigned long unshifted = reserved_va;
        reserved_va <<= shift;
        if (((reserved_va >> shift) != unshifted)
#if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
            || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
            fprintf(stderr, "Reserved virtual address too big\n");
        fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);

static void handle_arg_singlestep(const char *arg)

static void handle_arg_strace(const char *arg)

static void handle_arg_version(const char *arg)
    printf("qemu-" TARGET_ARCH " version " QEMU_VERSION QEMU_PKGVERSION
           ", Copyright (c) 2003-2008 Fabrice Bellard\n");
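
/* Command-line option table: each entry pairs a flag (argv) with the QEMU_*
   environment variable that can also set it, whether it takes an argument,
   its handler, an example value and a help string. */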
struct qemu_argument {
    void (*handle_opt)(const char *arg);
    const char *example;

struct qemu_argument arg_table[] = {
    {"h",          "",                 false, handle_arg_help,
     "",           "print this help"},
    {"g",          "QEMU_GDB",         true,  handle_arg_gdb,
     "port",       "wait gdb connection to 'port'"},
    {"L",          "QEMU_LD_PREFIX",   true,  handle_arg_ld_prefix,
     "path",       "set the elf interpreter prefix to 'path'"},
    {"s",          "QEMU_STACK_SIZE",  true,  handle_arg_stack_size,
     "size",       "set the stack size to 'size' bytes"},
    {"cpu",        "QEMU_CPU",         true,  handle_arg_cpu,
     "model",      "select CPU (-cpu ? for list)"},
    {"E",          "QEMU_SET_ENV",     true,  handle_arg_set_env,
     "var=value",  "sets targets environment variable (see below)"},
    {"U",          "QEMU_UNSET_ENV",   true,  handle_arg_unset_env,
     "var",        "unsets targets environment variable (see below)"},
    {"0",          "QEMU_ARGV0",       true,  handle_arg_argv0,
     "argv0",      "forces target process argv[0] to be 'argv0'"},
    {"r",          "QEMU_UNAME",       true,  handle_arg_uname,
     "uname",      "set qemu uname release string to 'uname'"},
#if defined(CONFIG_USE_GUEST_BASE)
    {"B",          "QEMU_GUEST_BASE",  true,  handle_arg_guest_base,
     "address",    "set guest_base address to 'address'"},
    {"R",          "QEMU_RESERVED_VA", true,  handle_arg_reserved_va,
     "size",       "reserve 'size' bytes for guest virtual address space"},
    {"d",          "QEMU_LOG",         true,  handle_arg_log,
     "options",    "activate log"},
    {"D",          "QEMU_LOG_FILENAME", true, handle_arg_log_filename,
     "logfile",    "override default logfile location"},
    {"p",          "QEMU_PAGESIZE",    true,  handle_arg_pagesize,
     "pagesize",   "set the host page size to 'pagesize'"},
    {"singlestep", "QEMU_SINGLESTEP",  false, handle_arg_singlestep,
     "",           "run in singlestep mode"},
    {"strace",     "QEMU_STRACE",      false, handle_arg_strace,
     "",           "log system calls"},
    {"version",    "QEMU_VERSION",     false, handle_arg_version,
     "",           "display version information and exit"},
    {NULL, NULL, false, NULL, NULL, NULL}
static void usage(void)
    struct qemu_argument *arginfo;

    printf("usage: qemu-" TARGET_ARCH " [options] program [arguments...]\n"
           "Linux CPU emulator (compiled for " TARGET_ARCH " emulation)\n"
           "Options and associated environment variables:\n"

    maxarglen = maxenvlen = 0;

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (strlen(arginfo->env) > maxenvlen) {
            maxenvlen = strlen(arginfo->env);
        if (strlen(arginfo->argv) > maxarglen) {
            maxarglen = strlen(arginfo->argv);

    printf("%-*s%-*sDescription\n", maxarglen+3, "Argument",
           maxenvlen+1, "Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->has_arg) {
            printf("-%s %-*s %-*s %s\n", arginfo->argv,
                   (int)(maxarglen-strlen(arginfo->argv)), arginfo->example,
                   maxenvlen, arginfo->env, arginfo->help);
            printf("-%-*s %-*s %s\n", maxarglen+1, arginfo->argv,
                   maxenvlen, arginfo->env,

           "QEMU_LD_PREFIX = %s\n"
           "QEMU_STACK_SIZE = %ld byte\n"
           "You can use -E and -U options or the QEMU_SET_ENV and\n"
           "QEMU_UNSET_ENV environment variables to set and unset\n"
           "environment variables for the target process.\n"
           "It is possible to provide several variables by separating them\n"
           "by commas in getsubopt(3) style. Additionally it is possible to\n"
           "provide the -E and -U options multiple times.\n"
           "The following lines are equivalent:\n"
           "    -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
           "    -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
           "    QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
           "Note that if you provide several changes to a single variable\n"
           "the last change will stay in effect.\n");
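
/* Options are applied in two passes: first from their QEMU_* environment
   variables, then from argv, so flags given on the command line override
   values taken from the environment. */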
static int parse_args(int argc, char **argv)
    struct qemu_argument *arginfo;

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->env == NULL) {
        r = getenv(arginfo->env);
            arginfo->handle_opt(r);

    if (optind >= argc) {
        if (!strcmp(r, "-")) {
        for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
            if (!strcmp(r, arginfo->argv)) {
                if (arginfo->has_arg) {
                    if (optind >= argc) {
                    arginfo->handle_opt(argv[optind]);
                    arginfo->handle_opt(NULL);

        /* no option matched the current argv */
        if (arginfo->handle_opt == NULL) {

    if (optind >= argc) {

    filename = argv[optind];
    exec_path = argv[optind];
int main(int argc, char **argv, char **envp)
    const char *log_file = DEBUG_LOGFILE;
    struct target_pt_regs regs1, *regs = &regs1;
    struct image_info info1, *info = &info1;
    struct linux_binprm bprm;
    char **target_environ, **wrk;

    module_call_init(MODULE_INIT_QOM);

    qemu_cache_utils_init(envp);

    if ((envlist = envlist_create()) == NULL) {
        (void) fprintf(stderr, "Unable to allocate envlist\n");

    /* add current environment into the list */
    for (wrk = environ; *wrk != NULL; wrk++) {
        (void) envlist_setenv(envlist, *wrk);

    /* Read the stack limit from the kernel.  If it's "unlimited",
       then we can do little else besides use the default.  */
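    /* Only adopt the limit if it is finite and representable as a
       target_long; otherwise keep the compiled-in default. */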
    if (getrlimit(RLIMIT_STACK, &lim) == 0
        && lim.rlim_cur != RLIM_INFINITY
        && lim.rlim_cur == (target_long)lim.rlim_cur) {
        guest_stack_size = lim.rlim_cur;

#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file (TBD) */

    cpu_set_log_filename(log_file);
    optind = parse_args(argc, argv);

    memset(regs, 0, sizeof(struct target_pt_regs));

    /* Zero out image_info */
    memset(info, 0, sizeof(struct image_info));

    memset(&bprm, 0, sizeof (bprm));

    /* Scan interp_prefix dir for replacement files. */
    init_paths(interp_prefix);

    if (cpu_model == NULL) {
#if defined(TARGET_I386)
#ifdef TARGET_X86_64
        cpu_model = "qemu64";
        cpu_model = "qemu32";
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_M68K)
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
        cpu_model = "TI UltraSparc II";
        cpu_model = "Fujitsu MB86904";
#elif defined(TARGET_MIPS)
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
#elif defined(TARGET_PPC)
        cpu_model = "970fx";

    cpu_exec_init_all();
    /* NOTE: we need to init the CPU at this stage to get
       qemu_host_page_size */
    env = cpu_init(cpu_model);
        fprintf(stderr, "Unable to find CPU definition\n");
#if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
    cpu_state_reset(env);

    if (getenv("QEMU_STRACE")) {

    target_environ = envlist_to_environ(envlist, NULL);
    envlist_free(envlist);

#if defined(CONFIG_USE_GUEST_BASE)
    /*
     * Now that page sizes are configured in cpu_init() we can do
     * proper page alignment for guest_base.
     */
    guest_base = HOST_PAGE_ALIGN(guest_base);
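
    /* When -R was given, reserve the guest address space up front with a
       PROT_NONE, MAP_NORESERVE anonymous mapping; guest mmap()s are then
       satisfied from inside this region. */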
    flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
    if (have_guest_base) {
        p = mmap((void *)guest_base, reserved_va, PROT_NONE, flags, -1, 0);
        if (p == MAP_FAILED) {
            fprintf(stderr, "Unable to reserve guest address space\n");
        guest_base = (unsigned long)p;
        /* Make sure the address is properly aligned.  */
        if (guest_base & ~qemu_host_page_mask) {
            munmap(p, reserved_va);
            p = mmap((void *)guest_base, reserved_va + qemu_host_page_size,
                     PROT_NONE, flags, -1, 0);
            if (p == MAP_FAILED) {
                fprintf(stderr, "Unable to reserve guest address space\n");
            guest_base = HOST_PAGE_ALIGN((unsigned long)p);
        qemu_log("Reserved 0x%lx bytes of guest address space\n", reserved_va);

    if (reserved_va || have_guest_base) {
        if (!guest_validate_base(guest_base)) {
            fprintf(stderr, "Guest base/Reserved VA rejected by guest code\n");
#endif /* CONFIG_USE_GUEST_BASE */
    /*
     * Read in mmap_min_addr kernel parameter.  This value is used
     * when loading the ELF image to determine whether guest_base
     * is needed.  It is also used in mmap_find_vma.
     */
    if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
        if (fscanf(fp, "%lu", &tmp) == 1) {
            mmap_min_addr = tmp;
            qemu_log("host mmap_min_addr=0x%lx\n", mmap_min_addr);

    /*
     * Prepare copy of argv vector for target.
     */
    target_argc = argc - optind;
    target_argv = calloc(target_argc + 1, sizeof (char *));
    if (target_argv == NULL) {
        (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");

    /*
     * If argv0 is specified (using '-0' switch) we replace
     * argv[0] pointer with the given one.
     */
    if (argv0 != NULL) {
        target_argv[i++] = strdup(argv0);
    for (; i < target_argc; i++) {
        target_argv[i] = strdup(argv[optind + i]);
    target_argv[target_argc] = NULL;

    ts = g_malloc0 (sizeof(TaskState));
    init_task_state(ts);
    /* build Task State */

    ret = loader_exec(filename, target_argv, target_environ, regs,
        printf("Error %d while loading %s\n", ret, filename);

    for (i = 0; i < target_argc; i++) {
        free(target_argv[i]);

    for (wrk = target_environ; *wrk; wrk++) {
    free(target_environ);

    if (qemu_log_enabled()) {
#if defined(CONFIG_USE_GUEST_BASE)
        qemu_log("guest_base 0x%lx\n", guest_base);
        qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
        qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code);
        qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data);
        qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk);
        qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry);

    target_set_brk(info->brk);

#if defined(CONFIG_USE_GUEST_BASE)
    /* Now that we've loaded the binary, GUEST_BASE is fixed.  Delay
       generating the prologue until now so that the prologue can take
       the real value of GUEST_BASE into account.  */
    tcg_prologue_init(&tcg_ctx);

#if defined(TARGET_I386)
    cpu_x86_set_cpl(env, 3);

    env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
    env->hflags |= HF_PE_MASK;
    if (env->cpuid_features & CPUID_SSE) {
        env->cr[4] |= CR4_OSFXSR_MASK;
        env->hflags |= HF_OSFXSR_MASK;
#ifndef TARGET_ABI32
    /* enable 64 bit mode if possible */
    if (!(env->cpuid_ext2_features & CPUID_EXT2_LM)) {
        fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
    env->cr[4] |= CR4_PAE_MASK;
    env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
    env->hflags |= HF_LMA_MASK;

    /* flags setup : we activate the IRQs by default as in user mode */
    env->eflags |= IF_MASK;

    /* linux register setup */
#ifndef TARGET_ABI32
    env->regs[R_EAX] = regs->rax;
    env->regs[R_EBX] = regs->rbx;
    env->regs[R_ECX] = regs->rcx;
    env->regs[R_EDX] = regs->rdx;
    env->regs[R_ESI] = regs->rsi;
    env->regs[R_EDI] = regs->rdi;
    env->regs[R_EBP] = regs->rbp;
    env->regs[R_ESP] = regs->rsp;
    env->eip = regs->rip;
    env->regs[R_EAX] = regs->eax;
    env->regs[R_EBX] = regs->ebx;
    env->regs[R_ECX] = regs->ecx;
    env->regs[R_EDX] = regs->edx;
    env->regs[R_ESI] = regs->esi;
    env->regs[R_EDI] = regs->edi;
    env->regs[R_EBP] = regs->ebp;
    env->regs[R_ESP] = regs->esp;
    env->eip = regs->eip;

    /* linux interrupt setup */
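    /* Map a small descriptor table into the guest address space so that the
       emulated CPU has a valid IDT to consult; the exceptions it raises are
       still delivered back to and handled by cpu_loop(). */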
#ifndef TARGET_ABI32
    env->idt.limit = 511;
    env->idt.limit = 255;
    env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
                                PROT_READ|PROT_WRITE,
                                MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    idt_table = g2h(env->idt.base);

    /* linux segment setup */
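        /* The GDT below provides flat 4 GiB code and data segments with
           DPL 3, mirroring the layout the Linux kernel presents to user
           space; __USER_CS and __USER_DS select them. */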
        uint64_t *gdt_table;
        env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
        gdt_table = g2h(env->gdt.base);

        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
        /* 64 bit code segment */
        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
        write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));

    cpu_x86_load_seg(env, R_CS, __USER_CS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_FS, __USER_DS);
    cpu_x86_load_seg(env, R_GS, __USER_DS);
    /* This hack makes Wine work... */
    env->segs[R_FS].selector = 0;
    cpu_x86_load_seg(env, R_DS, 0);
    cpu_x86_load_seg(env, R_ES, 0);
    cpu_x86_load_seg(env, R_FS, 0);
    cpu_x86_load_seg(env, R_GS, 0);
#elif defined(TARGET_ARM)
    cpsr_write(env, regs->uregs[16], 0xffffffff);
    for(i = 0; i < 16; i++) {
        env->regs[i] = regs->uregs[i];
#elif defined(TARGET_UNICORE32)
    cpu_asr_write(env, regs->uregs[32], 0xffffffff);
    for (i = 0; i < 32; i++) {
        env->regs[i] = regs->uregs[i];
#elif defined(TARGET_SPARC)
    env->npc = regs->npc;
    for(i = 0; i < 8; i++)
        env->gregs[i] = regs->u_regs[i];
    for(i = 0; i < 8; i++)
        env->regwptr[i] = regs->u_regs[i + 8];
#elif defined(TARGET_PPC)
#if defined(TARGET_PPC64)
#if defined(TARGET_ABI32)
    env->msr &= ~((target_ulong)1 << MSR_SF);
    env->msr |= (target_ulong)1 << MSR_SF;
    env->nip = regs->nip;
    for(i = 0; i < 32; i++) {
        env->gpr[i] = regs->gpr[i];
#elif defined(TARGET_M68K)
    env->dregs[0] = regs->d0;
    env->dregs[1] = regs->d1;
    env->dregs[2] = regs->d2;
    env->dregs[3] = regs->d3;
    env->dregs[4] = regs->d4;
    env->dregs[5] = regs->d5;
    env->dregs[6] = regs->d6;
    env->dregs[7] = regs->d7;
    env->aregs[0] = regs->a0;
    env->aregs[1] = regs->a1;
    env->aregs[2] = regs->a2;
    env->aregs[3] = regs->a3;
    env->aregs[4] = regs->a4;
    env->aregs[5] = regs->a5;
    env->aregs[6] = regs->a6;
    env->aregs[7] = regs->usp;
    ts->sim_syscalls = 1;
#elif defined(TARGET_MICROBLAZE)
    env->regs[0] = regs->r0;
    env->regs[1] = regs->r1;
    env->regs[2] = regs->r2;
    env->regs[3] = regs->r3;
    env->regs[4] = regs->r4;
    env->regs[5] = regs->r5;
    env->regs[6] = regs->r6;
    env->regs[7] = regs->r7;
    env->regs[8] = regs->r8;
    env->regs[9] = regs->r9;
    env->regs[10] = regs->r10;
    env->regs[11] = regs->r11;
    env->regs[12] = regs->r12;
    env->regs[13] = regs->r13;
    env->regs[14] = regs->r14;
    env->regs[15] = regs->r15;
    env->regs[16] = regs->r16;
    env->regs[17] = regs->r17;
    env->regs[18] = regs->r18;
    env->regs[19] = regs->r19;
    env->regs[20] = regs->r20;
    env->regs[21] = regs->r21;
    env->regs[22] = regs->r22;
    env->regs[23] = regs->r23;
    env->regs[24] = regs->r24;
    env->regs[25] = regs->r25;
    env->regs[26] = regs->r26;
    env->regs[27] = regs->r27;
    env->regs[28] = regs->r28;
    env->regs[29] = regs->r29;
    env->regs[30] = regs->r30;
    env->regs[31] = regs->r31;
    env->sregs[SR_PC] = regs->pc;
#elif defined(TARGET_MIPS)
    for(i = 0; i < 32; i++) {
        env->active_tc.gpr[i] = regs->regs[i];
    env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
    if (regs->cp0_epc & 1) {
        env->hflags |= MIPS_HFLAG_M16;
#elif defined(TARGET_SH4)
    for(i = 0; i < 16; i++) {
        env->gregs[i] = regs->regs[i];
#elif defined(TARGET_ALPHA)
    for(i = 0; i < 28; i++) {
        env->ir[i] = ((abi_ulong *)regs)[i];
    env->ir[IR_SP] = regs->usp;
#elif defined(TARGET_CRIS)
    env->regs[0] = regs->r0;
    env->regs[1] = regs->r1;
    env->regs[2] = regs->r2;
    env->regs[3] = regs->r3;
    env->regs[4] = regs->r4;
    env->regs[5] = regs->r5;
    env->regs[6] = regs->r6;
    env->regs[7] = regs->r7;
    env->regs[8] = regs->r8;
    env->regs[9] = regs->r9;
    env->regs[10] = regs->r10;
    env->regs[11] = regs->r11;
    env->regs[12] = regs->r12;
    env->regs[13] = regs->r13;
    env->regs[14] = info->start_stack;
    env->regs[15] = regs->acr;
    env->pc = regs->erp;
#elif defined(TARGET_S390X)
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->gprs[i];
    env->psw.mask = regs->psw.mask;
    env->psw.addr = regs->psw.addr;
#error unsupported target CPU

#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call.  */

    if (gdbserver_start(gdbstub_port) < 0) {
        fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
    gdb_handlesig(env, 0);