/*
 *  Copyright (c) 2003-2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/syscall.h>
#include <sys/resource.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
static const char *filename;
static const char *argv0;
static int gdbstub_port;
static envlist_t *envlist;
static const char *cpu_model;
unsigned long mmap_min_addr;
unsigned long guest_base;
#if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
/*
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
 *
 * This way we will never overlap with our own libraries or binaries or stack
 * or anything else that QEMU maps.
 */
#ifdef TARGET_MIPS
/* MIPS only supports 31 bits of virtual address space for user space */
unsigned long reserved_va = 0x77000000;
#else
unsigned long reserved_va = 0xf7000000;
#endif
#else
unsigned long reserved_va;
#endif
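/* When reserved_va is non-zero the startup code reserves that much guest
 * address space as one contiguous block: 0x77000000 keeps MIPS guests inside
 * their 31-bit user range, while 0xf7000000 covers most of a 32-bit address
 * space for the other targets.  A value of 0 disables the reservation.
 */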
static void usage(void);

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;

/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
   we allocate a bigger stack. Need a better solution, for example
   by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;
void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}
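/* Used for guest-visible diagnostics throughout the CPU loops, e.g.
 *     gemu_log("qemu: Unsupported ARM syscall: 0x%x\n", n);
 */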
#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)
{
    return -1;
}
#endif
/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work.  */
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
static int pending_cpus;
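/* pending_cpus appears to act as the hand-shake counter for the exclusive
 * protocol below: it stays 0 while no exclusive operation is in flight, and
 * while one is pending it counts the CPUs that must stop before the
 * exclusive section can proceed (see start_exclusive()/cpu_exec_end()).
 */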
/* Make sure everything is in a consistent state for calling fork().  */
void fork_start(void)
{
    pthread_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    pthread_mutex_lock(&exclusive_lock);
}

void fork_end(int child)
{
    mmap_fork_end(child);
    if (child) {
        CPUState *cpu, *next_cpu;
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads.  */
        CPU_FOREACH_SAFE(cpu, next_cpu) {
            if (cpu != thread_cpu) {
                QTAILQ_REMOVE(&cpus, cpu, node);
            }
        }
        pthread_mutex_init(&exclusive_lock, NULL);
        pthread_mutex_init(&cpu_list_mutex, NULL);
        pthread_cond_init(&exclusive_cond, NULL);
        pthread_cond_init(&exclusive_resume, NULL);
        pthread_mutex_init(&tcg_ctx.tb_ctx.tb_lock, NULL);
        gdbserver_fork(thread_cpu);
    } else {
        pthread_mutex_unlock(&exclusive_lock);
        pthread_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
    }
}
/* Wait for pending exclusive operations to complete.  The exclusive lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
    }
}
/* Start an exclusive operation.
   Must only be called from outside cpu_arm_exec.  */
static inline void start_exclusive(void)
{
    CPUState *other_cpu;

    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();

    pending_cpus = 1;
    /* Make all other cpus stop executing.  */
    CPU_FOREACH(other_cpu) {
        if (other_cpu->running) {
            pending_cpus++;
            cpu_exit(other_cpu);
        }
    }
    if (pending_cpus > 1) {
        pthread_cond_wait(&exclusive_cond, &exclusive_lock);
    }
}
/* Finish an exclusive operation.  */
static inline void __attribute__((unused)) end_exclusive(void)
{
    pending_cpus = 0;
    pthread_cond_broadcast(&exclusive_resume);
    pthread_mutex_unlock(&exclusive_lock);
}
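/* A minimal usage sketch: a target's atomic helper brackets its guest
 * read-modify-write with the pair above, e.g.
 *
 *     start_exclusive();
 *     ... load guest value, compare, conditionally store ...
 *     end_exclusive();
 *
 * so that no other emulated CPU executes guest code in between.
 */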
/* Wait for exclusive ops to finish, and begin cpu execution.  */
static inline void cpu_exec_start(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();
    cpu->running = true;
    pthread_mutex_unlock(&exclusive_lock);
}
/* Mark cpu as not executing, and release pending exclusive ops.  */
static inline void cpu_exec_end(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    cpu->running = false;
    if (pending_cpus > 1) {
        pending_cpus--;
        if (pending_cpus == 1) {
            pthread_cond_signal(&exclusive_cond);
        }
    }
    exclusive_idle();
    pthread_mutex_unlock(&exclusive_lock);
}
void cpu_list_lock(void)
{
    pthread_mutex_lock(&cpu_list_mutex);
}

void cpu_list_unlock(void)
{
    pthread_mutex_unlock(&cpu_list_mutex);
}
/***********************************************************/
/* CPUX86 core interface */

uint64_t cpu_get_tsc(CPUX86State *env)
{
    return cpu_get_real_ticks();
}

static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
                     int flags)
{
    unsigned int e1, e2;
    uint32_t *p;

    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
    e2 |= flags;
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}

static uint64_t *idt_table;
#ifdef TARGET_X86_64
static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;

    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
    p[2] = tswap32(addr >> 32);
    p[3] = 0;
}

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
}
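/* On 64-bit targets each IDT gate descriptor is 16 bytes, hence the
 * "idt_table + n * 2" indexing on uint64_t entries above; the 32-bit
 * variant below uses 8-byte gates and plain "idt_table + n".
 */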
#else
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;

    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}

/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate(idt_table + n, 0, dpl, 0, 0);
}
#endif
void cpu_loop(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    int trapnr;
    abi_ulong pc;
    target_siginfo_t info;

    trapnr = cpu_x86_exec(cs);

    /* linux syscall from int $0x80 */
    env->regs[R_EAX] = do_syscall(env,

    /* linux syscall from syscall instruction */
    env->regs[R_EAX] = do_syscall(env,

    info.si_signo = TARGET_SIGBUS;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._sigfault._addr = 0;
    queue_signal(env, info.si_signo, &info);

    /* XXX: potential problem if ABI32 */
#ifndef TARGET_X86_64
    if (env->eflags & VM_MASK) {
        handle_vm86_fault(env);
    }
#endif
    info.si_signo = TARGET_SIGSEGV;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._sigfault._addr = 0;
    queue_signal(env, info.si_signo, &info);

    info.si_signo = TARGET_SIGSEGV;
    if (!(env->error_code & 1))
        info.si_code = TARGET_SEGV_MAPERR;
    else
        info.si_code = TARGET_SEGV_ACCERR;
    info._sifields._sigfault._addr = env->cr[2];
    queue_signal(env, info.si_signo, &info);

#ifndef TARGET_X86_64
    if (env->eflags & VM_MASK) {
        handle_vm86_trap(env, trapnr);
    }
#endif
    /* division by zero */
    info.si_signo = TARGET_SIGFPE;
    info.si_code = TARGET_FPE_INTDIV;
    info._sifields._sigfault._addr = env->eip;
    queue_signal(env, info.si_signo, &info);

#ifndef TARGET_X86_64
    if (env->eflags & VM_MASK) {
        handle_vm86_trap(env, trapnr);
    }
#endif
    info.si_signo = TARGET_SIGTRAP;
    if (trapnr == EXCP01_DB) {
        info.si_code = TARGET_TRAP_BRKPT;
        info._sifields._sigfault._addr = env->eip;
    } else {
        info.si_code = TARGET_SI_KERNEL;
        info._sifields._sigfault._addr = 0;
    }
    queue_signal(env, info.si_signo, &info);

#ifndef TARGET_X86_64
    if (env->eflags & VM_MASK) {
        handle_vm86_trap(env, trapnr);
    }
#endif
    info.si_signo = TARGET_SIGSEGV;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._sigfault._addr = 0;
    queue_signal(env, info.si_signo, &info);

    info.si_signo = TARGET_SIGILL;
    info.si_code = TARGET_ILL_ILLOPN;
    info._sifields._sigfault._addr = env->eip;
    queue_signal(env, info.si_signo, &info);

    /* just indicate that signals should be handled asap */

    sig = gdb_handlesig(cs, TARGET_SIGTRAP);
    info.si_code = TARGET_TRAP_BRKPT;
    queue_signal(env, info.si_signo, &info);

    pc = env->segs[R_CS].base + env->eip;
    fprintf(stderr, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
            pc, trapnr);

    process_pending_signals(env);
}
#define get_user_code_u32(x, gaddr, doswap)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));          \
        if (!__r && (doswap)) {                            \
            (x) = bswap32(x);                              \
        }                                                  \
        __r;                                               \
    })

#define get_user_code_u16(x, gaddr, doswap)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));          \
        if (!__r && (doswap)) {                            \
            (x) = bswap16(x);                              \
        }                                                  \
        __r;                                               \
    })
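/*
 * Typical use, as in the ARM loop further down: fetch the faulting guest
 * instruction, byte-swapping when the guest code is stored in the other
 * endianness, e.g.
 *     get_user_code_u32(opcode, env->regs[15], env->bswap_code);
 */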
/* Commpage handling -- there is no commpage for AArch64 */

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 * Input:
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * Output:
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
    uint64_t oldval, newval, val;
    uint32_t addr, cpsr;
    target_siginfo_t info;

    /* Based on the 32 bit code in do_kernel_trap */

    /* XXX: This only works between threads, not between processes.
       It's probably possible to implement this with native host
       operations. However things like ldrex/strex are much harder so
       there's not much point trying.  */
    cpsr = cpsr_read(env);
    addr = env->regs[2];

    if (get_user_u64(oldval, env->regs[0])) {
        env->exception.vaddress = env->regs[0];
        goto segv;
    }

    if (get_user_u64(newval, env->regs[1])) {
        env->exception.vaddress = env->regs[1];
        goto segv;
    }

    if (get_user_u64(val, addr)) {
        env->exception.vaddress = addr;
        goto segv;
    }

    if (val == oldval) {
        val = newval;

        if (put_user_u64(val, addr)) {
            env->exception.vaddress = addr;
            goto segv;
        }
    }

    cpsr_write(env, cpsr, CPSR_C);
    return;

segv:
    /* We get the PC of the entry address - which is as good as anything,
       on a real kernel what you get depends on which mode it uses.  */
    info.si_signo = TARGET_SIGSEGV;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->exception.vaddress;
    queue_signal(env, info.si_signo, &info);
}
/* Handle a jump to the kernel code page.  */
static int
do_kernel_trap(CPUARMState *env)
{
    uint32_t addr;
    uint32_t cpsr;
    uint32_t val;

    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        /* ??? No-op. Will need to do better for SMP. */
        break;
    case 0xffff0fc0: /* __kernel_cmpxchg */
        /* XXX: This only works between threads, not between processes.
           It's probably possible to implement this with native host
           operations. However things like ldrex/strex are much harder so
           there's not much point trying.  */
        cpsr = cpsr_read(env);
        addr = env->regs[2];
        /* FIXME: This should SEGV if the access fails.  */
        if (get_user_u32(val, addr))
            val = ~env->regs[0];
        if (val == env->regs[0]) {
            val = env->regs[1];
            /* FIXME: Check for segfaults.  */
            put_user_u32(val, addr);
        }
        cpsr_write(env, cpsr, CPSR_C);
        break;
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = cpu_get_tls(env);
        break;
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);
        break;
    default:
        return 1;
    }
    /* Jump back to the caller.  */
    addr = env->regs[14];
    env->regs[15] = addr;

    return 0;
}
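/* The commpage entry points handled above correspond to the kernel's fixed
 * user helpers: 0xffff0fa0 memory barrier, 0xffff0fc0 cmpxchg, 0xffff0fe0
 * get_tls and 0xffff0f60 cmpxchg64; any other address on the kernel page is
 * left to the caller to treat as an error.
 */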
/* Store exclusive handling for AArch32 */
static int do_strex(CPUARMState *env)
{
    uint64_t val;
    int size;
    int rc = 1;
    int segv;
    uint32_t addr;

    if (env->exclusive_addr != env->exclusive_test) {
        goto fail;
    }
    /* We know we're always AArch32 so the address is in uint32_t range
     * unless it was the -1 exclusive-monitor-lost value (which won't
     * match exclusive_test above).
     */
    assert(extract64(env->exclusive_addr, 32, 32) == 0);
    addr = env->exclusive_addr;
    size = env->exclusive_info & 0xf;
    switch (size) {
    case 0:
        segv = get_user_u8(val, addr);
        break;
    case 1:
        segv = get_user_u16(val, addr);
        break;
    case 2:
    case 3:
        segv = get_user_u32(val, addr);
        break;
    }
    if (segv) {
        env->exception.vaddress = addr;
        goto done;
    }
    if (size == 3) {
        uint32_t valhi;
        segv = get_user_u32(valhi, addr + 4);
        if (segv) {
            env->exception.vaddress = addr + 4;
            goto done;
        }
        val = deposit64(val, 32, 32, valhi);
    }
    if (val != env->exclusive_val) {
        goto fail;
    }

    val = env->regs[(env->exclusive_info >> 8) & 0xf];
    switch (size) {
    case 0:
        segv = put_user_u8(val, addr);
        break;
    case 1:
        segv = put_user_u16(val, addr);
        break;
    case 2:
    case 3:
        segv = put_user_u32(val, addr);
        break;
    }
    if (segv) {
        env->exception.vaddress = addr;
        goto done;
    }
    if (size == 3) {
        val = env->regs[(env->exclusive_info >> 12) & 0xf];
        segv = put_user_u32(val, addr + 4);
        if (segv) {
            env->exception.vaddress = addr + 4;
            goto done;
        }
    }
    rc = 0;
fail:
    env->regs[(env->exclusive_info >> 4) & 0xf] = rc;
done:
    return segv;
}
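/* env->exclusive_info appears to pack the decoded STREX operands: bits [3:0]
 * the access-size code, [7:4] the status register written with rc above,
 * [11:8] the source register Rt, and [15:12] Rt2 for the doubleword case.
 */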
665 void cpu_loop(CPUARMState
*env
)
667 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
669 unsigned int n
, insn
;
670 target_siginfo_t info
;
675 trapnr
= cpu_arm_exec(cs
);
680 TaskState
*ts
= cs
->opaque
;
684 /* we handle the FPU emulation here, as Linux */
685 /* we get the opcode */
686 /* FIXME - what to do if get_user() fails? */
687 get_user_code_u32(opcode
, env
->regs
[15], env
->bswap_code
);
689 rc
= EmulateAll(opcode
, &ts
->fpa
, env
);
690 if (rc
== 0) { /* illegal instruction */
691 info
.si_signo
= TARGET_SIGILL
;
693 info
.si_code
= TARGET_ILL_ILLOPN
;
694 info
._sifields
._sigfault
._addr
= env
->regs
[15];
695 queue_signal(env
, info
.si_signo
, &info
);
696 } else if (rc
< 0) { /* FP exception */
699 /* translate softfloat flags to FPSR flags */
700 if (-rc
& float_flag_invalid
)
702 if (-rc
& float_flag_divbyzero
)
704 if (-rc
& float_flag_overflow
)
706 if (-rc
& float_flag_underflow
)
708 if (-rc
& float_flag_inexact
)
711 FPSR fpsr
= ts
->fpa
.fpsr
;
712 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
714 if (fpsr
& (arm_fpe
<< 16)) { /* exception enabled? */
715 info
.si_signo
= TARGET_SIGFPE
;
718 /* ordered by priority, least first */
719 if (arm_fpe
& BIT_IXC
) info
.si_code
= TARGET_FPE_FLTRES
;
720 if (arm_fpe
& BIT_UFC
) info
.si_code
= TARGET_FPE_FLTUND
;
721 if (arm_fpe
& BIT_OFC
) info
.si_code
= TARGET_FPE_FLTOVF
;
722 if (arm_fpe
& BIT_DZC
) info
.si_code
= TARGET_FPE_FLTDIV
;
723 if (arm_fpe
& BIT_IOC
) info
.si_code
= TARGET_FPE_FLTINV
;
725 info
._sifields
._sigfault
._addr
= env
->regs
[15];
726 queue_signal(env
, info
.si_signo
, &info
);
731 /* accumulate unenabled exceptions */
732 if ((!(fpsr
& BIT_IXE
)) && (arm_fpe
& BIT_IXC
))
734 if ((!(fpsr
& BIT_UFE
)) && (arm_fpe
& BIT_UFC
))
736 if ((!(fpsr
& BIT_OFE
)) && (arm_fpe
& BIT_OFC
))
738 if ((!(fpsr
& BIT_DZE
)) && (arm_fpe
& BIT_DZC
))
740 if ((!(fpsr
& BIT_IOE
)) && (arm_fpe
& BIT_IOC
))
743 } else { /* everything OK */
754 if (trapnr
== EXCP_BKPT
) {
756 /* FIXME - what to do if get_user() fails? */
757 get_user_code_u16(insn
, env
->regs
[15], env
->bswap_code
);
761 /* FIXME - what to do if get_user() fails? */
762 get_user_code_u32(insn
, env
->regs
[15], env
->bswap_code
);
763 n
= (insn
& 0xf) | ((insn
>> 4) & 0xff0);
768 /* FIXME - what to do if get_user() fails? */
769 get_user_code_u16(insn
, env
->regs
[15] - 2,
773 /* FIXME - what to do if get_user() fails? */
774 get_user_code_u32(insn
, env
->regs
[15] - 4,
780 if (n
== ARM_NR_cacheflush
) {
782 } else if (n
== ARM_NR_semihosting
783 || n
== ARM_NR_thumb_semihosting
) {
784 env
->regs
[0] = do_arm_semihosting (env
);
785 } else if (n
== 0 || n
>= ARM_SYSCALL_BASE
|| env
->thumb
) {
787 if (env
->thumb
|| n
== 0) {
790 n
-= ARM_SYSCALL_BASE
;
793 if ( n
> ARM_NR_BASE
) {
795 case ARM_NR_cacheflush
:
799 cpu_set_tls(env
, env
->regs
[0]);
802 case ARM_NR_breakpoint
:
803 env
->regs
[15] -= env
->thumb
? 2 : 4;
806 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
808 env
->regs
[0] = -TARGET_ENOSYS
;
812 env
->regs
[0] = do_syscall(env
,
828 /* just indicate that signals should be handled asap */
831 if (!do_strex(env
)) {
834 /* fall through for segv */
835 case EXCP_PREFETCH_ABORT
:
836 case EXCP_DATA_ABORT
:
837 addr
= env
->exception
.vaddress
;
839 info
.si_signo
= TARGET_SIGSEGV
;
841 /* XXX: check env->error_code */
842 info
.si_code
= TARGET_SEGV_MAPERR
;
843 info
._sifields
._sigfault
._addr
= addr
;
844 queue_signal(env
, info
.si_signo
, &info
);
852 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
857 info
.si_code
= TARGET_TRAP_BRKPT
;
858 queue_signal(env
, info
.si_signo
, &info
);
862 case EXCP_KERNEL_TRAP
:
863 if (do_kernel_trap(env
))
868 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
870 cpu_dump_state(cs
, stderr
, fprintf
, 0);
873 process_pending_signals(env
);
880 * Handle AArch64 store-release exclusive
882 * rs = gets the status result of store exclusive
883 * rt = is the register that is stored
884 * rt2 = is the second register store (in STP)
887 static int do_strex_a64(CPUARMState
*env
)
898 /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
899 size
= extract32(env
->exclusive_info
, 0, 2);
900 is_pair
= extract32(env
->exclusive_info
, 2, 1);
901 rs
= extract32(env
->exclusive_info
, 4, 5);
902 rt
= extract32(env
->exclusive_info
, 9, 5);
903 rt2
= extract32(env
->exclusive_info
, 14, 5);
905 addr
= env
->exclusive_addr
;
907 if (addr
!= env
->exclusive_test
) {
913 segv
= get_user_u8(val
, addr
);
916 segv
= get_user_u16(val
, addr
);
919 segv
= get_user_u32(val
, addr
);
922 segv
= get_user_u64(val
, addr
);
928 env
->exception
.vaddress
= addr
;
931 if (val
!= env
->exclusive_val
) {
936 segv
= get_user_u32(val
, addr
+ 4);
938 segv
= get_user_u64(val
, addr
+ 8);
941 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
944 if (val
!= env
->exclusive_high
) {
948 /* handle the zero register */
949 val
= rt
== 31 ? 0 : env
->xregs
[rt
];
952 segv
= put_user_u8(val
, addr
);
955 segv
= put_user_u16(val
, addr
);
958 segv
= put_user_u32(val
, addr
);
961 segv
= put_user_u64(val
, addr
);
968 /* handle the zero register */
969 val
= rt2
== 31 ? 0 : env
->xregs
[rt2
];
971 segv
= put_user_u32(val
, addr
+ 4);
973 segv
= put_user_u64(val
, addr
+ 8);
976 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
983 /* rs == 31 encodes a write to the ZR, thus throwing away
984 * the status return. This is rather silly but valid.
990 /* instruction faulted, PC does not advance */
991 /* either way a strex releases any exclusive lock we have */
992 env
->exclusive_addr
= -1;
997 /* AArch64 main loop */
998 void cpu_loop(CPUARMState
*env
)
1000 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
1002 target_siginfo_t info
;
1006 trapnr
= cpu_arm_exec(cs
);
1011 env
->xregs
[0] = do_syscall(env
,
1021 case EXCP_INTERRUPT
:
1022 /* just indicate that signals should be handled asap */
1025 info
.si_signo
= TARGET_SIGILL
;
1027 info
.si_code
= TARGET_ILL_ILLOPN
;
1028 info
._sifields
._sigfault
._addr
= env
->pc
;
1029 queue_signal(env
, info
.si_signo
, &info
);
1032 if (!do_strex_a64(env
)) {
1035 /* fall through for segv */
1036 case EXCP_PREFETCH_ABORT
:
1037 case EXCP_DATA_ABORT
:
1038 info
.si_signo
= TARGET_SIGSEGV
;
1040 /* XXX: check env->error_code */
1041 info
.si_code
= TARGET_SEGV_MAPERR
;
1042 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
1043 queue_signal(env
, info
.si_signo
, &info
);
1047 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1049 info
.si_signo
= sig
;
1051 info
.si_code
= TARGET_TRAP_BRKPT
;
1052 queue_signal(env
, info
.si_signo
, &info
);
1056 env
->xregs
[0] = do_arm_semihosting(env
);
1059 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n",
1061 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1064 process_pending_signals(env
);
1065 /* Exception return on AArch64 always clears the exclusive monitor,
1066 * so any return to running guest code implies this.
1067 * A strex (successful or otherwise) also clears the monitor, so
1068 * we don't need to specialcase EXCP_STREX.
1070 env
->exclusive_addr
= -1;
1073 #endif /* ndef TARGET_ABI32 */
1077 #ifdef TARGET_UNICORE32
1079 void cpu_loop(CPUUniCore32State
*env
)
1081 CPUState
*cs
= CPU(uc32_env_get_cpu(env
));
1083 unsigned int n
, insn
;
1084 target_siginfo_t info
;
1088 trapnr
= uc32_cpu_exec(cs
);
1091 case UC32_EXCP_PRIV
:
1094 get_user_u32(insn
, env
->regs
[31] - 4);
1095 n
= insn
& 0xffffff;
1097 if (n
>= UC32_SYSCALL_BASE
) {
1099 n
-= UC32_SYSCALL_BASE
;
1100 if (n
== UC32_SYSCALL_NR_set_tls
) {
1101 cpu_set_tls(env
, env
->regs
[0]);
1104 env
->regs
[0] = do_syscall(env
,
1119 case UC32_EXCP_DTRAP
:
1120 case UC32_EXCP_ITRAP
:
1121 info
.si_signo
= TARGET_SIGSEGV
;
1123 /* XXX: check env->error_code */
1124 info
.si_code
= TARGET_SEGV_MAPERR
;
1125 info
._sifields
._sigfault
._addr
= env
->cp0
.c4_faultaddr
;
1126 queue_signal(env
, info
.si_signo
, &info
);
1128 case EXCP_INTERRUPT
:
1129 /* just indicate that signals should be handled asap */
1135 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1137 info
.si_signo
= sig
;
1139 info
.si_code
= TARGET_TRAP_BRKPT
;
1140 queue_signal(env
, info
.si_signo
, &info
);
1147 process_pending_signals(env
);
1151 fprintf(stderr
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1152 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1158 #define SPARC64_STACK_BIAS 2047
1162 /* WARNING: dealing with register windows _is_ complicated. More info
1163 can be found at http://www.sics.se/~psm/sparcstack.html */
1164 static inline int get_reg_index(CPUSPARCState
*env
, int cwp
, int index
)
1166 index
= (index
+ cwp
* 16) % (16 * env
->nwindows
);
1167 /* wrap handling : if cwp is on the last window, then we use the
1168 registers 'after' the end */
1169 if (index
< 8 && env
->cwp
== env
->nwindows
- 1)
1170 index
+= 16 * env
->nwindows
;
1174 /* save the register window 'cwp1' */
1175 static inline void save_window_offset(CPUSPARCState
*env
, int cwp1
)
1180 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1181 #ifdef TARGET_SPARC64
1183 sp_ptr
+= SPARC64_STACK_BIAS
;
1185 #if defined(DEBUG_WIN)
1186 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" save_cwp=%d\n",
1189 for(i
= 0; i
< 16; i
++) {
1190 /* FIXME - what to do if put_user() fails? */
1191 put_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1192 sp_ptr
+= sizeof(abi_ulong
);
1196 static void save_window(CPUSPARCState
*env
)
1198 #ifndef TARGET_SPARC64
1199 unsigned int new_wim
;
1200 new_wim
= ((env
->wim
>> 1) | (env
->wim
<< (env
->nwindows
- 1))) &
1201 ((1LL << env
->nwindows
) - 1);
1202 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1205 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1211 static void restore_window(CPUSPARCState
*env
)
1213 #ifndef TARGET_SPARC64
1214 unsigned int new_wim
;
1216 unsigned int i
, cwp1
;
1219 #ifndef TARGET_SPARC64
1220 new_wim
= ((env
->wim
<< 1) | (env
->wim
>> (env
->nwindows
- 1))) &
1221 ((1LL << env
->nwindows
) - 1);
1224 /* restore the invalid window */
1225 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1226 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1227 #ifdef TARGET_SPARC64
1229 sp_ptr
+= SPARC64_STACK_BIAS
;
1231 #if defined(DEBUG_WIN)
1232 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" load_cwp=%d\n",
1235 for(i
= 0; i
< 16; i
++) {
1236 /* FIXME - what to do if get_user() fails? */
1237 get_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1238 sp_ptr
+= sizeof(abi_ulong
);
1240 #ifdef TARGET_SPARC64
1242 if (env
->cleanwin
< env
->nwindows
- 1)
1250 static void flush_windows(CPUSPARCState
*env
)
1256 /* if restore would invoke restore_window(), then we can stop */
1257 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ offset
);
1258 #ifndef TARGET_SPARC64
1259 if (env
->wim
& (1 << cwp1
))
1262 if (env
->canrestore
== 0)
1267 save_window_offset(env
, cwp1
);
1270 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1271 #ifndef TARGET_SPARC64
1272 /* set wim so that restore will reload the registers */
1273 env
->wim
= 1 << cwp1
;
1275 #if defined(DEBUG_WIN)
1276 printf("flush_windows: nb=%d\n", offset
- 1);
1280 void cpu_loop (CPUSPARCState
*env
)
1282 CPUState
*cs
= CPU(sparc_env_get_cpu(env
));
1285 target_siginfo_t info
;
1289 trapnr
= cpu_sparc_exec(cs
);
1292 /* Compute PSR before exposing state. */
1293 if (env
->cc_op
!= CC_OP_FLAGS
) {
1298 #ifndef TARGET_SPARC64
1305 ret
= do_syscall (env
, env
->gregs
[1],
1306 env
->regwptr
[0], env
->regwptr
[1],
1307 env
->regwptr
[2], env
->regwptr
[3],
1308 env
->regwptr
[4], env
->regwptr
[5],
1310 if ((abi_ulong
)ret
>= (abi_ulong
)(-515)) {
1311 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1312 env
->xcc
|= PSR_CARRY
;
1314 env
->psr
|= PSR_CARRY
;
1318 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1319 env
->xcc
&= ~PSR_CARRY
;
1321 env
->psr
&= ~PSR_CARRY
;
1324 env
->regwptr
[0] = ret
;
1325 /* next instruction */
1327 env
->npc
= env
->npc
+ 4;
1329 case 0x83: /* flush windows */
1334 /* next instruction */
1336 env
->npc
= env
->npc
+ 4;
1338 #ifndef TARGET_SPARC64
1339 case TT_WIN_OVF
: /* window overflow */
1342 case TT_WIN_UNF
: /* window underflow */
1343 restore_window(env
);
1348 info
.si_signo
= TARGET_SIGSEGV
;
1350 /* XXX: check env->error_code */
1351 info
.si_code
= TARGET_SEGV_MAPERR
;
1352 info
._sifields
._sigfault
._addr
= env
->mmuregs
[4];
1353 queue_signal(env
, info
.si_signo
, &info
);
1357 case TT_SPILL
: /* window overflow */
1360 case TT_FILL
: /* window underflow */
1361 restore_window(env
);
1366 info
.si_signo
= TARGET_SIGSEGV
;
1368 /* XXX: check env->error_code */
1369 info
.si_code
= TARGET_SEGV_MAPERR
;
1370 if (trapnr
== TT_DFAULT
)
1371 info
._sifields
._sigfault
._addr
= env
->dmmuregs
[4];
1373 info
._sifields
._sigfault
._addr
= cpu_tsptr(env
)->tpc
;
1374 queue_signal(env
, info
.si_signo
, &info
);
1377 #ifndef TARGET_ABI32
1380 sparc64_get_context(env
);
1384 sparc64_set_context(env
);
1388 case EXCP_INTERRUPT
:
1389 /* just indicate that signals should be handled asap */
1393 info
.si_signo
= TARGET_SIGILL
;
1395 info
.si_code
= TARGET_ILL_ILLOPC
;
1396 info
._sifields
._sigfault
._addr
= env
->pc
;
1397 queue_signal(env
, info
.si_signo
, &info
);
1404 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1407 info
.si_signo
= sig
;
1409 info
.si_code
= TARGET_TRAP_BRKPT
;
1410 queue_signal(env
, info
.si_signo
, &info
);
1415 printf ("Unhandled trap: 0x%x\n", trapnr
);
1416 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1419 process_pending_signals (env
);
static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env)
{
    return cpu_get_real_ticks();
}

uint64_t cpu_ppc_load_tbl(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env);
}

uint32_t cpu_ppc_load_tbu(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env) >> 32;
}

uint64_t cpu_ppc_load_atbl(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env);
}

uint32_t cpu_ppc_load_atbu(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env) >> 32;
}

uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env)
    __attribute__ (( alias ("cpu_ppc_load_tbu") ));

uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env)
{
    return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
}

/* XXX: to be fixed */
int ppc_dcr_read(ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
{
    return -1;
}

int ppc_dcr_write(ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
{
    return -1;
}
#define EXCP_DUMP(env, fmt, ...)                                        \
do {                                                                    \
    CPUState *cs = ENV_GET_CPU(env);                                    \
    fprintf(stderr, fmt , ## __VA_ARGS__);                              \
    cpu_dump_state(cs, stderr, fprintf, 0);                             \
    qemu_log(fmt, ## __VA_ARGS__);                                      \
    if (qemu_log_enabled()) {                                           \
        log_cpu_state(cs, 0);                                           \
    }                                                                   \
} while (0)
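/* Used by the PowerPC loop below to report fatal guest faults on both
 * stderr and the QEMU log, e.g.
 *     EXCP_DUMP(env, "Unaligned memory access\n");
 */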
1481 static int do_store_exclusive(CPUPPCState
*env
)
1484 target_ulong page_addr
;
1485 target_ulong val
, val2
__attribute__((unused
)) = 0;
1489 addr
= env
->reserve_ea
;
1490 page_addr
= addr
& TARGET_PAGE_MASK
;
1493 flags
= page_get_flags(page_addr
);
1494 if ((flags
& PAGE_READ
) == 0) {
1497 int reg
= env
->reserve_info
& 0x1f;
1498 int size
= env
->reserve_info
>> 5;
1501 if (addr
== env
->reserve_addr
) {
1503 case 1: segv
= get_user_u8(val
, addr
); break;
1504 case 2: segv
= get_user_u16(val
, addr
); break;
1505 case 4: segv
= get_user_u32(val
, addr
); break;
1506 #if defined(TARGET_PPC64)
1507 case 8: segv
= get_user_u64(val
, addr
); break;
1509 segv
= get_user_u64(val
, addr
);
1511 segv
= get_user_u64(val2
, addr
+ 8);
1518 if (!segv
&& val
== env
->reserve_val
) {
1519 val
= env
->gpr
[reg
];
1521 case 1: segv
= put_user_u8(val
, addr
); break;
1522 case 2: segv
= put_user_u16(val
, addr
); break;
1523 case 4: segv
= put_user_u32(val
, addr
); break;
1524 #if defined(TARGET_PPC64)
1525 case 8: segv
= put_user_u64(val
, addr
); break;
1527 if (val2
== env
->reserve_val2
) {
1530 val
= env
->gpr
[reg
+1];
1532 val2
= env
->gpr
[reg
+1];
1534 segv
= put_user_u64(val
, addr
);
1536 segv
= put_user_u64(val2
, addr
+ 8);
1549 env
->crf
[0] = (stored
<< 1) | xer_so
;
1550 env
->reserve_addr
= (target_ulong
)-1;
1560 void cpu_loop(CPUPPCState
*env
)
1562 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
1563 target_siginfo_t info
;
1569 trapnr
= cpu_ppc_exec(cs
);
1572 case POWERPC_EXCP_NONE
:
1575 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1576 cpu_abort(cs
, "Critical interrupt while in user mode. "
1579 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1580 cpu_abort(cs
, "Machine check exception while in user mode. "
1583 case POWERPC_EXCP_DSI
: /* Data storage exception */
1584 EXCP_DUMP(env
, "Invalid data memory access: 0x" TARGET_FMT_lx
"\n",
1586 /* XXX: check this. Seems bugged */
1587 switch (env
->error_code
& 0xFF000000) {
1589 info
.si_signo
= TARGET_SIGSEGV
;
1591 info
.si_code
= TARGET_SEGV_MAPERR
;
1594 info
.si_signo
= TARGET_SIGILL
;
1596 info
.si_code
= TARGET_ILL_ILLADR
;
1599 info
.si_signo
= TARGET_SIGSEGV
;
1601 info
.si_code
= TARGET_SEGV_ACCERR
;
1604 /* Let's send a regular segfault... */
1605 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1607 info
.si_signo
= TARGET_SIGSEGV
;
1609 info
.si_code
= TARGET_SEGV_MAPERR
;
1612 info
._sifields
._sigfault
._addr
= env
->nip
;
1613 queue_signal(env
, info
.si_signo
, &info
);
1615 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1616 EXCP_DUMP(env
, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1617 "\n", env
->spr
[SPR_SRR0
]);
1618 /* XXX: check this */
1619 switch (env
->error_code
& 0xFF000000) {
1621 info
.si_signo
= TARGET_SIGSEGV
;
1623 info
.si_code
= TARGET_SEGV_MAPERR
;
1627 info
.si_signo
= TARGET_SIGSEGV
;
1629 info
.si_code
= TARGET_SEGV_ACCERR
;
1632 /* Let's send a regular segfault... */
1633 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1635 info
.si_signo
= TARGET_SIGSEGV
;
1637 info
.si_code
= TARGET_SEGV_MAPERR
;
1640 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1641 queue_signal(env
, info
.si_signo
, &info
);
1643 case POWERPC_EXCP_EXTERNAL
: /* External input */
1644 cpu_abort(cs
, "External interrupt while in user mode. "
1647 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1648 EXCP_DUMP(env
, "Unaligned memory access\n");
1649 /* XXX: check this */
1650 info
.si_signo
= TARGET_SIGBUS
;
1652 info
.si_code
= TARGET_BUS_ADRALN
;
1653 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1654 queue_signal(env
, info
.si_signo
, &info
);
1656 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1657 /* XXX: check this */
1658 switch (env
->error_code
& ~0xF) {
1659 case POWERPC_EXCP_FP
:
1660 EXCP_DUMP(env
, "Floating point program exception\n");
1661 info
.si_signo
= TARGET_SIGFPE
;
1663 switch (env
->error_code
& 0xF) {
1664 case POWERPC_EXCP_FP_OX
:
1665 info
.si_code
= TARGET_FPE_FLTOVF
;
1667 case POWERPC_EXCP_FP_UX
:
1668 info
.si_code
= TARGET_FPE_FLTUND
;
1670 case POWERPC_EXCP_FP_ZX
:
1671 case POWERPC_EXCP_FP_VXZDZ
:
1672 info
.si_code
= TARGET_FPE_FLTDIV
;
1674 case POWERPC_EXCP_FP_XX
:
1675 info
.si_code
= TARGET_FPE_FLTRES
;
1677 case POWERPC_EXCP_FP_VXSOFT
:
1678 info
.si_code
= TARGET_FPE_FLTINV
;
1680 case POWERPC_EXCP_FP_VXSNAN
:
1681 case POWERPC_EXCP_FP_VXISI
:
1682 case POWERPC_EXCP_FP_VXIDI
:
1683 case POWERPC_EXCP_FP_VXIMZ
:
1684 case POWERPC_EXCP_FP_VXVC
:
1685 case POWERPC_EXCP_FP_VXSQRT
:
1686 case POWERPC_EXCP_FP_VXCVI
:
1687 info
.si_code
= TARGET_FPE_FLTSUB
;
1690 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1695 case POWERPC_EXCP_INVAL
:
1696 EXCP_DUMP(env
, "Invalid instruction\n");
1697 info
.si_signo
= TARGET_SIGILL
;
1699 switch (env
->error_code
& 0xF) {
1700 case POWERPC_EXCP_INVAL_INVAL
:
1701 info
.si_code
= TARGET_ILL_ILLOPC
;
1703 case POWERPC_EXCP_INVAL_LSWX
:
1704 info
.si_code
= TARGET_ILL_ILLOPN
;
1706 case POWERPC_EXCP_INVAL_SPR
:
1707 info
.si_code
= TARGET_ILL_PRVREG
;
1709 case POWERPC_EXCP_INVAL_FP
:
1710 info
.si_code
= TARGET_ILL_COPROC
;
1713 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1714 env
->error_code
& 0xF);
1715 info
.si_code
= TARGET_ILL_ILLADR
;
1719 case POWERPC_EXCP_PRIV
:
1720 EXCP_DUMP(env
, "Privilege violation\n");
1721 info
.si_signo
= TARGET_SIGILL
;
1723 switch (env
->error_code
& 0xF) {
1724 case POWERPC_EXCP_PRIV_OPC
:
1725 info
.si_code
= TARGET_ILL_PRVOPC
;
1727 case POWERPC_EXCP_PRIV_REG
:
1728 info
.si_code
= TARGET_ILL_PRVREG
;
1731 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1732 env
->error_code
& 0xF);
1733 info
.si_code
= TARGET_ILL_PRVOPC
;
1737 case POWERPC_EXCP_TRAP
:
1738 cpu_abort(cs
, "Tried to call a TRAP\n");
1741 /* Should not happen ! */
1742 cpu_abort(cs
, "Unknown program exception (%02x)\n",
1746 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1747 queue_signal(env
, info
.si_signo
, &info
);
1749 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1750 EXCP_DUMP(env
, "No floating point allowed\n");
1751 info
.si_signo
= TARGET_SIGILL
;
1753 info
.si_code
= TARGET_ILL_COPROC
;
1754 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1755 queue_signal(env
, info
.si_signo
, &info
);
1757 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1758 cpu_abort(cs
, "Syscall exception while in user mode. "
1761 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1762 EXCP_DUMP(env
, "No APU instruction allowed\n");
1763 info
.si_signo
= TARGET_SIGILL
;
1765 info
.si_code
= TARGET_ILL_COPROC
;
1766 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1767 queue_signal(env
, info
.si_signo
, &info
);
1769 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1770 cpu_abort(cs
, "Decrementer interrupt while in user mode. "
1773 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1774 cpu_abort(cs
, "Fix interval timer interrupt while in user mode. "
1777 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1778 cpu_abort(cs
, "Watchdog timer interrupt while in user mode. "
1781 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1782 cpu_abort(cs
, "Data TLB exception while in user mode. "
1785 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1786 cpu_abort(cs
, "Instruction TLB exception while in user mode. "
1789 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1790 EXCP_DUMP(env
, "No SPE/floating-point instruction allowed\n");
1791 info
.si_signo
= TARGET_SIGILL
;
1793 info
.si_code
= TARGET_ILL_COPROC
;
1794 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1795 queue_signal(env
, info
.si_signo
, &info
);
1797 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1798 cpu_abort(cs
, "Embedded floating-point data IRQ not handled\n");
1800 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1801 cpu_abort(cs
, "Embedded floating-point round IRQ not handled\n");
1803 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1804 cpu_abort(cs
, "Performance monitor exception not handled\n");
1806 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1807 cpu_abort(cs
, "Doorbell interrupt while in user mode. "
1810 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1811 cpu_abort(cs
, "Doorbell critical interrupt while in user mode. "
1814 case POWERPC_EXCP_RESET
: /* System reset exception */
1815 cpu_abort(cs
, "Reset interrupt while in user mode. "
1818 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1819 cpu_abort(cs
, "Data segment exception while in user mode. "
1822 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1823 cpu_abort(cs
, "Instruction segment exception "
1824 "while in user mode. Aborting\n");
1826 /* PowerPC 64 with hypervisor mode support */
1827 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1828 cpu_abort(cs
, "Hypervisor decrementer interrupt "
1829 "while in user mode. Aborting\n");
1831 case POWERPC_EXCP_TRACE
: /* Trace exception */
1833 * we use this exception to emulate step-by-step execution mode.
1836 /* PowerPC 64 with hypervisor mode support */
1837 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1838 cpu_abort(cs
, "Hypervisor data storage exception "
1839 "while in user mode. Aborting\n");
1841 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1842 cpu_abort(cs
, "Hypervisor instruction storage exception "
1843 "while in user mode. Aborting\n");
1845 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1846 cpu_abort(cs
, "Hypervisor data segment exception "
1847 "while in user mode. Aborting\n");
1849 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1850 cpu_abort(cs
, "Hypervisor instruction segment exception "
1851 "while in user mode. Aborting\n");
1853 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1854 EXCP_DUMP(env
, "No Altivec instructions allowed\n");
1855 info
.si_signo
= TARGET_SIGILL
;
1857 info
.si_code
= TARGET_ILL_COPROC
;
1858 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1859 queue_signal(env
, info
.si_signo
, &info
);
1861 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1862 cpu_abort(cs
, "Programmable interval timer interrupt "
1863 "while in user mode. Aborting\n");
1865 case POWERPC_EXCP_IO
: /* IO error exception */
1866 cpu_abort(cs
, "IO error exception while in user mode. "
1869 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1870 cpu_abort(cs
, "Run mode exception while in user mode. "
1873 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1874 cpu_abort(cs
, "Emulation trap exception not handled\n");
1876 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1877 cpu_abort(cs
, "Instruction fetch TLB exception "
1878 "while in user-mode. Aborting");
1880 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1881 cpu_abort(cs
, "Data load TLB exception while in user-mode. "
1884 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
1885 cpu_abort(cs
, "Data store TLB exception while in user-mode. "
1888 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
1889 cpu_abort(cs
, "Floating-point assist exception not handled\n");
1891 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1892 cpu_abort(cs
, "Instruction address breakpoint exception "
1895 case POWERPC_EXCP_SMI
: /* System management interrupt */
1896 cpu_abort(cs
, "System management interrupt while in user mode. "
1899 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1900 cpu_abort(cs
, "Thermal interrupt interrupt while in user mode. "
1903 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
1904 cpu_abort(cs
, "Performance monitor exception not handled\n");
1906 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1907 cpu_abort(cs
, "Vector assist exception not handled\n");
1909 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
1910 cpu_abort(cs
, "Soft patch exception not handled\n");
1912 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1913 cpu_abort(cs
, "Maintenance exception while in user mode. "
1916 case POWERPC_EXCP_STOP
: /* stop translation */
1917 /* We did invalidate the instruction cache. Go on */
1919 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
1920 /* We just stopped because of a branch. Go on */
1922 case POWERPC_EXCP_SYSCALL_USER
:
1923 /* system call in user-mode emulation */
1925 * PPC ABI uses overflow flag in cr0 to signal an error
1928 env
->crf
[0] &= ~0x1;
1929 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
1930 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
1932 if (ret
== (target_ulong
)(-TARGET_QEMU_ESIGRETURN
)) {
1933 /* Returning from a successful sigreturn syscall.
1934 Avoid corrupting register state. */
1937 if (ret
> (target_ulong
)(-515)) {
1943 case POWERPC_EXCP_STCX
:
1944 if (do_store_exclusive(env
)) {
1945 info
.si_signo
= TARGET_SIGSEGV
;
1947 info
.si_code
= TARGET_SEGV_MAPERR
;
1948 info
._sifields
._sigfault
._addr
= env
->nip
;
1949 queue_signal(env
, info
.si_signo
, &info
);
1956 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1958 info
.si_signo
= sig
;
1960 info
.si_code
= TARGET_TRAP_BRKPT
;
1961 queue_signal(env
, info
.si_signo
, &info
);
1965 case EXCP_INTERRUPT
:
1966 /* just indicate that signals should be handled asap */
1969 cpu_abort(cs
, "Unknown exception 0x%d. Aborting\n", trapnr
);
1972 process_pending_signals(env
);
1979 # ifdef TARGET_ABI_MIPSO32
1980 # define MIPS_SYS(name, args) args,
1981 static const uint8_t mips_syscall_args
[] = {
1982 MIPS_SYS(sys_syscall
, 8) /* 4000 */
1983 MIPS_SYS(sys_exit
, 1)
1984 MIPS_SYS(sys_fork
, 0)
1985 MIPS_SYS(sys_read
, 3)
1986 MIPS_SYS(sys_write
, 3)
1987 MIPS_SYS(sys_open
, 3) /* 4005 */
1988 MIPS_SYS(sys_close
, 1)
1989 MIPS_SYS(sys_waitpid
, 3)
1990 MIPS_SYS(sys_creat
, 2)
1991 MIPS_SYS(sys_link
, 2)
1992 MIPS_SYS(sys_unlink
, 1) /* 4010 */
1993 MIPS_SYS(sys_execve
, 0)
1994 MIPS_SYS(sys_chdir
, 1)
1995 MIPS_SYS(sys_time
, 1)
1996 MIPS_SYS(sys_mknod
, 3)
1997 MIPS_SYS(sys_chmod
, 2) /* 4015 */
1998 MIPS_SYS(sys_lchown
, 3)
1999 MIPS_SYS(sys_ni_syscall
, 0)
2000 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_stat */
2001 MIPS_SYS(sys_lseek
, 3)
2002 MIPS_SYS(sys_getpid
, 0) /* 4020 */
2003 MIPS_SYS(sys_mount
, 5)
2004 MIPS_SYS(sys_umount
, 1)
2005 MIPS_SYS(sys_setuid
, 1)
2006 MIPS_SYS(sys_getuid
, 0)
2007 MIPS_SYS(sys_stime
, 1) /* 4025 */
2008 MIPS_SYS(sys_ptrace
, 4)
2009 MIPS_SYS(sys_alarm
, 1)
2010 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_fstat */
2011 MIPS_SYS(sys_pause
, 0)
2012 MIPS_SYS(sys_utime
, 2) /* 4030 */
2013 MIPS_SYS(sys_ni_syscall
, 0)
2014 MIPS_SYS(sys_ni_syscall
, 0)
2015 MIPS_SYS(sys_access
, 2)
2016 MIPS_SYS(sys_nice
, 1)
2017 MIPS_SYS(sys_ni_syscall
, 0) /* 4035 */
2018 MIPS_SYS(sys_sync
, 0)
2019 MIPS_SYS(sys_kill
, 2)
2020 MIPS_SYS(sys_rename
, 2)
2021 MIPS_SYS(sys_mkdir
, 2)
2022 MIPS_SYS(sys_rmdir
, 1) /* 4040 */
2023 MIPS_SYS(sys_dup
, 1)
2024 MIPS_SYS(sys_pipe
, 0)
2025 MIPS_SYS(sys_times
, 1)
2026 MIPS_SYS(sys_ni_syscall
, 0)
2027 MIPS_SYS(sys_brk
, 1) /* 4045 */
2028 MIPS_SYS(sys_setgid
, 1)
2029 MIPS_SYS(sys_getgid
, 0)
2030 MIPS_SYS(sys_ni_syscall
, 0) /* was signal(2) */
2031 MIPS_SYS(sys_geteuid
, 0)
2032 MIPS_SYS(sys_getegid
, 0) /* 4050 */
2033 MIPS_SYS(sys_acct
, 0)
2034 MIPS_SYS(sys_umount2
, 2)
2035 MIPS_SYS(sys_ni_syscall
, 0)
2036 MIPS_SYS(sys_ioctl
, 3)
2037 MIPS_SYS(sys_fcntl
, 3) /* 4055 */
2038 MIPS_SYS(sys_ni_syscall
, 2)
2039 MIPS_SYS(sys_setpgid
, 2)
2040 MIPS_SYS(sys_ni_syscall
, 0)
2041 MIPS_SYS(sys_olduname
, 1)
2042 MIPS_SYS(sys_umask
, 1) /* 4060 */
2043 MIPS_SYS(sys_chroot
, 1)
2044 MIPS_SYS(sys_ustat
, 2)
2045 MIPS_SYS(sys_dup2
, 2)
2046 MIPS_SYS(sys_getppid
, 0)
2047 MIPS_SYS(sys_getpgrp
, 0) /* 4065 */
2048 MIPS_SYS(sys_setsid
, 0)
2049 MIPS_SYS(sys_sigaction
, 3)
2050 MIPS_SYS(sys_sgetmask
, 0)
2051 MIPS_SYS(sys_ssetmask
, 1)
2052 MIPS_SYS(sys_setreuid
, 2) /* 4070 */
2053 MIPS_SYS(sys_setregid
, 2)
2054 MIPS_SYS(sys_sigsuspend
, 0)
2055 MIPS_SYS(sys_sigpending
, 1)
2056 MIPS_SYS(sys_sethostname
, 2)
2057 MIPS_SYS(sys_setrlimit
, 2) /* 4075 */
2058 MIPS_SYS(sys_getrlimit
, 2)
2059 MIPS_SYS(sys_getrusage
, 2)
2060 MIPS_SYS(sys_gettimeofday
, 2)
2061 MIPS_SYS(sys_settimeofday
, 2)
2062 MIPS_SYS(sys_getgroups
, 2) /* 4080 */
2063 MIPS_SYS(sys_setgroups
, 2)
2064 MIPS_SYS(sys_ni_syscall
, 0) /* old_select */
2065 MIPS_SYS(sys_symlink
, 2)
2066 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_lstat */
2067 MIPS_SYS(sys_readlink
, 3) /* 4085 */
2068 MIPS_SYS(sys_uselib
, 1)
2069 MIPS_SYS(sys_swapon
, 2)
2070 MIPS_SYS(sys_reboot
, 3)
2071 MIPS_SYS(old_readdir
, 3)
2072 MIPS_SYS(old_mmap
, 6) /* 4090 */
2073 MIPS_SYS(sys_munmap
, 2)
2074 MIPS_SYS(sys_truncate
, 2)
2075 MIPS_SYS(sys_ftruncate
, 2)
2076 MIPS_SYS(sys_fchmod
, 2)
2077 MIPS_SYS(sys_fchown
, 3) /* 4095 */
2078 MIPS_SYS(sys_getpriority
, 2)
2079 MIPS_SYS(sys_setpriority
, 3)
2080 MIPS_SYS(sys_ni_syscall
, 0)
2081 MIPS_SYS(sys_statfs
, 2)
2082 MIPS_SYS(sys_fstatfs
, 2) /* 4100 */
2083 MIPS_SYS(sys_ni_syscall
, 0) /* was ioperm(2) */
2084 MIPS_SYS(sys_socketcall
, 2)
2085 MIPS_SYS(sys_syslog
, 3)
2086 MIPS_SYS(sys_setitimer
, 3)
2087 MIPS_SYS(sys_getitimer
, 2) /* 4105 */
2088 MIPS_SYS(sys_newstat
, 2)
2089 MIPS_SYS(sys_newlstat
, 2)
2090 MIPS_SYS(sys_newfstat
, 2)
2091 MIPS_SYS(sys_uname
, 1)
2092 MIPS_SYS(sys_ni_syscall
, 0) /* 4110 was iopl(2) */
2093 MIPS_SYS(sys_vhangup
, 0)
2094 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_idle() */
2095 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_vm86 */
2096 MIPS_SYS(sys_wait4
, 4)
2097 MIPS_SYS(sys_swapoff
, 1) /* 4115 */
2098 MIPS_SYS(sys_sysinfo
, 1)
2099 MIPS_SYS(sys_ipc
, 6)
2100 MIPS_SYS(sys_fsync
, 1)
2101 MIPS_SYS(sys_sigreturn
, 0)
2102 MIPS_SYS(sys_clone
, 6) /* 4120 */
2103 MIPS_SYS(sys_setdomainname
, 2)
2104 MIPS_SYS(sys_newuname
, 1)
2105 MIPS_SYS(sys_ni_syscall
, 0) /* sys_modify_ldt */
2106 MIPS_SYS(sys_adjtimex
, 1)
2107 MIPS_SYS(sys_mprotect
, 3) /* 4125 */
2108 MIPS_SYS(sys_sigprocmask
, 3)
2109 MIPS_SYS(sys_ni_syscall
, 0) /* was create_module */
2110 MIPS_SYS(sys_init_module
, 5)
2111 MIPS_SYS(sys_delete_module
, 1)
2112 MIPS_SYS(sys_ni_syscall
, 0) /* 4130 was get_kernel_syms */
2113 MIPS_SYS(sys_quotactl
, 0)
2114 MIPS_SYS(sys_getpgid
, 1)
2115 MIPS_SYS(sys_fchdir
, 1)
2116 MIPS_SYS(sys_bdflush
, 2)
2117 MIPS_SYS(sys_sysfs
, 3) /* 4135 */
2118 MIPS_SYS(sys_personality
, 1)
2119 MIPS_SYS(sys_ni_syscall
, 0) /* for afs_syscall */
2120 MIPS_SYS(sys_setfsuid
, 1)
2121 MIPS_SYS(sys_setfsgid
, 1)
2122 MIPS_SYS(sys_llseek
, 5) /* 4140 */
2123 MIPS_SYS(sys_getdents
, 3)
2124 MIPS_SYS(sys_select
, 5)
2125 MIPS_SYS(sys_flock
, 2)
2126 MIPS_SYS(sys_msync
, 3)
2127 MIPS_SYS(sys_readv
, 3) /* 4145 */
2128 MIPS_SYS(sys_writev
, 3)
2129 MIPS_SYS(sys_cacheflush
, 3)
2130 MIPS_SYS(sys_cachectl
, 3)
2131 MIPS_SYS(sys_sysmips
, 4)
2132 MIPS_SYS(sys_ni_syscall
, 0) /* 4150 */
2133 MIPS_SYS(sys_getsid
, 1)
2134 MIPS_SYS(sys_fdatasync
, 0)
2135 MIPS_SYS(sys_sysctl
, 1)
2136 MIPS_SYS(sys_mlock
, 2)
2137 MIPS_SYS(sys_munlock
, 2) /* 4155 */
2138 MIPS_SYS(sys_mlockall
, 1)
2139 MIPS_SYS(sys_munlockall
, 0)
2140 MIPS_SYS(sys_sched_setparam
, 2)
2141 MIPS_SYS(sys_sched_getparam
, 2)
2142 MIPS_SYS(sys_sched_setscheduler
, 3) /* 4160 */
2143 MIPS_SYS(sys_sched_getscheduler
, 1)
2144 MIPS_SYS(sys_sched_yield
, 0)
2145 MIPS_SYS(sys_sched_get_priority_max
, 1)
2146 MIPS_SYS(sys_sched_get_priority_min
, 1)
2147 MIPS_SYS(sys_sched_rr_get_interval
, 2) /* 4165 */
2148 MIPS_SYS(sys_nanosleep
, 2)
2149 MIPS_SYS(sys_mremap
, 5)
2150 MIPS_SYS(sys_accept
, 3)
2151 MIPS_SYS(sys_bind
, 3)
2152 MIPS_SYS(sys_connect
, 3) /* 4170 */
2153 MIPS_SYS(sys_getpeername
, 3)
2154 MIPS_SYS(sys_getsockname
, 3)
2155 MIPS_SYS(sys_getsockopt
, 5)
2156 MIPS_SYS(sys_listen
, 2)
2157 MIPS_SYS(sys_recv
, 4) /* 4175 */
2158 MIPS_SYS(sys_recvfrom
, 6)
2159 MIPS_SYS(sys_recvmsg
, 3)
2160 MIPS_SYS(sys_send
, 4)
2161 MIPS_SYS(sys_sendmsg
, 3)
2162 MIPS_SYS(sys_sendto
, 6) /* 4180 */
2163 MIPS_SYS(sys_setsockopt
, 5)
2164 MIPS_SYS(sys_shutdown
, 2)
2165 MIPS_SYS(sys_socket
, 3)
2166 MIPS_SYS(sys_socketpair
, 4)
2167 MIPS_SYS(sys_setresuid
, 3) /* 4185 */
2168 MIPS_SYS(sys_getresuid
, 3)
2169 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_query_module */
2170 MIPS_SYS(sys_poll
, 3)
2171 MIPS_SYS(sys_nfsservctl
, 3)
2172 MIPS_SYS(sys_setresgid
, 3) /* 4190 */
2173 MIPS_SYS(sys_getresgid
, 3)
2174 MIPS_SYS(sys_prctl
, 5)
2175 MIPS_SYS(sys_rt_sigreturn
, 0)
2176 MIPS_SYS(sys_rt_sigaction
, 4)
2177 MIPS_SYS(sys_rt_sigprocmask
, 4) /* 4195 */
2178 MIPS_SYS(sys_rt_sigpending
, 2)
2179 MIPS_SYS(sys_rt_sigtimedwait
, 4)
2180 MIPS_SYS(sys_rt_sigqueueinfo
, 3)
2181 MIPS_SYS(sys_rt_sigsuspend
, 0)
2182 MIPS_SYS(sys_pread64
, 6) /* 4200 */
2183 MIPS_SYS(sys_pwrite64
, 6)
2184 MIPS_SYS(sys_chown
, 3)
2185 MIPS_SYS(sys_getcwd
, 2)
2186 MIPS_SYS(sys_capget
, 2)
2187 MIPS_SYS(sys_capset
, 2) /* 4205 */
2188 MIPS_SYS(sys_sigaltstack
, 2)
2189 MIPS_SYS(sys_sendfile
, 4)
2190 MIPS_SYS(sys_ni_syscall
, 0)
2191 MIPS_SYS(sys_ni_syscall
, 0)
2192 MIPS_SYS(sys_mmap2
, 6) /* 4210 */
2193 MIPS_SYS(sys_truncate64
, 4)
2194 MIPS_SYS(sys_ftruncate64
, 4)
2195 MIPS_SYS(sys_stat64
, 2)
2196 MIPS_SYS(sys_lstat64
, 2)
2197 MIPS_SYS(sys_fstat64
, 2) /* 4215 */
2198 MIPS_SYS(sys_pivot_root
, 2)
2199 MIPS_SYS(sys_mincore
, 3)
2200 MIPS_SYS(sys_madvise
, 3)
2201 MIPS_SYS(sys_getdents64
, 3)
2202 MIPS_SYS(sys_fcntl64
, 3) /* 4220 */
2203 MIPS_SYS(sys_ni_syscall
, 0)
2204 MIPS_SYS(sys_gettid
, 0)
2205 MIPS_SYS(sys_readahead
, 5)
2206 MIPS_SYS(sys_setxattr
, 5)
2207 MIPS_SYS(sys_lsetxattr
, 5) /* 4225 */
2208 MIPS_SYS(sys_fsetxattr
, 5)
2209 MIPS_SYS(sys_getxattr
, 4)
2210 MIPS_SYS(sys_lgetxattr
, 4)
2211 MIPS_SYS(sys_fgetxattr
, 4)
2212 MIPS_SYS(sys_listxattr
, 3) /* 4230 */
2213 MIPS_SYS(sys_llistxattr
, 3)
2214 MIPS_SYS(sys_flistxattr
, 3)
2215 MIPS_SYS(sys_removexattr
, 2)
2216 MIPS_SYS(sys_lremovexattr
, 2)
2217 MIPS_SYS(sys_fremovexattr
, 2) /* 4235 */
2218 MIPS_SYS(sys_tkill
, 2)
2219 MIPS_SYS(sys_sendfile64
, 5)
2220 MIPS_SYS(sys_futex
, 6)
2221 MIPS_SYS(sys_sched_setaffinity
, 3)
2222 MIPS_SYS(sys_sched_getaffinity
, 3) /* 4240 */
2223 MIPS_SYS(sys_io_setup
, 2)
2224 MIPS_SYS(sys_io_destroy
, 1)
2225 MIPS_SYS(sys_io_getevents
, 5)
2226 MIPS_SYS(sys_io_submit
, 3)
2227 MIPS_SYS(sys_io_cancel
, 3) /* 4245 */
2228 MIPS_SYS(sys_exit_group
, 1)
2229 MIPS_SYS(sys_lookup_dcookie
, 3)
2230 MIPS_SYS(sys_epoll_create
, 1)
2231 MIPS_SYS(sys_epoll_ctl
, 4)
2232 MIPS_SYS(sys_epoll_wait
, 3) /* 4250 */
2233 MIPS_SYS(sys_remap_file_pages
, 5)
2234 MIPS_SYS(sys_set_tid_address
, 1)
2235 MIPS_SYS(sys_restart_syscall
, 0)
2236 MIPS_SYS(sys_fadvise64_64
, 7)
2237 MIPS_SYS(sys_statfs64
, 3) /* 4255 */
2238 MIPS_SYS(sys_fstatfs64
, 2)
2239 MIPS_SYS(sys_timer_create
, 3)
2240 MIPS_SYS(sys_timer_settime
, 4)
2241 MIPS_SYS(sys_timer_gettime
, 2)
2242 MIPS_SYS(sys_timer_getoverrun
, 1) /* 4260 */
2243 MIPS_SYS(sys_timer_delete
, 1)
2244 MIPS_SYS(sys_clock_settime
, 2)
2245 MIPS_SYS(sys_clock_gettime
, 2)
2246 MIPS_SYS(sys_clock_getres
, 2)
2247 MIPS_SYS(sys_clock_nanosleep
, 4) /* 4265 */
2248 MIPS_SYS(sys_tgkill
, 3)
2249 MIPS_SYS(sys_utimes
, 2)
2250 MIPS_SYS(sys_mbind
, 4)
2251 MIPS_SYS(sys_ni_syscall
, 0) /* sys_get_mempolicy */
2252 MIPS_SYS(sys_ni_syscall
, 0) /* 4270 sys_set_mempolicy */
2253 MIPS_SYS(sys_mq_open
, 4)
2254 MIPS_SYS(sys_mq_unlink
, 1)
2255 MIPS_SYS(sys_mq_timedsend
, 5)
2256 MIPS_SYS(sys_mq_timedreceive
, 5)
2257 MIPS_SYS(sys_mq_notify
, 2) /* 4275 */
2258 MIPS_SYS(sys_mq_getsetattr
, 3)
2259 MIPS_SYS(sys_ni_syscall
, 0) /* sys_vserver */
2260 MIPS_SYS(sys_waitid
, 4)
2261 MIPS_SYS(sys_ni_syscall
, 0) /* available, was setaltroot */
2262 MIPS_SYS(sys_add_key
, 5)
2263 MIPS_SYS(sys_request_key
, 4)
2264 MIPS_SYS(sys_keyctl
, 5)
2265 MIPS_SYS(sys_set_thread_area
, 1)
2266 MIPS_SYS(sys_inotify_init
, 0)
2267 MIPS_SYS(sys_inotify_add_watch
, 3) /* 4285 */
2268 MIPS_SYS(sys_inotify_rm_watch
, 2)
2269 MIPS_SYS(sys_migrate_pages
, 4)
2270 MIPS_SYS(sys_openat
, 4)
2271 MIPS_SYS(sys_mkdirat
, 3)
2272 MIPS_SYS(sys_mknodat
, 4) /* 4290 */
2273 MIPS_SYS(sys_fchownat
, 5)
2274 MIPS_SYS(sys_futimesat
, 3)
2275 MIPS_SYS(sys_fstatat64
, 4)
2276 MIPS_SYS(sys_unlinkat
, 3)
2277 MIPS_SYS(sys_renameat
, 4) /* 4295 */
2278 MIPS_SYS(sys_linkat
, 5)
2279 MIPS_SYS(sys_symlinkat
, 3)
2280 MIPS_SYS(sys_readlinkat
, 4)
2281 MIPS_SYS(sys_fchmodat
, 3)
2282 MIPS_SYS(sys_faccessat
, 3) /* 4300 */
2283 MIPS_SYS(sys_pselect6
, 6)
2284 MIPS_SYS(sys_ppoll
, 5)
2285 MIPS_SYS(sys_unshare
, 1)
2286 MIPS_SYS(sys_splice
, 6)
2287 MIPS_SYS(sys_sync_file_range
, 7) /* 4305 */
2288 MIPS_SYS(sys_tee
, 4)
2289 MIPS_SYS(sys_vmsplice
, 4)
2290 MIPS_SYS(sys_move_pages
, 6)
2291 MIPS_SYS(sys_set_robust_list
, 2)
2292 MIPS_SYS(sys_get_robust_list
, 3) /* 4310 */
2293 MIPS_SYS(sys_kexec_load
, 4)
2294 MIPS_SYS(sys_getcpu
, 3)
2295 MIPS_SYS(sys_epoll_pwait
, 6)
2296 MIPS_SYS(sys_ioprio_set
, 3)
2297 MIPS_SYS(sys_ioprio_get
, 2)
2298 MIPS_SYS(sys_utimensat
, 4)
2299 MIPS_SYS(sys_signalfd
, 3)
2300 MIPS_SYS(sys_ni_syscall
, 0) /* was timerfd */
2301 MIPS_SYS(sys_eventfd
, 1)
2302 MIPS_SYS(sys_fallocate
, 6) /* 4320 */
2303 MIPS_SYS(sys_timerfd_create
, 2)
2304 MIPS_SYS(sys_timerfd_gettime
, 2)
2305 MIPS_SYS(sys_timerfd_settime
, 4)
2306 MIPS_SYS(sys_signalfd4
, 4)
2307 MIPS_SYS(sys_eventfd2
, 2) /* 4325 */
2308 MIPS_SYS(sys_epoll_create1
, 1)
2309 MIPS_SYS(sys_dup3
, 3)
2310 MIPS_SYS(sys_pipe2
, 2)
2311 MIPS_SYS(sys_inotify_init1
, 1)
2312 MIPS_SYS(sys_preadv
, 6) /* 4330 */
2313 MIPS_SYS(sys_pwritev
, 6)
2314 MIPS_SYS(sys_rt_tgsigqueueinfo
, 4)
2315 MIPS_SYS(sys_perf_event_open
, 5)
2316 MIPS_SYS(sys_accept4
, 4)
2317 MIPS_SYS(sys_recvmmsg
, 5) /* 4335 */
2318 MIPS_SYS(sys_fanotify_init
, 2)
2319 MIPS_SYS(sys_fanotify_mark
, 6)
2320 MIPS_SYS(sys_prlimit64
, 4)
2321 MIPS_SYS(sys_name_to_handle_at
, 5)
2322 MIPS_SYS(sys_open_by_handle_at
, 3) /* 4340 */
2323 MIPS_SYS(sys_clock_adjtime
, 2)
2324 MIPS_SYS(sys_syncfs
, 1)
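/* For the O32 ABI the table above records, for each syscall starting at
 * number 4000, how many arguments it takes; the loop below uses it to decide
 * whether arguments five to eight must be fetched from the guest stack
 * rather than from registers.
 */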
2329 static int do_store_exclusive(CPUMIPSState
*env
)
2332 target_ulong page_addr
;
2340 page_addr
= addr
& TARGET_PAGE_MASK
;
2343 flags
= page_get_flags(page_addr
);
2344 if ((flags
& PAGE_READ
) == 0) {
2347 reg
= env
->llreg
& 0x1f;
2348 d
= (env
->llreg
& 0x20) != 0;
2350 segv
= get_user_s64(val
, addr
);
2352 segv
= get_user_s32(val
, addr
);
2355 if (val
!= env
->llval
) {
2356 env
->active_tc
.gpr
[reg
] = 0;
2359 segv
= put_user_u64(env
->llnewval
, addr
);
2361 segv
= put_user_u32(env
->llnewval
, addr
);
2364 env
->active_tc
.gpr
[reg
] = 1;
2371 env
->active_tc
.PC
+= 4;
2384 static int do_break(CPUMIPSState
*env
, target_siginfo_t
*info
,
2392 info
->si_signo
= TARGET_SIGFPE
;
2394 info
->si_code
= (code
== BRK_OVERFLOW
) ? FPE_INTOVF
: FPE_INTDIV
;
2395 queue_signal(env
, info
->si_signo
, &*info
);
2399 info
->si_signo
= TARGET_SIGTRAP
;
2401 queue_signal(env
, info
->si_signo
, &*info
);
2409 void cpu_loop(CPUMIPSState
*env
)
2411 CPUState
*cs
= CPU(mips_env_get_cpu(env
));
2412 target_siginfo_t info
;
2415 # ifdef TARGET_ABI_MIPSO32
2416 unsigned int syscall_num
;
2421 trapnr
= cpu_mips_exec(cs
);
2425 env
->active_tc
.PC
+= 4;
2426 # ifdef TARGET_ABI_MIPSO32
2427 syscall_num
= env
->active_tc
.gpr
[2] - 4000;
2428 if (syscall_num
>= sizeof(mips_syscall_args
)) {
2429 ret
= -TARGET_ENOSYS
;
2433 abi_ulong arg5
= 0, arg6
= 0, arg7
= 0, arg8
= 0;
2435 nb_args
= mips_syscall_args
[syscall_num
];
2436 sp_reg
= env
->active_tc
.gpr
[29];
2438 /* these arguments are taken from the stack */
2440 if ((ret
= get_user_ual(arg8
, sp_reg
+ 28)) != 0) {
2444 if ((ret
= get_user_ual(arg7
, sp_reg
+ 24)) != 0) {
2448 if ((ret
= get_user_ual(arg6
, sp_reg
+ 20)) != 0) {
2452 if ((ret
= get_user_ual(arg5
, sp_reg
+ 16)) != 0) {
2458 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2459 env
->active_tc
.gpr
[4],
2460 env
->active_tc
.gpr
[5],
2461 env
->active_tc
.gpr
[6],
2462 env
->active_tc
.gpr
[7],
2463 arg5
, arg6
, arg7
, arg8
);
2467 ret
= do_syscall(env
, env
->active_tc
.gpr
[2],
2468 env
->active_tc
.gpr
[4], env
->active_tc
.gpr
[5],
2469 env
->active_tc
.gpr
[6], env
->active_tc
.gpr
[7],
2470 env
->active_tc
.gpr
[8], env
->active_tc
.gpr
[9],
2471 env
->active_tc
.gpr
[10], env
->active_tc
.gpr
[11]);
2473 if (ret
== -TARGET_QEMU_ESIGRETURN
) {
2474 /* Returning from a successful sigreturn syscall.
2475 Avoid clobbering register state. */
2478 if ((abi_ulong
)ret
>= (abi_ulong
)-1133) {
2479 env
->active_tc
.gpr
[7] = 1; /* error flag */
2482 env
->active_tc
.gpr
[7] = 0; /* error flag */
2484 env
->active_tc
.gpr
[2] = ret
;
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->CP0_BadVAddr;
            queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGILL;
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, &info);
            if (do_store_exclusive(env)) {
                info.si_signo = TARGET_SIGSEGV;
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = env->active_tc.PC;
                queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            queue_signal(env, info.si_signo, &info);
            /* The code below was inspired by the MIPS Linux kernel trap
             * handling code in arch/mips/kernel/traps.c.
             */
                abi_ulong trap_instr;
                if (env->hflags & MIPS_HFLAG_M16) {
                    if (env->insn_flags & ASE_MICROMIPS) {
                        /* microMIPS mode */
                        ret = get_user_u16(trap_instr, env->active_tc.PC);
                        if ((trap_instr >> 10) == 0x11) {
                            /* 16-bit instruction */
                            code = trap_instr & 0xf;
                            /* 32-bit instruction */
                            ret = get_user_u16(instr_lo,
                                               env->active_tc.PC + 2);
                            trap_instr = (trap_instr << 16) | instr_lo;
                            code = ((trap_instr >> 6) & ((1 << 20) - 1));
                            /* Unfortunately, microMIPS also suffers from
                               the old assembler bug... */
                            if (code >= (1 << 10)) {
                        ret = get_user_u16(trap_instr, env->active_tc.PC);
                        code = (trap_instr >> 6) & 0x3f;
                    ret = get_user_u32(trap_instr, env->active_tc.PC);
                    /* As described in the original Linux kernel code, the
                     * below checks on 'code' are to work around an old
                     * assembler bug. */
                    code = ((trap_instr >> 6) & ((1 << 20) - 1));
                    if (code >= (1 << 10)) {
                if (do_break(env, &info, code) != 0) {
                abi_ulong trap_instr;
                unsigned int code = 0;
                if (env->hflags & MIPS_HFLAG_M16) {
                    /* microMIPS mode */
                    ret = get_user_u16(instr[0], env->active_tc.PC) ||
                          get_user_u16(instr[1], env->active_tc.PC + 2);
                    trap_instr = (instr[0] << 16) | instr[1];
                    ret = get_user_u32(trap_instr, env->active_tc.PC);
                /* The immediate versions don't provide a code. */
                if (!(trap_instr & 0xFC000000)) {
                    if (env->hflags & MIPS_HFLAG_M16) {
                        /* microMIPS mode */
                        code = ((trap_instr >> 12) & ((1 << 4) - 1));
                        code = ((trap_instr >> 6) & ((1 << 10) - 1));
                if (do_break(env, &info, code) != 0) {
            fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
#ifdef TARGET_OPENRISC
void cpu_loop(CPUOpenRISCState *env)
    CPUState *cs = CPU(openrisc_env_get_cpu(env));
        trapnr = cpu_openrisc_exec(cs);
            qemu_log("\nReset request, exit, pc is %#x\n", env->pc);
            qemu_log("\nBus error, exit, pc is %#x\n", env->pc);
            gdbsig = TARGET_SIGBUS;
            cpu_dump_state(cs, stderr, fprintf, 0);
            gdbsig = TARGET_SIGSEGV;
            qemu_log("\nTick time interrupt pc is %#x\n", env->pc);
            qemu_log("\nAlignment pc is %#x\n", env->pc);
            gdbsig = TARGET_SIGBUS;
            qemu_log("\nIllegal instruction pc is %#x\n", env->pc);
            gdbsig = TARGET_SIGILL;
            qemu_log("\nExternal interrupt pc is %#x\n", env->pc);
            qemu_log("\nTLB miss\n");
            qemu_log("\nRange\n");
            gdbsig = TARGET_SIGSEGV;
            env->pc += 4; /* 0xc00; */
            env->gpr[11] = do_syscall(env,
                                      env->gpr[11], /* return value */
                                      env->gpr[3],  /* r3 - r7 are params */
            qemu_log("\nFloating point error\n");
            qemu_log("\nTrap\n");
            gdbsig = TARGET_SIGTRAP;
            qemu_log("\nqemu: unhandled CPU exception %#x - aborting\n",
            cpu_dump_state(cs, stderr, fprintf, 0);
            gdbsig = TARGET_SIGILL;
            gdb_handlesig(cs, gdbsig);
            if (gdbsig != TARGET_SIGTRAP) {
        process_pending_signals(env);
#endif /* TARGET_OPENRISC */
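/*
 * Added note on the OpenRISC loop above: most faults are handled by
 * selecting a gdbsig value (SIGBUS, SIGSEGV, SIGILL, SIGTRAP) and passing
 * it to gdb_handlesig(); the "if (gdbsig != TARGET_SIGTRAP)" check
 * suggests anything other than a breakpoint is treated as fatal, though
 * the body of that branch is elided in this excerpt.
 */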
void cpu_loop(CPUSH4State *env)
    CPUState *cs = CPU(sh_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_sh4_exec(cs);
            ret = do_syscall(env,
            env->gregs[0] = ret;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, &info);
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->tea;
            queue_signal(env, info.si_signo, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
void cpu_loop(CPUCRISState *env)
    CPUState *cs = CPU(cris_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_cris_exec(cs);
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->pregs[PR_EDA];
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            ret = do_syscall(env,
            env->regs[10] = ret;
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
#ifdef TARGET_MICROBLAZE
void cpu_loop(CPUMBState *env)
    CPUState *cs = CPU(mb_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_mb_exec(cs);
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, &info);
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            /* Return address is 4 bytes after the call. */
            env->sregs[SR_PC] = env->regs[14];
            ret = do_syscall(env,
            env->regs[17] = env->sregs[SR_PC] + 4;
            if (env->iflags & D_FLAG) {
                env->sregs[SR_ESR] |= 1 << 12;
                env->sregs[SR_PC] -= 4;
                /* FIXME: if branch was immed, replay the imm as well. */
            env->iflags &= ~(IMM_FLAG | D_FLAG);
            switch (env->sregs[SR_ESR] & 31) {
            case ESR_EC_DIVZERO:
                info.si_signo = TARGET_SIGFPE;
                info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, &info);
                info.si_signo = TARGET_SIGFPE;
                if (env->sregs[SR_FSR] & FSR_IO) {
                    info.si_code = TARGET_FPE_FLTINV;
                if (env->sregs[SR_FSR] & FSR_DZ) {
                    info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, &info);
                printf("Unhandled hw-exception: 0x%x\n",
                       env->sregs[SR_ESR] & ESR_EC_MASK);
                cpu_dump_state(cs, stderr, fprintf, 0);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, &info);
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
void cpu_loop(CPUM68KState *env)
    CPUState *cs = CPU(m68k_env_get_cpu(env));
    target_siginfo_t info;
    TaskState *ts = cs->opaque;
        trapnr = cpu_m68k_exec(cs);
            if (ts->sim_syscalls) {
                get_user_u16(nr, env->pc + 2);
                do_m68k_simcall(env, nr);
        case EXCP_HALT_INSN:
            /* Semihosting syscall. */
            do_m68k_semihosting(env, env->dregs[0]);
        case EXCP_UNSUPPORTED:
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            ts->sim_syscalls = 0;
            env->dregs[0] = do_syscall(env,
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            info.si_signo = TARGET_SIGSEGV;
            /* XXX: check env->error_code */
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = env->mmu.ar;
            queue_signal(env, info.si_signo, &info);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            info.si_signo = sig;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, &info);
            fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
#endif /* TARGET_M68K */
static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)
    target_ulong addr, val, tmp;
    target_siginfo_t info;
    addr = env->lock_addr;
    tmp = env->lock_st_addr;
    env->lock_addr = -1;
    env->lock_st_addr = 0;
    if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
    if (val == env->lock_value) {
        if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {
    info.si_signo = TARGET_SIGSEGV;
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, TARGET_SIGSEGV, &info);
void cpu_loop(CPUAlphaState *env)
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_alpha_exec(cs);
        /* All of the traps imply a transition through PALcode, which
           implies an REI instruction has been executed.  Which means
           that the intr_flag should be cleared. */
            fprintf(stderr, "Reset requested. Exit\n");
            fprintf(stderr, "Machine check exception. Exit\n");
        case EXCP_SMP_INTERRUPT:
        case EXCP_CLK_INTERRUPT:
        case EXCP_DEV_INTERRUPT:
            fprintf(stderr, "External interrupt. Exit\n");
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGSEGV;
            info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
                            ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGBUS;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGILL;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            env->lock_addr = -1;
            info.si_signo = TARGET_SIGFPE;
            info.si_code = TARGET_FPE_FLTINV;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, &info);
            /* No-op.  Linux simply re-enables the FPU. */
            env->lock_addr = -1;
            switch (env->error_code) {
                info.si_signo = TARGET_SIGTRAP;
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
                info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
                trapnr = env->ir[IR_V0];
                sysret = do_syscall(env, trapnr,
                                    env->ir[IR_A0], env->ir[IR_A1],
                                    env->ir[IR_A2], env->ir[IR_A3],
                                    env->ir[IR_A4], env->ir[IR_A5],
                if (trapnr == TARGET_NR_sigreturn
                    || trapnr == TARGET_NR_rt_sigreturn) {
                /* Syscall writes 0 to V0 to bypass error check, similar
                   to how this is handled internal to Linux kernel.
                   (Ab)use trapnr temporarily as boolean indicating error. */
                trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
                env->ir[IR_V0] = (trapnr ? -sysret : sysret);
                env->ir[IR_A3] = trapnr;
                /* ??? We can probably elide the code using page_unprotect
                   that is checking for self-modifying code.  Instead we
                   could simply call tb_flush here.  Until we work out the
                   changes required to turn off the extra write protection,
                   this can be a no-op. */
                /* Handled in the translator for usermode. */
                /* Handled in the translator for usermode. */
                info.si_signo = TARGET_SIGFPE;
                switch (env->ir[IR_A0]) {
                case TARGET_GEN_INTOVF:
                    info.si_code = TARGET_FPE_INTOVF;
                case TARGET_GEN_INTDIV:
                    info.si_code = TARGET_FPE_INTDIV;
                case TARGET_GEN_FLTOVF:
                    info.si_code = TARGET_FPE_FLTOVF;
                case TARGET_GEN_FLTUND:
                    info.si_code = TARGET_FPE_FLTUND;
                case TARGET_GEN_FLTINV:
                    info.si_code = TARGET_FPE_FLTINV;
                case TARGET_GEN_FLTINE:
                    info.si_code = TARGET_FPE_FLTRES;
                case TARGET_GEN_ROPRAND:
                    info.si_signo = TARGET_SIGTRAP;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, &info);
            info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
            if (info.si_signo) {
                env->lock_addr = -1;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, &info);
            do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap. */
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
#endif /* TARGET_ALPHA */
void cpu_loop(CPUS390XState *env)
    CPUState *cs = CPU(s390_env_get_cpu(env));
    target_siginfo_t info;
        trapnr = cpu_s390x_exec(cs);
        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap. */
            n = env->int_svc_code;
            /* syscalls > 255 */
            env->psw.addr += env->int_svc_ilen;
            env->regs[2] = do_syscall(env, n, env->regs[2], env->regs[3],
                                      env->regs[4], env->regs[5],
                                      env->regs[6], env->regs[7], 0, 0);
            sig = gdb_handlesig(cs, TARGET_SIGTRAP);
            n = TARGET_TRAP_BRKPT;
            n = env->int_pgm_code;
            case PGM_PRIVILEGED:
                sig = TARGET_SIGILL;
                n = TARGET_ILL_ILLOPC;
            case PGM_PROTECTION:
            case PGM_ADDRESSING:
                sig = TARGET_SIGSEGV;
                /* XXX: check env->error_code */
                n = TARGET_SEGV_MAPERR;
                addr = env->__excp_addr;
            case PGM_SPECIFICATION:
            case PGM_SPECIAL_OP:
                sig = TARGET_SIGILL;
                n = TARGET_ILL_ILLOPN;
            case PGM_FIXPT_OVERFLOW:
                sig = TARGET_SIGFPE;
                n = TARGET_FPE_INTOVF;
            case PGM_FIXPT_DIVIDE:
                sig = TARGET_SIGFPE;
                n = TARGET_FPE_INTDIV;
                n = (env->fpc >> 8) & 0xff;
                /* compare-and-trap */
                /* An IEEE exception, simulated or otherwise. */
                    n = TARGET_FPE_FLTINV;
                } else if (n & 0x40) {
                    n = TARGET_FPE_FLTDIV;
                } else if (n & 0x20) {
                    n = TARGET_FPE_FLTOVF;
                } else if (n & 0x10) {
                    n = TARGET_FPE_FLTUND;
                } else if (n & 0x08) {
                    n = TARGET_FPE_FLTRES;
                    /* ??? Quantum exception; BFP, DFP error. */
                sig = TARGET_SIGFPE;
                fprintf(stderr, "Unhandled program exception: %#x\n", n);
                cpu_dump_state(cs, stderr, fprintf, 0);
            addr = env->psw.addr;
            info.si_signo = sig;
            info._sifields._sigfault._addr = addr;
            queue_signal(env, info.si_signo, &info);
            fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
        process_pending_signals(env);
#endif /* TARGET_S390X */
THREAD CPUState *thread_cpu;

void task_settid(TaskState *ts)
    if (ts->ts_tid == 0) {
        ts->ts_tid = (pid_t)syscall(SYS_gettid);

void stop_all_tasks(void)
    /*
     * We trust that when using NPTL, start_exclusive()
     * handles thread stopping correctly.
     */

/* Assumes contents are already zeroed. */
void init_task_state(TaskState *ts)
    ts->first_free = ts->sigqueue_table;
    for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
        ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1];
    ts->sigqueue_table[i].next = NULL;
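/*
 * Added note: init_task_state() threads the fixed sigqueue_table entries
 * into a free list via their ->next pointers, NULL-terminates the last
 * entry, and leaves first_free pointing at the head, presumably so that
 * queuing a signal later only has to pop an entry rather than allocate.
 */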
CPUArchState *cpu_copy(CPUArchState *env)
    CPUState *cpu = ENV_GET_CPU(env);
    CPUState *new_cpu = cpu_init(cpu_model);
    CPUArchState *new_env = new_cpu->env_ptr;

    /* Reset non arch specific state */
    memcpy(new_env, env, sizeof(CPUArchState));

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_cpu->breakpoints);
    QTAILQ_INIT(&new_cpu->watchpoints);
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
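/*
 * Added note: cpu_copy() duplicates the architectural state with a plain
 * memcpy() of CPUArchState, then rebuilds the debug state by re-inserting
 * every break/watchpoint into freshly initialised QTAILQ lists on the new
 * CPUState, presumably because copying the list heads would leave both
 * CPUs sharing the same list nodes.
 */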
static void handle_arg_help(const char *arg)

static void handle_arg_log(const char *arg)
    mask = qemu_str_to_log_mask(arg);
    qemu_print_log_usage(stdout);

static void handle_arg_log_filename(const char *arg)
    qemu_set_log_filename(arg);

static void handle_arg_set_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_setenv(envlist, token) != 0) {

static void handle_arg_unset_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
        if (envlist_unsetenv(envlist, token) != 0) {

static void handle_arg_argv0(const char *arg)
    argv0 = strdup(arg);

static void handle_arg_stack_size(const char *arg)
    guest_stack_size = strtoul(arg, &p, 0);
    if (guest_stack_size == 0) {
        guest_stack_size *= 1024 * 1024;
    } else if (*p == 'k' || *p == 'K') {
        guest_stack_size *= 1024;
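/*
 * Added note: handle_arg_stack_size() parses the -s value with strtoul()
 * and then scales it by the trailing suffix; the branch kept here shows
 * 'k'/'K' multiplying by 1024 and another (elided) suffix multiplying by
 * 1024 * 1024, presumably 'M'/'m' for mebibytes.
 */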
static void handle_arg_ld_prefix(const char *arg)
    interp_prefix = strdup(arg);

static void handle_arg_pagesize(const char *arg)
    qemu_host_page_size = atoi(arg);
    if (qemu_host_page_size == 0 ||
        (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
        fprintf(stderr, "page size must be a power of two\n");

static void handle_arg_randseed(const char *arg)
    unsigned long long seed;
    if (parse_uint_full(arg, &seed, 0) != 0 || seed > UINT_MAX) {
        fprintf(stderr, "Invalid seed number: %s\n", arg);

static void handle_arg_gdb(const char *arg)
    gdbstub_port = atoi(arg);

static void handle_arg_uname(const char *arg)
    qemu_uname_release = strdup(arg);

static void handle_arg_cpu(const char *arg)
    cpu_model = strdup(arg);
    if (cpu_model == NULL || is_help_option(cpu_model)) {
        /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
        cpu_list(stdout, &fprintf);

static void handle_arg_guest_base(const char *arg)
    guest_base = strtol(arg, NULL, 0);
    have_guest_base = 1;

static void handle_arg_reserved_va(const char *arg)
    reserved_va = strtoul(arg, &p, 0);
    unsigned long unshifted = reserved_va;
    reserved_va <<= shift;
    if (((reserved_va >> shift) != unshifted)
#if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
        || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
        fprintf(stderr, "Reserved virtual address too big\n");
    fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
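/*
 * Added note on -R parsing above: the reserved_va value is read with
 * strtoul(), optionally shifted left by a suffix-dependent amount, and
 * rejected either when the shift overflows (the unshifted round-trip
 * check) or, on hosts with more virtual address bits than the target,
 * when it exceeds 1ul << TARGET_VIRT_ADDR_SPACE_BITS; an unknown suffix
 * produces the "Unrecognised -R size suffix" error instead.
 */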
static void handle_arg_singlestep(const char *arg)

static void handle_arg_strace(const char *arg)

static void handle_arg_version(const char *arg)
    printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION
           ", Copyright (c) 2003-2008 Fabrice Bellard\n");

struct qemu_argument {
    void (*handle_opt)(const char *arg);
    const char *example;

static const struct qemu_argument arg_table[] = {
    {"h",          "",                 false, handle_arg_help,
     "",           "print this help"},
    {"g",          "QEMU_GDB",         true,  handle_arg_gdb,
     "port",       "wait gdb connection to 'port'"},
    {"L",          "QEMU_LD_PREFIX",   true,  handle_arg_ld_prefix,
     "path",       "set the elf interpreter prefix to 'path'"},
    {"s",          "QEMU_STACK_SIZE",  true,  handle_arg_stack_size,
     "size",       "set the stack size to 'size' bytes"},
    {"cpu",        "QEMU_CPU",         true,  handle_arg_cpu,
     "model",      "select CPU (-cpu help for list)"},
    {"E",          "QEMU_SET_ENV",     true,  handle_arg_set_env,
     "var=value",  "sets targets environment variable (see below)"},
    {"U",          "QEMU_UNSET_ENV",   true,  handle_arg_unset_env,
     "var",        "unsets targets environment variable (see below)"},
    {"0",          "QEMU_ARGV0",       true,  handle_arg_argv0,
     "argv0",      "forces target process argv[0] to be 'argv0'"},
    {"r",          "QEMU_UNAME",       true,  handle_arg_uname,
     "uname",      "set qemu uname release string to 'uname'"},
    {"B",          "QEMU_GUEST_BASE",  true,  handle_arg_guest_base,
     "address",    "set guest_base address to 'address'"},
    {"R",          "QEMU_RESERVED_VA", true,  handle_arg_reserved_va,
     "size",       "reserve 'size' bytes for guest virtual address space"},
    {"d",          "QEMU_LOG",         true,  handle_arg_log,
     "item[,...]", "enable logging of specified items "
                   "(use '-d help' for a list of items)"},
    {"D",          "QEMU_LOG_FILENAME", true, handle_arg_log_filename,
     "logfile",    "write logs to 'logfile' (default stderr)"},
    {"p",          "QEMU_PAGESIZE",    true,  handle_arg_pagesize,
     "pagesize",   "set the host page size to 'pagesize'"},
    {"singlestep", "QEMU_SINGLESTEP",  false, handle_arg_singlestep,
     "",           "run in singlestep mode"},
    {"strace",     "QEMU_STRACE",      false, handle_arg_strace,
     "",           "log system calls"},
    {"seed",       "QEMU_RAND_SEED",   true,  handle_arg_randseed,
     "",           "Seed for pseudo-random number generator"},
    {"version",    "QEMU_VERSION",     false, handle_arg_version,
     "",           "display version information and exit"},
    {NULL, NULL, false, NULL, NULL, NULL}
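/*
 * Added note: the all-NULL entry above terminates arg_table; usage() and
 * parse_args() below both loop with "arginfo->handle_opt != NULL", so the
 * sentinel's NULL handler is what stops the table scan.
 */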
static void usage(void)
    const struct qemu_argument *arginfo;

    printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n"
           "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n"
           "Options and associated environment variables:\n"

    /* Calculate column widths. We must always have at least enough space
     * for the column header.
     */
    maxarglen = strlen("Argument");
    maxenvlen = strlen("Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        int arglen = strlen(arginfo->argv);
        if (arginfo->has_arg) {
            arglen += strlen(arginfo->example) + 1;
        if (strlen(arginfo->env) > maxenvlen) {
            maxenvlen = strlen(arginfo->env);
        if (arglen > maxarglen) {

    printf("%-*s %-*s Description\n", maxarglen + 1, "Argument",
           maxenvlen, "Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->has_arg) {
            printf("-%s %-*s %-*s %s\n", arginfo->argv,
                   (int)(maxarglen - strlen(arginfo->argv) - 1),
                   arginfo->example, maxenvlen, arginfo->env, arginfo->help);
            printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv,
                   maxenvlen, arginfo->env,

           "QEMU_LD_PREFIX = %s\n"
           "QEMU_STACK_SIZE = %ld byte\n",

           "You can use -E and -U options or the QEMU_SET_ENV and\n"
           "QEMU_UNSET_ENV environment variables to set and unset\n"
           "environment variables for the target process.\n"
           "It is possible to provide several variables by separating them\n"
           "by commas in getsubopt(3) style. Additionally it is possible to\n"
           "provide the -E and -U options multiple times.\n"
           "The following lines are equivalent:\n"
           " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
           " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
           " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
           "Note that if you provide several changes to a single variable\n"
           "the last change will stay in effect.\n");
static int parse_args(int argc, char **argv)
    const struct qemu_argument *arginfo;

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->env == NULL) {
        r = getenv(arginfo->env);
            arginfo->handle_opt(r);

        if (optind >= argc) {
        if (!strcmp(r, "-")) {
        for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
            if (!strcmp(r, arginfo->argv)) {
                if (arginfo->has_arg) {
                    if (optind >= argc) {
                    arginfo->handle_opt(argv[optind]);
                    arginfo->handle_opt(NULL);

        /* no option matched the current argv */
        if (arginfo->handle_opt == NULL) {

    if (optind >= argc) {

    filename = argv[optind];
    exec_path = argv[optind];
int main(int argc, char **argv, char **envp)
    struct target_pt_regs regs1, *regs = &regs1;
    struct image_info info1, *info = &info1;
    struct linux_binprm bprm;
    char **target_environ, **wrk;

    module_call_init(MODULE_INIT_QOM);

    if ((envlist = envlist_create()) == NULL) {
        (void) fprintf(stderr, "Unable to allocate envlist\n");

    /* add current environment into the list */
    for (wrk = environ; *wrk != NULL; wrk++) {
        (void) envlist_setenv(envlist, *wrk);

    /* Read the stack limit from the kernel.  If it's "unlimited",
       then we can do little else besides use the default. */
    if (getrlimit(RLIMIT_STACK, &lim) == 0
        && lim.rlim_cur != RLIM_INFINITY
        && lim.rlim_cur == (target_long)lim.rlim_cur) {
        guest_stack_size = lim.rlim_cur;
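/*
 * Added note: the getrlimit(RLIMIT_STACK) probe above only overrides the
 * default guest_stack_size when the soft limit is finite and round-trips
 * through target_long unchanged; an "unlimited" host stack therefore keeps
 * the built-in default, as the comment says.
 */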
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file (TBD) */

    optind = parse_args(argc, argv);

    memset(regs, 0, sizeof(struct target_pt_regs));

    /* Zero out image_info */
    memset(info, 0, sizeof(struct image_info));

    memset(&bprm, 0, sizeof(bprm));

    /* Scan interp_prefix dir for replacement files. */
    init_paths(interp_prefix);

    init_qemu_uname_release();

    if (cpu_model == NULL) {
#if defined(TARGET_I386)
#ifdef TARGET_X86_64
        cpu_model = "qemu64";
        cpu_model = "qemu32";
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_M68K)
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
        cpu_model = "TI UltraSparc II";
        cpu_model = "Fujitsu MB86904";
#elif defined(TARGET_MIPS)
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
#elif defined TARGET_OPENRISC
        cpu_model = "or1200";
#elif defined(TARGET_PPC)
# ifdef TARGET_PPC64
        cpu_model = "POWER7";
#elif defined TARGET_SH4
        cpu_model = TYPE_SH7785_CPU;

    /* NOTE: we need to init the CPU at this stage to get
       qemu_host_page_size */
    cpu = cpu_init(cpu_model);
        fprintf(stderr, "Unable to find CPU definition\n");

    if (getenv("QEMU_STRACE")) {
    if (getenv("QEMU_RAND_SEED")) {
        handle_arg_randseed(getenv("QEMU_RAND_SEED"));

    target_environ = envlist_to_environ(envlist, NULL);
    envlist_free(envlist);

    /*
     * Now that page sizes are configured in cpu_init() we can do
     * proper page alignment for guest_base.
     */
    guest_base = HOST_PAGE_ALIGN(guest_base);

    if (reserved_va || have_guest_base) {
        guest_base = init_guest_space(guest_base, reserved_va, 0,
        if (guest_base == (unsigned long)-1) {
            fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
                    "space for use as guest address space (check your virtual "
                    "memory ulimit setting or reserve less using -R option)\n",
        mmap_next_start = reserved_va;
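/*
 * Added note: after cpu_init() the host page size is known, so guest_base
 * is page-aligned here; when -B or -R was given, init_guest_space() is
 * asked to reserve the guest address space, a return value of -1 is
 * reported with the ulimit/-R hint above, and mmap_next_start is set to
 * reserved_va, presumably so later mmap placement stays inside the
 * reserved region.
 */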
    /*
     * Read in mmap_min_addr kernel parameter.  This value is used
     * when loading the ELF image to determine whether guest_base
     * is needed.  It is also used in mmap_find_vma.
     */
    if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
        if (fscanf(fp, "%lu", &tmp) == 1) {
            mmap_min_addr = tmp;
            qemu_log("host mmap_min_addr=0x%lx\n", mmap_min_addr);

    /*
     * Prepare copy of argv vector for target.
     */
    target_argc = argc - optind;
    target_argv = calloc(target_argc + 1, sizeof(char *));
    if (target_argv == NULL) {
        (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");

    /*
     * If argv0 is specified (using '-0' switch) we replace
     * argv[0] pointer with the given one.
     */
    if (argv0 != NULL) {
        target_argv[i++] = strdup(argv0);
    for (; i < target_argc; i++) {
        target_argv[i] = strdup(argv[optind + i]);
    target_argv[target_argc] = NULL;

    ts = g_malloc0(sizeof(TaskState));
    init_task_state(ts);
    /* build Task State */
    execfd = qemu_getauxval(AT_EXECFD);
        execfd = open(filename, O_RDONLY);
            printf("Error while loading %s: %s\n", filename, strerror(errno));

    ret = loader_exec(execfd, filename, target_argv, target_environ, regs,
        printf("Error while loading %s: %s\n", filename, strerror(-ret));

    for (wrk = target_environ; *wrk; wrk++) {
    free(target_environ);

    if (qemu_log_enabled()) {
        qemu_log("guest_base 0x%lx\n", guest_base);
        qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
        qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code);
        qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data);
        qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
        qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk);
        qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry);

    target_set_brk(info->brk);

    /* Now that we've loaded the binary, GUEST_BASE is fixed.  Delay
       generating the prologue until now so that the prologue can take
       the real value of GUEST_BASE into account. */
    tcg_prologue_init(&tcg_ctx);
#if defined(TARGET_I386)
    env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
    env->hflags |= HF_PE_MASK | HF_CPL_MASK;
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        env->cr[4] |= CR4_OSFXSR_MASK;
        env->hflags |= HF_OSFXSR_MASK;
#ifndef TARGET_ABI32
    /* enable 64 bit mode if possible */
    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
    env->cr[4] |= CR4_PAE_MASK;
    env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
    env->hflags |= HF_LMA_MASK;

    /* flags setup : we activate the IRQs by default as in user mode */
    env->eflags |= IF_MASK;

    /* linux register setup */
#ifndef TARGET_ABI32
    env->regs[R_EAX] = regs->rax;
    env->regs[R_EBX] = regs->rbx;
    env->regs[R_ECX] = regs->rcx;
    env->regs[R_EDX] = regs->rdx;
    env->regs[R_ESI] = regs->rsi;
    env->regs[R_EDI] = regs->rdi;
    env->regs[R_EBP] = regs->rbp;
    env->regs[R_ESP] = regs->rsp;
    env->eip = regs->rip;
    env->regs[R_EAX] = regs->eax;
    env->regs[R_EBX] = regs->ebx;
    env->regs[R_ECX] = regs->ecx;
    env->regs[R_EDX] = regs->edx;
    env->regs[R_ESI] = regs->esi;
    env->regs[R_EDI] = regs->edi;
    env->regs[R_EBP] = regs->ebp;
    env->regs[R_ESP] = regs->esp;
    env->eip = regs->eip;

    /* linux interrupt setup */
#ifndef TARGET_ABI32
    env->idt.limit = 511;
    env->idt.limit = 255;
    env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
                                PROT_READ|PROT_WRITE,
                                MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    idt_table = g2h(env->idt.base);

    /* linux segment setup */
        uint64_t *gdt_table;
        env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
        gdt_table = g2h(env->gdt.base);
        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
        /* 64 bit code segment */
        write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
        write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
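/*
 * Added note on the descriptors written above: each write_dt() call builds
 * a flat segment with the granularity, present, and non-system bits set
 * and DPL 3, differing only in the type nibble -- 0xa for the code
 * segments and 0x2 for the data segment -- before the selectors are loaded
 * just below.
 */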
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_FS, __USER_DS);
    cpu_x86_load_seg(env, R_GS, __USER_DS);
    /* This hack makes Wine work... */
    env->segs[R_FS].selector = 0;
    cpu_x86_load_seg(env, R_DS, 0);
    cpu_x86_load_seg(env, R_ES, 0);
    cpu_x86_load_seg(env, R_FS, 0);
    cpu_x86_load_seg(env, R_GS, 0);
#elif defined(TARGET_AARCH64)
    if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
                "The selected ARM CPU does not support 64 bit mode\n");
    for (i = 0; i < 31; i++) {
        env->xregs[i] = regs->regs[i];
    env->xregs[31] = regs->sp;
#elif defined(TARGET_ARM)
    cpsr_write(env, regs->uregs[16], 0xffffffff);
    for(i = 0; i < 16; i++) {
        env->regs[i] = regs->uregs[i];
    if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
        && (info->elf_flags & EF_ARM_BE8)) {
        env->bswap_code = 1;
#elif defined(TARGET_UNICORE32)
    cpu_asr_write(env, regs->uregs[32], 0xffffffff);
    for (i = 0; i < 32; i++) {
        env->regs[i] = regs->uregs[i];
#elif defined(TARGET_SPARC)
    env->npc = regs->npc;
    for(i = 0; i < 8; i++)
        env->gregs[i] = regs->u_regs[i];
    for(i = 0; i < 8; i++)
        env->regwptr[i] = regs->u_regs[i + 8];
#elif defined(TARGET_PPC)
#if defined(TARGET_PPC64)
#if defined(TARGET_ABI32)
    env->msr &= ~((target_ulong)1 << MSR_SF);
    env->msr |= (target_ulong)1 << MSR_SF;
    env->nip = regs->nip;
    for(i = 0; i < 32; i++) {
        env->gpr[i] = regs->gpr[i];
#elif defined(TARGET_M68K)
    env->dregs[0] = regs->d0;
    env->dregs[1] = regs->d1;
    env->dregs[2] = regs->d2;
    env->dregs[3] = regs->d3;
    env->dregs[4] = regs->d4;
    env->dregs[5] = regs->d5;
    env->dregs[6] = regs->d6;
    env->dregs[7] = regs->d7;
    env->aregs[0] = regs->a0;
    env->aregs[1] = regs->a1;
    env->aregs[2] = regs->a2;
    env->aregs[3] = regs->a3;
    env->aregs[4] = regs->a4;
    env->aregs[5] = regs->a5;
    env->aregs[6] = regs->a6;
    env->aregs[7] = regs->usp;
    ts->sim_syscalls = 1;
#elif defined(TARGET_MICROBLAZE)
    env->regs[0] = regs->r0;
    env->regs[1] = regs->r1;
    env->regs[2] = regs->r2;
    env->regs[3] = regs->r3;
    env->regs[4] = regs->r4;
    env->regs[5] = regs->r5;
    env->regs[6] = regs->r6;
    env->regs[7] = regs->r7;
    env->regs[8] = regs->r8;
    env->regs[9] = regs->r9;
    env->regs[10] = regs->r10;
    env->regs[11] = regs->r11;
    env->regs[12] = regs->r12;
    env->regs[13] = regs->r13;
    env->regs[14] = regs->r14;
    env->regs[15] = regs->r15;
    env->regs[16] = regs->r16;
    env->regs[17] = regs->r17;
    env->regs[18] = regs->r18;
    env->regs[19] = regs->r19;
    env->regs[20] = regs->r20;
    env->regs[21] = regs->r21;
    env->regs[22] = regs->r22;
    env->regs[23] = regs->r23;
    env->regs[24] = regs->r24;
    env->regs[25] = regs->r25;
    env->regs[26] = regs->r26;
    env->regs[27] = regs->r27;
    env->regs[28] = regs->r28;
    env->regs[29] = regs->r29;
    env->regs[30] = regs->r30;
    env->regs[31] = regs->r31;
    env->sregs[SR_PC] = regs->pc;
#elif defined(TARGET_MIPS)
    for(i = 0; i < 32; i++) {
        env->active_tc.gpr[i] = regs->regs[i];
    env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
    if (regs->cp0_epc & 1) {
        env->hflags |= MIPS_HFLAG_M16;
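/*
 * Added note: for MIPS the entry point's low bit doubles as an ISA-mode
 * flag, so the code above masks it off the PC and, when it was set, turns
 * on MIPS_HFLAG_M16 so execution starts in the compressed
 * (MIPS16/microMIPS) instruction set.
 */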
#elif defined(TARGET_OPENRISC)
    for (i = 0; i < 32; i++) {
        env->gpr[i] = regs->gpr[i];
#elif defined(TARGET_SH4)
    for(i = 0; i < 16; i++) {
        env->gregs[i] = regs->regs[i];
#elif defined(TARGET_ALPHA)
    for(i = 0; i < 28; i++) {
        env->ir[i] = ((abi_ulong *)regs)[i];
    env->ir[IR_SP] = regs->usp;
#elif defined(TARGET_CRIS)
    env->regs[0] = regs->r0;
    env->regs[1] = regs->r1;
    env->regs[2] = regs->r2;
    env->regs[3] = regs->r3;
    env->regs[4] = regs->r4;
    env->regs[5] = regs->r5;
    env->regs[6] = regs->r6;
    env->regs[7] = regs->r7;
    env->regs[8] = regs->r8;
    env->regs[9] = regs->r9;
    env->regs[10] = regs->r10;
    env->regs[11] = regs->r11;
    env->regs[12] = regs->r12;
    env->regs[13] = regs->r13;
    env->regs[14] = info->start_stack;
    env->regs[15] = regs->acr;
    env->pc = regs->erp;
#elif defined(TARGET_S390X)
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->gprs[i];
    env->psw.mask = regs->psw.mask;
    env->psw.addr = regs->psw.addr;
#error unsupported target CPU

#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call. */

    if (gdbserver_start(gdbstub_port) < 0) {
        fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
    gdb_handlesig(cpu, 0);