/*
 *  Copyright (c) 2003-2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-version.h"
#include <sys/syscall.h>
#include <sys/resource.h>

#include "qapi/error.h"
#include "qemu/path.h"
#include "qemu/cutils.h"
#include "qemu/help_option.h"
#include "exec/exec-all.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
static const char *filename;
static const char *argv0;
static int gdbstub_port;
static envlist_t *envlist;
static const char *cpu_model;
unsigned long mmap_min_addr;
unsigned long guest_base;
#define EXCP_DUMP(env, fmt, ...)                                        \
do {                                                                    \
    CPUState *cs = ENV_GET_CPU(env);                                    \
    fprintf(stderr, fmt , ## __VA_ARGS__);                              \
    cpu_dump_state(cs, stderr, fprintf, 0);                             \
    if (qemu_log_separate()) {                                          \
        qemu_log(fmt, ## __VA_ARGS__);                                  \
        log_cpu_state(cs, 0);                                           \
    }                                                                   \
} while (0)
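
/*
 * Illustrative note (not part of the original file): the per-target
 * cpu_loop() implementations below use this macro when they hit an
 * exception they cannot handle, for example:
 *
 *     EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n",
 *               trapnr);
 *
 * which prints the message plus a CPU register dump to stderr and, when a
 * separate log file is active, mirrors both into the QEMU log.
 */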
#if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
/*
 * When running 32-on-64 we should make sure we can fit all of the possible
 * guest address space into a contiguous chunk of virtual host memory.
 *
 * This way we will never overlap with our own libraries or binaries or stack
 * or anything else that QEMU maps.
 */
# ifdef TARGET_MIPS
/* MIPS only supports 31 bits of virtual address space for user space */
unsigned long reserved_va = 0x77000000;
# else
unsigned long reserved_va = 0xf7000000;
# endif
#else
unsigned long reserved_va;
#endif
static void usage(int exitcode);

static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;

/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
   we allocate a bigger stack. Need a better solution, for example
   by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;
void gemu_log(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}
#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)
{
    return -1;
}
#endif
/***********************************************************/
/* Helper routines for implementing atomic operations.  */

/* To implement exclusive operations we force all cpus to synchronise.
   We don't require a full sync, only that no cpus are executing guest code.
   The alternative is to map target atomic ops onto host equivalents,
   which requires quite a lot of per host/target work.  */
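
/*
 * Illustrative sketch only (assumed usage, not verbatim from this file):
 * code that must be atomic with respect to all other guest threads brackets
 * its work with the primitives defined below, roughly:
 *
 *     start_exclusive();    // force every other vCPU thread out of guest code
 *     ... read/modify/write the guest memory location ...
 *     end_exclusive();      // let the other vCPU threads resume
 */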
static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
static int pending_cpus;
/* Make sure everything is in a consistent state for calling fork().  */
void fork_start(void)
{
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    pthread_mutex_lock(&exclusive_lock);
    mmap_fork_start();
}

void fork_end(int child)
{
    mmap_fork_end(child);
    if (child) {
        CPUState *cpu, *next_cpu;
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads.  */
        CPU_FOREACH_SAFE(cpu, next_cpu) {
            if (cpu != thread_cpu) {
                QTAILQ_REMOVE(&cpus, cpu, node);
            }
        }
        pending_cpus = 0;
        pthread_mutex_init(&exclusive_lock, NULL);
        pthread_mutex_init(&cpu_list_mutex, NULL);
        pthread_cond_init(&exclusive_cond, NULL);
        pthread_cond_init(&exclusive_resume, NULL);
        qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
        gdbserver_fork(thread_cpu);
    } else {
        pthread_mutex_unlock(&exclusive_lock);
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
    }
}
/* Wait for pending exclusive operations to complete.  The exclusive lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
    }
}
/* Start an exclusive operation.
   Must only be called from outside cpu_arm_exec.  */
static inline void start_exclusive(void)
{
    CPUState *other_cpu;

    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();

    pending_cpus = 1;
    /* Make all other cpus stop executing.  */
    CPU_FOREACH(other_cpu) {
        if (other_cpu->running) {
            pending_cpus++;
            cpu_exit(other_cpu);
        }
    }
    if (pending_cpus > 1) {
        pthread_cond_wait(&exclusive_cond, &exclusive_lock);
    }
}

/* Finish an exclusive operation.  */
static inline void __attribute__((unused)) end_exclusive(void)
{
    pending_cpus = 0;
    pthread_cond_broadcast(&exclusive_resume);
    pthread_mutex_unlock(&exclusive_lock);
}
/* Wait for exclusive ops to finish, and begin cpu execution.  */
static inline void cpu_exec_start(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    exclusive_idle();
    cpu->running = true;
    pthread_mutex_unlock(&exclusive_lock);
}

/* Mark cpu as not executing, and release pending exclusive ops.  */
static inline void cpu_exec_end(CPUState *cpu)
{
    pthread_mutex_lock(&exclusive_lock);
    cpu->running = false;
    if (pending_cpus > 1) {
        pending_cpus--;
        if (pending_cpus == 1) {
            pthread_cond_signal(&exclusive_cond);
        }
    }
    exclusive_idle();
    pthread_mutex_unlock(&exclusive_lock);
}

void cpu_list_lock(void)
{
    pthread_mutex_lock(&cpu_list_mutex);
}

void cpu_list_unlock(void)
{
    pthread_mutex_unlock(&cpu_list_mutex);
}
/***********************************************************/
/* CPUX86 core interface */

uint64_t cpu_get_tsc(CPUX86State *env)
{
    return cpu_get_host_ticks();
}

static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
                     int flags)
{
    unsigned int e1, e2;
    uint32_t *p;
    e1 = (addr << 16) | (limit & 0xffff);
    e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
    e2 |= flags;
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}

static uint64_t *idt_table;
#ifdef TARGET_X86_64
static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
    p[2] = tswap32(addr >> 32);
    p[3] = 0;
}
/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
}
#else
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
{
    uint32_t *p, e1, e2;
    e1 = (addr & 0xffff) | (sel << 16);
    e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
    p = ptr;
    p[0] = tswap32(e1);
    p[1] = tswap32(e2);
}
/* only dpl matters as we do only user space emulation */
static void set_idt(int n, unsigned int dpl)
{
    set_gate(idt_table + n, 0, dpl, 0, 0);
}
#endif
284 void cpu_loop(CPUX86State
*env
)
286 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
290 target_siginfo_t info
;
294 trapnr
= cpu_x86_exec(cs
);
298 /* linux syscall from int $0x80 */
299 ret
= do_syscall(env
,
308 if (ret
== -TARGET_ERESTARTSYS
) {
310 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
311 env
->regs
[R_EAX
] = ret
;
316 /* linux syscall from syscall instruction */
317 ret
= do_syscall(env
,
326 if (ret
== -TARGET_ERESTARTSYS
) {
328 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
329 env
->regs
[R_EAX
] = ret
;
335 info
.si_signo
= TARGET_SIGBUS
;
337 info
.si_code
= TARGET_SI_KERNEL
;
338 info
._sifields
._sigfault
._addr
= 0;
339 queue_signal(env
, info
.si_signo
, &info
);
342 /* XXX: potential problem if ABI32 */
343 #ifndef TARGET_X86_64
344 if (env
->eflags
& VM_MASK
) {
345 handle_vm86_fault(env
);
349 info
.si_signo
= TARGET_SIGSEGV
;
351 info
.si_code
= TARGET_SI_KERNEL
;
352 info
._sifields
._sigfault
._addr
= 0;
353 queue_signal(env
, info
.si_signo
, &info
);
357 info
.si_signo
= TARGET_SIGSEGV
;
359 if (!(env
->error_code
& 1))
360 info
.si_code
= TARGET_SEGV_MAPERR
;
362 info
.si_code
= TARGET_SEGV_ACCERR
;
363 info
._sifields
._sigfault
._addr
= env
->cr
[2];
364 queue_signal(env
, info
.si_signo
, &info
);
367 #ifndef TARGET_X86_64
368 if (env
->eflags
& VM_MASK
) {
369 handle_vm86_trap(env
, trapnr
);
373 /* division by zero */
374 info
.si_signo
= TARGET_SIGFPE
;
376 info
.si_code
= TARGET_FPE_INTDIV
;
377 info
._sifields
._sigfault
._addr
= env
->eip
;
378 queue_signal(env
, info
.si_signo
, &info
);
383 #ifndef TARGET_X86_64
384 if (env
->eflags
& VM_MASK
) {
385 handle_vm86_trap(env
, trapnr
);
389 info
.si_signo
= TARGET_SIGTRAP
;
391 if (trapnr
== EXCP01_DB
) {
392 info
.si_code
= TARGET_TRAP_BRKPT
;
393 info
._sifields
._sigfault
._addr
= env
->eip
;
395 info
.si_code
= TARGET_SI_KERNEL
;
396 info
._sifields
._sigfault
._addr
= 0;
398 queue_signal(env
, info
.si_signo
, &info
);
403 #ifndef TARGET_X86_64
404 if (env
->eflags
& VM_MASK
) {
405 handle_vm86_trap(env
, trapnr
);
409 info
.si_signo
= TARGET_SIGSEGV
;
411 info
.si_code
= TARGET_SI_KERNEL
;
412 info
._sifields
._sigfault
._addr
= 0;
413 queue_signal(env
, info
.si_signo
, &info
);
417 info
.si_signo
= TARGET_SIGILL
;
419 info
.si_code
= TARGET_ILL_ILLOPN
;
420 info
._sifields
._sigfault
._addr
= env
->eip
;
421 queue_signal(env
, info
.si_signo
, &info
);
424 /* just indicate that signals should be handled asap */
430 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
435 info
.si_code
= TARGET_TRAP_BRKPT
;
436 queue_signal(env
, info
.si_signo
, &info
);
441 pc
= env
->segs
[R_CS
].base
+ env
->eip
;
442 EXCP_DUMP(env
, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
446 process_pending_signals(env
);
#define get_user_code_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
       if (!__r && bswap_code(arm_sctlr_b(env))) {      \
           (x) = bswap32(x);                            \
       }                                                \
       __r; })

#define get_user_code_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
       if (!__r && bswap_code(arm_sctlr_b(env))) {      \
           (x) = bswap16(x);                            \
       }                                                \
       __r; })

#define get_user_data_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
       if (!__r && arm_cpu_bswap_data(env)) {           \
           (x) = bswap32(x);                            \
       }                                                \
       __r; })

#define get_user_data_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
       if (!__r && arm_cpu_bswap_data(env)) {           \
           (x) = bswap16(x);                            \
       }                                                \
       __r; })

#define put_user_data_u32(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
       if (arm_cpu_bswap_data(env)) {                   \
           __x = bswap32(__x);                          \
       }                                                \
       put_user_u32(__x, (gaddr)); })

#define put_user_data_u16(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
       if (arm_cpu_bswap_data(env)) {                   \
           __x = bswap16(__x);                          \
       }                                                \
       put_user_u16(__x, (gaddr)); })
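
/*
 * Illustrative note (not original text): the ARM cpu_loop() below uses these
 * helpers to read guest instructions and data with the correct endianness,
 * e.g. when decoding an FPU emulation opcode or a software interrupt:
 *
 *     get_user_code_u32(opcode, env->regs[15], env);
 *     get_user_code_u16(insn, env->regs[15], env);     // Thumb
 */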
/* Commpage handling -- there is no commpage for AArch64 */

#if !defined(TARGET_AARCH64)
/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
 * Input:
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * Output:
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
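
/*
 * Illustrative sketch only (guest-side view, with assumed register setup):
 * 32-bit ARM code reaches this helper through its fixed commpage address,
 * roughly:
 *
 *     @ r0 = &oldval, r1 = &newval, r2 = &target  (see "Input" above)
 *     ldr   ip, =0xffff0f60        @ __kernel_cmpxchg64 entry point
 *     blx   ip
 *     @ on return r0 == 0 (and C set) iff *target was exchanged
 *
 * Executing from the commpage raises EXCP_KERNEL_TRAP, which do_kernel_trap()
 * below dispatches to arm_kernel_cmpxchg64_helper().
 */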
518 static void arm_kernel_cmpxchg64_helper(CPUARMState
*env
)
520 uint64_t oldval
, newval
, val
;
522 target_siginfo_t info
;
524 /* Based on the 32 bit code in do_kernel_trap */
526 /* XXX: This only works between threads, not between processes.
527 It's probably possible to implement this with native host
528 operations. However things like ldrex/strex are much harder so
529 there's not much point trying. */
531 cpsr
= cpsr_read(env
);
534 if (get_user_u64(oldval
, env
->regs
[0])) {
535 env
->exception
.vaddress
= env
->regs
[0];
539 if (get_user_u64(newval
, env
->regs
[1])) {
540 env
->exception
.vaddress
= env
->regs
[1];
544 if (get_user_u64(val
, addr
)) {
545 env
->exception
.vaddress
= addr
;
552 if (put_user_u64(val
, addr
)) {
553 env
->exception
.vaddress
= addr
;
563 cpsr_write(env
, cpsr
, CPSR_C
, CPSRWriteByInstr
);
569 /* We get the PC of the entry address - which is as good as anything,
570 on a real kernel what you get depends on which mode it uses. */
571 info
.si_signo
= TARGET_SIGSEGV
;
573 /* XXX: check env->error_code */
574 info
.si_code
= TARGET_SEGV_MAPERR
;
575 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
576 queue_signal(env
, info
.si_signo
, &info
);
579 /* Handle a jump to the kernel code page. */
581 do_kernel_trap(CPUARMState
*env
)
587 switch (env
->regs
[15]) {
588 case 0xffff0fa0: /* __kernel_memory_barrier */
589 /* ??? No-op. Will need to do better for SMP. */
591 case 0xffff0fc0: /* __kernel_cmpxchg */
592 /* XXX: This only works between threads, not between processes.
593 It's probably possible to implement this with native host
594 operations. However things like ldrex/strex are much harder so
595 there's not much point trying. */
597 cpsr
= cpsr_read(env
);
599 /* FIXME: This should SEGV if the access fails. */
600 if (get_user_u32(val
, addr
))
602 if (val
== env
->regs
[0]) {
604 /* FIXME: Check for segfaults. */
605 put_user_u32(val
, addr
);
612 cpsr_write(env
, cpsr
, CPSR_C
, CPSRWriteByInstr
);
615 case 0xffff0fe0: /* __kernel_get_tls */
616 env
->regs
[0] = cpu_get_tls(env
);
618 case 0xffff0f60: /* __kernel_cmpxchg64 */
619 arm_kernel_cmpxchg64_helper(env
);
625 /* Jump back to the caller. */
626 addr
= env
->regs
[14];
631 env
->regs
[15] = addr
;
/* Store exclusive handling for AArch32 */
static int do_strex(CPUARMState *env)
{
645 if (env
->exclusive_addr
!= env
->exclusive_test
) {
648 /* We know we're always AArch32 so the address is in uint32_t range
649 * unless it was the -1 exclusive-monitor-lost value (which won't
650 * match exclusive_test above).
652 assert(extract64(env
->exclusive_addr
, 32, 32) == 0);
653 addr
= env
->exclusive_addr
;
654 size
= env
->exclusive_info
& 0xf;
657 segv
= get_user_u8(val
, addr
);
660 segv
= get_user_data_u16(val
, addr
, env
);
664 segv
= get_user_data_u32(val
, addr
, env
);
670 env
->exception
.vaddress
= addr
;
675 segv
= get_user_data_u32(valhi
, addr
+ 4, env
);
677 env
->exception
.vaddress
= addr
+ 4;
680 if (arm_cpu_bswap_data(env
)) {
681 val
= deposit64((uint64_t)valhi
, 32, 32, val
);
683 val
= deposit64(val
, 32, 32, valhi
);
686 if (val
!= env
->exclusive_val
) {
690 val
= env
->regs
[(env
->exclusive_info
>> 8) & 0xf];
693 segv
= put_user_u8(val
, addr
);
696 segv
= put_user_data_u16(val
, addr
, env
);
700 segv
= put_user_data_u32(val
, addr
, env
);
704 env
->exception
.vaddress
= addr
;
708 val
= env
->regs
[(env
->exclusive_info
>> 12) & 0xf];
709 segv
= put_user_data_u32(val
, addr
+ 4, env
);
711 env
->exception
.vaddress
= addr
+ 4;
718 env
->regs
[(env
->exclusive_info
>> 4) & 0xf] = rc
;
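
/*
 * Illustrative note (not original text): the ARM cpu_loop() below calls
 * do_strex() when the translated code raises a store-exclusive exception.
 * A zero return means the access did not fault (the success/failure code has
 * already been written to the destination register), so execution resumes;
 * a non-zero return falls through to the data-abort/SIGSEGV path:
 *
 *     if (!do_strex(env)) {
 *         break;
 *     }
 *     // fall through for segv
 */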
724 void cpu_loop(CPUARMState
*env
)
726 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
728 unsigned int n
, insn
;
729 target_siginfo_t info
;
735 trapnr
= cpu_arm_exec(cs
);
740 TaskState
*ts
= cs
->opaque
;
744 /* we handle the FPU emulation here, as Linux */
745 /* we get the opcode */
746 /* FIXME - what to do if get_user() fails? */
747 get_user_code_u32(opcode
, env
->regs
[15], env
);
749 rc
= EmulateAll(opcode
, &ts
->fpa
, env
);
750 if (rc
== 0) { /* illegal instruction */
751 info
.si_signo
= TARGET_SIGILL
;
753 info
.si_code
= TARGET_ILL_ILLOPN
;
754 info
._sifields
._sigfault
._addr
= env
->regs
[15];
755 queue_signal(env
, info
.si_signo
, &info
);
756 } else if (rc
< 0) { /* FP exception */
759 /* translate softfloat flags to FPSR flags */
760 if (-rc
& float_flag_invalid
)
762 if (-rc
& float_flag_divbyzero
)
764 if (-rc
& float_flag_overflow
)
766 if (-rc
& float_flag_underflow
)
768 if (-rc
& float_flag_inexact
)
771 FPSR fpsr
= ts
->fpa
.fpsr
;
772 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
774 if (fpsr
& (arm_fpe
<< 16)) { /* exception enabled? */
775 info
.si_signo
= TARGET_SIGFPE
;
778 /* ordered by priority, least first */
779 if (arm_fpe
& BIT_IXC
) info
.si_code
= TARGET_FPE_FLTRES
;
780 if (arm_fpe
& BIT_UFC
) info
.si_code
= TARGET_FPE_FLTUND
;
781 if (arm_fpe
& BIT_OFC
) info
.si_code
= TARGET_FPE_FLTOVF
;
782 if (arm_fpe
& BIT_DZC
) info
.si_code
= TARGET_FPE_FLTDIV
;
783 if (arm_fpe
& BIT_IOC
) info
.si_code
= TARGET_FPE_FLTINV
;
785 info
._sifields
._sigfault
._addr
= env
->regs
[15];
786 queue_signal(env
, info
.si_signo
, &info
);
791 /* accumulate unenabled exceptions */
792 if ((!(fpsr
& BIT_IXE
)) && (arm_fpe
& BIT_IXC
))
794 if ((!(fpsr
& BIT_UFE
)) && (arm_fpe
& BIT_UFC
))
796 if ((!(fpsr
& BIT_OFE
)) && (arm_fpe
& BIT_OFC
))
798 if ((!(fpsr
& BIT_DZE
)) && (arm_fpe
& BIT_DZC
))
800 if ((!(fpsr
& BIT_IOE
)) && (arm_fpe
& BIT_IOC
))
803 } else { /* everything OK */
814 if (trapnr
== EXCP_BKPT
) {
816 /* FIXME - what to do if get_user() fails? */
817 get_user_code_u16(insn
, env
->regs
[15], env
);
821 /* FIXME - what to do if get_user() fails? */
822 get_user_code_u32(insn
, env
->regs
[15], env
);
823 n
= (insn
& 0xf) | ((insn
>> 4) & 0xff0);
828 /* FIXME - what to do if get_user() fails? */
829 get_user_code_u16(insn
, env
->regs
[15] - 2, env
);
832 /* FIXME - what to do if get_user() fails? */
833 get_user_code_u32(insn
, env
->regs
[15] - 4, env
);
838 if (n
== ARM_NR_cacheflush
) {
840 } else if (n
== ARM_NR_semihosting
841 || n
== ARM_NR_thumb_semihosting
) {
842 env
->regs
[0] = do_arm_semihosting (env
);
843 } else if (n
== 0 || n
>= ARM_SYSCALL_BASE
|| env
->thumb
) {
845 if (env
->thumb
|| n
== 0) {
848 n
-= ARM_SYSCALL_BASE
;
851 if ( n
> ARM_NR_BASE
) {
853 case ARM_NR_cacheflush
:
857 cpu_set_tls(env
, env
->regs
[0]);
860 case ARM_NR_breakpoint
:
861 env
->regs
[15] -= env
->thumb
? 2 : 4;
864 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
866 env
->regs
[0] = -TARGET_ENOSYS
;
870 ret
= do_syscall(env
,
879 if (ret
== -TARGET_ERESTARTSYS
) {
880 env
->regs
[15] -= env
->thumb
? 2 : 4;
881 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
891 /* just indicate that signals should be handled asap */
894 if (!do_strex(env
)) {
897 /* fall through for segv */
898 case EXCP_PREFETCH_ABORT
:
899 case EXCP_DATA_ABORT
:
900 addr
= env
->exception
.vaddress
;
902 info
.si_signo
= TARGET_SIGSEGV
;
904 /* XXX: check env->error_code */
905 info
.si_code
= TARGET_SEGV_MAPERR
;
906 info
._sifields
._sigfault
._addr
= addr
;
907 queue_signal(env
, info
.si_signo
, &info
);
915 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
920 info
.si_code
= TARGET_TRAP_BRKPT
;
921 queue_signal(env
, info
.si_signo
, &info
);
925 case EXCP_KERNEL_TRAP
:
926 if (do_kernel_trap(env
))
930 /* nothing to do here for user-mode, just resume guest code */
934 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
937 process_pending_signals(env
);
/*
 * Handle AArch64 store-release exclusive
 *
 * rs = gets the status result of store exclusive
 * rt = is the register that is stored
 * rt2 = is the second register store (in STP)
 */
static int do_strex_a64(CPUARMState *env)
{
    /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
963 size
= extract32(env
->exclusive_info
, 0, 2);
964 is_pair
= extract32(env
->exclusive_info
, 2, 1);
965 rs
= extract32(env
->exclusive_info
, 4, 5);
966 rt
= extract32(env
->exclusive_info
, 9, 5);
967 rt2
= extract32(env
->exclusive_info
, 14, 5);
969 addr
= env
->exclusive_addr
;
971 if (addr
!= env
->exclusive_test
) {
977 segv
= get_user_u8(val
, addr
);
980 segv
= get_user_u16(val
, addr
);
983 segv
= get_user_u32(val
, addr
);
986 segv
= get_user_u64(val
, addr
);
992 env
->exception
.vaddress
= addr
;
995 if (val
!= env
->exclusive_val
) {
1000 segv
= get_user_u32(val
, addr
+ 4);
1002 segv
= get_user_u64(val
, addr
+ 8);
1005 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
1008 if (val
!= env
->exclusive_high
) {
1012 /* handle the zero register */
1013 val
= rt
== 31 ? 0 : env
->xregs
[rt
];
1016 segv
= put_user_u8(val
, addr
);
1019 segv
= put_user_u16(val
, addr
);
1022 segv
= put_user_u32(val
, addr
);
1025 segv
= put_user_u64(val
, addr
);
1032 /* handle the zero register */
1033 val
= rt2
== 31 ? 0 : env
->xregs
[rt2
];
1035 segv
= put_user_u32(val
, addr
+ 4);
1037 segv
= put_user_u64(val
, addr
+ 8);
1040 env
->exception
.vaddress
= addr
+ (size
== 2 ? 4 : 8);
1047 /* rs == 31 encodes a write to the ZR, thus throwing away
1048 * the status return. This is rather silly but valid.
1051 env
->xregs
[rs
] = rc
;
1054 /* instruction faulted, PC does not advance */
1055 /* either way a strex releases any exclusive lock we have */
1056 env
->exclusive_addr
= -1;
1061 /* AArch64 main loop */
1062 void cpu_loop(CPUARMState
*env
)
1064 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
1067 target_siginfo_t info
;
1071 trapnr
= cpu_arm_exec(cs
);
1076 ret
= do_syscall(env
,
1085 if (ret
== -TARGET_ERESTARTSYS
) {
1087 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
1088 env
->xregs
[0] = ret
;
1091 case EXCP_INTERRUPT
:
1092 /* just indicate that signals should be handled asap */
1095 info
.si_signo
= TARGET_SIGILL
;
1097 info
.si_code
= TARGET_ILL_ILLOPN
;
1098 info
._sifields
._sigfault
._addr
= env
->pc
;
1099 queue_signal(env
, info
.si_signo
, &info
);
1102 if (!do_strex_a64(env
)) {
1105 /* fall through for segv */
1106 case EXCP_PREFETCH_ABORT
:
1107 case EXCP_DATA_ABORT
:
1108 info
.si_signo
= TARGET_SIGSEGV
;
1110 /* XXX: check env->error_code */
1111 info
.si_code
= TARGET_SEGV_MAPERR
;
1112 info
._sifields
._sigfault
._addr
= env
->exception
.vaddress
;
1113 queue_signal(env
, info
.si_signo
, &info
);
1117 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1119 info
.si_signo
= sig
;
1121 info
.si_code
= TARGET_TRAP_BRKPT
;
1122 queue_signal(env
, info
.si_signo
, &info
);
1126 env
->xregs
[0] = do_arm_semihosting(env
);
1129 /* nothing to do here for user-mode, just resume guest code */
1132 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
1135 process_pending_signals(env
);
1136 /* Exception return on AArch64 always clears the exclusive monitor,
1137 * so any return to running guest code implies this.
1138 * A strex (successful or otherwise) also clears the monitor, so
1139 * we don't need to specialcase EXCP_STREX.
1141 env
->exclusive_addr
= -1;
1144 #endif /* ndef TARGET_ABI32 */
1148 #ifdef TARGET_UNICORE32
1150 void cpu_loop(CPUUniCore32State
*env
)
1152 CPUState
*cs
= CPU(uc32_env_get_cpu(env
));
1154 unsigned int n
, insn
;
1155 target_siginfo_t info
;
1159 trapnr
= uc32_cpu_exec(cs
);
1162 case UC32_EXCP_PRIV
:
1165 get_user_u32(insn
, env
->regs
[31] - 4);
1166 n
= insn
& 0xffffff;
1168 if (n
>= UC32_SYSCALL_BASE
) {
1170 n
-= UC32_SYSCALL_BASE
;
1171 if (n
== UC32_SYSCALL_NR_set_tls
) {
1172 cpu_set_tls(env
, env
->regs
[0]);
1175 abi_long ret
= do_syscall(env
,
1184 if (ret
== -TARGET_ERESTARTSYS
) {
1186 } else if (ret
!= -TARGET_QEMU_ESIGRETURN
) {
1195 case UC32_EXCP_DTRAP
:
1196 case UC32_EXCP_ITRAP
:
1197 info
.si_signo
= TARGET_SIGSEGV
;
1199 /* XXX: check env->error_code */
1200 info
.si_code
= TARGET_SEGV_MAPERR
;
1201 info
._sifields
._sigfault
._addr
= env
->cp0
.c4_faultaddr
;
1202 queue_signal(env
, info
.si_signo
, &info
);
1204 case EXCP_INTERRUPT
:
1205 /* just indicate that signals should be handled asap */
1211 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1213 info
.si_signo
= sig
;
1215 info
.si_code
= TARGET_TRAP_BRKPT
;
1216 queue_signal(env
, info
.si_signo
, &info
);
1223 process_pending_signals(env
);
1227 EXCP_DUMP(env
, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr
);
#define SPARC64_STACK_BIAS 2047

/* WARNING: dealing with register windows _is_ complicated. More info
   can be found at http://www.sics.se/~psm/sparcstack.html */
static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
{
    index = (index + cwp * 16) % (16 * env->nwindows);
    /* wrap handling : if cwp is on the last window, then we use the
       registers 'after' the end */
    if (index < 8 && env->cwp == env->nwindows - 1)
        index += 16 * env->nwindows;
    return index;
}
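
/*
 * Illustrative note (not original text): each SPARC register window holds 8
 * "local" and 8 "in" registers (window-relative indexes 8..23).
 * save_window_offset() below spills those 16 registers to the stack frame
 * addressed by that window's stack pointer (window register 6), and
 * restore_window() reloads them; this is how the window overflow/underflow
 * traps (TT_WIN_OVF/TT_WIN_UNF and TT_SPILL/TT_FILL) handled in cpu_loop()
 * are emulated in user mode.
 */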
1249 /* save the register window 'cwp1' */
1250 static inline void save_window_offset(CPUSPARCState
*env
, int cwp1
)
1255 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1256 #ifdef TARGET_SPARC64
1258 sp_ptr
+= SPARC64_STACK_BIAS
;
1260 #if defined(DEBUG_WIN)
1261 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" save_cwp=%d\n",
1264 for(i
= 0; i
< 16; i
++) {
1265 /* FIXME - what to do if put_user() fails? */
1266 put_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1267 sp_ptr
+= sizeof(abi_ulong
);
1271 static void save_window(CPUSPARCState
*env
)
1273 #ifndef TARGET_SPARC64
1274 unsigned int new_wim
;
1275 new_wim
= ((env
->wim
>> 1) | (env
->wim
<< (env
->nwindows
- 1))) &
1276 ((1LL << env
->nwindows
) - 1);
1277 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1280 save_window_offset(env
, cpu_cwp_dec(env
, env
->cwp
- 2));
1286 static void restore_window(CPUSPARCState
*env
)
1288 #ifndef TARGET_SPARC64
1289 unsigned int new_wim
;
1291 unsigned int i
, cwp1
;
1294 #ifndef TARGET_SPARC64
1295 new_wim
= ((env
->wim
<< 1) | (env
->wim
>> (env
->nwindows
- 1))) &
1296 ((1LL << env
->nwindows
) - 1);
1299 /* restore the invalid window */
1300 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1301 sp_ptr
= env
->regbase
[get_reg_index(env
, cwp1
, 6)];
1302 #ifdef TARGET_SPARC64
1304 sp_ptr
+= SPARC64_STACK_BIAS
;
1306 #if defined(DEBUG_WIN)
1307 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx
" load_cwp=%d\n",
1310 for(i
= 0; i
< 16; i
++) {
1311 /* FIXME - what to do if get_user() fails? */
1312 get_user_ual(env
->regbase
[get_reg_index(env
, cwp1
, 8 + i
)], sp_ptr
);
1313 sp_ptr
+= sizeof(abi_ulong
);
1315 #ifdef TARGET_SPARC64
1317 if (env
->cleanwin
< env
->nwindows
- 1)
1325 static void flush_windows(CPUSPARCState
*env
)
1331 /* if restore would invoke restore_window(), then we can stop */
1332 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ offset
);
1333 #ifndef TARGET_SPARC64
1334 if (env
->wim
& (1 << cwp1
))
1337 if (env
->canrestore
== 0)
1342 save_window_offset(env
, cwp1
);
1345 cwp1
= cpu_cwp_inc(env
, env
->cwp
+ 1);
1346 #ifndef TARGET_SPARC64
1347 /* set wim so that restore will reload the registers */
1348 env
->wim
= 1 << cwp1
;
1350 #if defined(DEBUG_WIN)
1351 printf("flush_windows: nb=%d\n", offset
- 1);
1355 void cpu_loop (CPUSPARCState
*env
)
1357 CPUState
*cs
= CPU(sparc_env_get_cpu(env
));
1360 target_siginfo_t info
;
1364 trapnr
= cpu_sparc_exec(cs
);
1367 /* Compute PSR before exposing state. */
1368 if (env
->cc_op
!= CC_OP_FLAGS
) {
1373 #ifndef TARGET_SPARC64
1380 ret
= do_syscall (env
, env
->gregs
[1],
1381 env
->regwptr
[0], env
->regwptr
[1],
1382 env
->regwptr
[2], env
->regwptr
[3],
1383 env
->regwptr
[4], env
->regwptr
[5],
1385 if (ret
== -TARGET_ERESTARTSYS
|| ret
== -TARGET_QEMU_ESIGRETURN
) {
1388 if ((abi_ulong
)ret
>= (abi_ulong
)(-515)) {
1389 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1390 env
->xcc
|= PSR_CARRY
;
1392 env
->psr
|= PSR_CARRY
;
1396 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1397 env
->xcc
&= ~PSR_CARRY
;
1399 env
->psr
&= ~PSR_CARRY
;
1402 env
->regwptr
[0] = ret
;
1403 /* next instruction */
1405 env
->npc
= env
->npc
+ 4;
1407 case 0x83: /* flush windows */
1412 /* next instruction */
1414 env
->npc
= env
->npc
+ 4;
1416 #ifndef TARGET_SPARC64
1417 case TT_WIN_OVF
: /* window overflow */
1420 case TT_WIN_UNF
: /* window underflow */
1421 restore_window(env
);
1426 info
.si_signo
= TARGET_SIGSEGV
;
1428 /* XXX: check env->error_code */
1429 info
.si_code
= TARGET_SEGV_MAPERR
;
1430 info
._sifields
._sigfault
._addr
= env
->mmuregs
[4];
1431 queue_signal(env
, info
.si_signo
, &info
);
1435 case TT_SPILL
: /* window overflow */
1438 case TT_FILL
: /* window underflow */
1439 restore_window(env
);
1444 info
.si_signo
= TARGET_SIGSEGV
;
1446 /* XXX: check env->error_code */
1447 info
.si_code
= TARGET_SEGV_MAPERR
;
1448 if (trapnr
== TT_DFAULT
)
1449 info
._sifields
._sigfault
._addr
= env
->dmmuregs
[4];
1451 info
._sifields
._sigfault
._addr
= cpu_tsptr(env
)->tpc
;
1452 queue_signal(env
, info
.si_signo
, &info
);
1455 #ifndef TARGET_ABI32
1458 sparc64_get_context(env
);
1462 sparc64_set_context(env
);
1466 case EXCP_INTERRUPT
:
1467 /* just indicate that signals should be handled asap */
1471 info
.si_signo
= TARGET_SIGILL
;
1473 info
.si_code
= TARGET_ILL_ILLOPC
;
1474 info
._sifields
._sigfault
._addr
= env
->pc
;
1475 queue_signal(env
, info
.si_signo
, &info
);
1482 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
1485 info
.si_signo
= sig
;
1487 info
.si_code
= TARGET_TRAP_BRKPT
;
1488 queue_signal(env
, info
.si_signo
, &info
);
1493 printf ("Unhandled trap: 0x%x\n", trapnr
);
1494 cpu_dump_state(cs
, stderr
, fprintf
, 0);
1497 process_pending_signals (env
);
static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env)
{
    return cpu_get_host_ticks();
}

uint64_t cpu_ppc_load_tbl(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env);
}

uint32_t cpu_ppc_load_tbu(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env) >> 32;
}

uint64_t cpu_ppc_load_atbl(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env);
}

uint32_t cpu_ppc_load_atbu(CPUPPCState *env)
{
    return cpu_ppc_get_tb(env) >> 32;
}

uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env)
    __attribute__ (( alias ("cpu_ppc_load_tbu") ));

uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env)
{
    return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
}
/* XXX: to be fixed */
int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
{
    return -1;
}

int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
{
    return -1;
}
1548 static int do_store_exclusive(CPUPPCState
*env
)
1551 target_ulong page_addr
;
1552 target_ulong val
, val2
__attribute__((unused
)) = 0;
1556 addr
= env
->reserve_ea
;
1557 page_addr
= addr
& TARGET_PAGE_MASK
;
1560 flags
= page_get_flags(page_addr
);
1561 if ((flags
& PAGE_READ
) == 0) {
1564 int reg
= env
->reserve_info
& 0x1f;
1565 int size
= env
->reserve_info
>> 5;
1568 if (addr
== env
->reserve_addr
) {
1570 case 1: segv
= get_user_u8(val
, addr
); break;
1571 case 2: segv
= get_user_u16(val
, addr
); break;
1572 case 4: segv
= get_user_u32(val
, addr
); break;
1573 #if defined(TARGET_PPC64)
1574 case 8: segv
= get_user_u64(val
, addr
); break;
1576 segv
= get_user_u64(val
, addr
);
1578 segv
= get_user_u64(val2
, addr
+ 8);
1585 if (!segv
&& val
== env
->reserve_val
) {
1586 val
= env
->gpr
[reg
];
1588 case 1: segv
= put_user_u8(val
, addr
); break;
1589 case 2: segv
= put_user_u16(val
, addr
); break;
1590 case 4: segv
= put_user_u32(val
, addr
); break;
1591 #if defined(TARGET_PPC64)
1592 case 8: segv
= put_user_u64(val
, addr
); break;
1594 if (val2
== env
->reserve_val2
) {
1597 val
= env
->gpr
[reg
+1];
1599 val2
= env
->gpr
[reg
+1];
1601 segv
= put_user_u64(val
, addr
);
1603 segv
= put_user_u64(val2
, addr
+ 8);
1616 env
->crf
[0] = (stored
<< 1) | xer_so
;
1617 env
->reserve_addr
= (target_ulong
)-1;
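
/*
 * Illustrative sketch only (guest-side view): a typical PowerPC atomic
 * sequence such as
 *
 *     1:  lwarx  r5, 0, r3        # load word and reserve
 *         add    r5, r5, r4
 *         stwcx. r5, 0, r3        # store conditional, sets CR0.EQ on success
 *         bne-   1b
 *
 * ends up here in user mode: the store-conditional raises POWERPC_EXCP_STCX
 * (see cpu_loop() below), and do_store_exclusive() re-reads the reserved
 * address, compares it with env->reserve_val and performs the store only if
 * the value is unchanged, reporting the outcome through CR0.
 */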
1627 void cpu_loop(CPUPPCState
*env
)
1629 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
1630 target_siginfo_t info
;
1636 trapnr
= cpu_ppc_exec(cs
);
1639 case POWERPC_EXCP_NONE
:
1642 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1643 cpu_abort(cs
, "Critical interrupt while in user mode. "
1646 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1647 cpu_abort(cs
, "Machine check exception while in user mode. "
1650 case POWERPC_EXCP_DSI
: /* Data storage exception */
1651 EXCP_DUMP(env
, "Invalid data memory access: 0x" TARGET_FMT_lx
"\n",
1653 /* XXX: check this. Seems bugged */
1654 switch (env
->error_code
& 0xFF000000) {
1656 info
.si_signo
= TARGET_SIGSEGV
;
1658 info
.si_code
= TARGET_SEGV_MAPERR
;
1661 info
.si_signo
= TARGET_SIGILL
;
1663 info
.si_code
= TARGET_ILL_ILLADR
;
1666 info
.si_signo
= TARGET_SIGSEGV
;
1668 info
.si_code
= TARGET_SEGV_ACCERR
;
1671 /* Let's send a regular segfault... */
1672 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1674 info
.si_signo
= TARGET_SIGSEGV
;
1676 info
.si_code
= TARGET_SEGV_MAPERR
;
1679 info
._sifields
._sigfault
._addr
= env
->nip
;
1680 queue_signal(env
, info
.si_signo
, &info
);
1682 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1683 EXCP_DUMP(env
, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1684 "\n", env
->spr
[SPR_SRR0
]);
1685 /* XXX: check this */
1686 switch (env
->error_code
& 0xFF000000) {
1688 info
.si_signo
= TARGET_SIGSEGV
;
1690 info
.si_code
= TARGET_SEGV_MAPERR
;
1694 info
.si_signo
= TARGET_SIGSEGV
;
1696 info
.si_code
= TARGET_SEGV_ACCERR
;
1699 /* Let's send a regular segfault... */
1700 EXCP_DUMP(env
, "Invalid segfault errno (%02x)\n",
1702 info
.si_signo
= TARGET_SIGSEGV
;
1704 info
.si_code
= TARGET_SEGV_MAPERR
;
1707 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1708 queue_signal(env
, info
.si_signo
, &info
);
1710 case POWERPC_EXCP_EXTERNAL
: /* External input */
1711 cpu_abort(cs
, "External interrupt while in user mode. "
1714 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1715 EXCP_DUMP(env
, "Unaligned memory access\n");
1716 /* XXX: check this */
1717 info
.si_signo
= TARGET_SIGBUS
;
1719 info
.si_code
= TARGET_BUS_ADRALN
;
1720 info
._sifields
._sigfault
._addr
= env
->nip
;
1721 queue_signal(env
, info
.si_signo
, &info
);
1723 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1724 /* XXX: check this */
1725 switch (env
->error_code
& ~0xF) {
1726 case POWERPC_EXCP_FP
:
1727 EXCP_DUMP(env
, "Floating point program exception\n");
1728 info
.si_signo
= TARGET_SIGFPE
;
1730 switch (env
->error_code
& 0xF) {
1731 case POWERPC_EXCP_FP_OX
:
1732 info
.si_code
= TARGET_FPE_FLTOVF
;
1734 case POWERPC_EXCP_FP_UX
:
1735 info
.si_code
= TARGET_FPE_FLTUND
;
1737 case POWERPC_EXCP_FP_ZX
:
1738 case POWERPC_EXCP_FP_VXZDZ
:
1739 info
.si_code
= TARGET_FPE_FLTDIV
;
1741 case POWERPC_EXCP_FP_XX
:
1742 info
.si_code
= TARGET_FPE_FLTRES
;
1744 case POWERPC_EXCP_FP_VXSOFT
:
1745 info
.si_code
= TARGET_FPE_FLTINV
;
1747 case POWERPC_EXCP_FP_VXSNAN
:
1748 case POWERPC_EXCP_FP_VXISI
:
1749 case POWERPC_EXCP_FP_VXIDI
:
1750 case POWERPC_EXCP_FP_VXIMZ
:
1751 case POWERPC_EXCP_FP_VXVC
:
1752 case POWERPC_EXCP_FP_VXSQRT
:
1753 case POWERPC_EXCP_FP_VXCVI
:
1754 info
.si_code
= TARGET_FPE_FLTSUB
;
1757 EXCP_DUMP(env
, "Unknown floating point exception (%02x)\n",
1762 case POWERPC_EXCP_INVAL
:
1763 EXCP_DUMP(env
, "Invalid instruction\n");
1764 info
.si_signo
= TARGET_SIGILL
;
1766 switch (env
->error_code
& 0xF) {
1767 case POWERPC_EXCP_INVAL_INVAL
:
1768 info
.si_code
= TARGET_ILL_ILLOPC
;
1770 case POWERPC_EXCP_INVAL_LSWX
:
1771 info
.si_code
= TARGET_ILL_ILLOPN
;
1773 case POWERPC_EXCP_INVAL_SPR
:
1774 info
.si_code
= TARGET_ILL_PRVREG
;
1776 case POWERPC_EXCP_INVAL_FP
:
1777 info
.si_code
= TARGET_ILL_COPROC
;
1780 EXCP_DUMP(env
, "Unknown invalid operation (%02x)\n",
1781 env
->error_code
& 0xF);
1782 info
.si_code
= TARGET_ILL_ILLADR
;
1786 case POWERPC_EXCP_PRIV
:
1787 EXCP_DUMP(env
, "Privilege violation\n");
1788 info
.si_signo
= TARGET_SIGILL
;
1790 switch (env
->error_code
& 0xF) {
1791 case POWERPC_EXCP_PRIV_OPC
:
1792 info
.si_code
= TARGET_ILL_PRVOPC
;
1794 case POWERPC_EXCP_PRIV_REG
:
1795 info
.si_code
= TARGET_ILL_PRVREG
;
1798 EXCP_DUMP(env
, "Unknown privilege violation (%02x)\n",
1799 env
->error_code
& 0xF);
1800 info
.si_code
= TARGET_ILL_PRVOPC
;
1804 case POWERPC_EXCP_TRAP
:
1805 cpu_abort(cs
, "Tried to call a TRAP\n");
1808 /* Should not happen ! */
1809 cpu_abort(cs
, "Unknown program exception (%02x)\n",
1813 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1814 queue_signal(env
, info
.si_signo
, &info
);
1816 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1817 EXCP_DUMP(env
, "No floating point allowed\n");
1818 info
.si_signo
= TARGET_SIGILL
;
1820 info
.si_code
= TARGET_ILL_COPROC
;
1821 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1822 queue_signal(env
, info
.si_signo
, &info
);
1824 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1825 cpu_abort(cs
, "Syscall exception while in user mode. "
1828 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1829 EXCP_DUMP(env
, "No APU instruction allowed\n");
1830 info
.si_signo
= TARGET_SIGILL
;
1832 info
.si_code
= TARGET_ILL_COPROC
;
1833 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1834 queue_signal(env
, info
.si_signo
, &info
);
1836 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1837 cpu_abort(cs
, "Decrementer interrupt while in user mode. "
1840 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1841 cpu_abort(cs
, "Fix interval timer interrupt while in user mode. "
1844 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1845 cpu_abort(cs
, "Watchdog timer interrupt while in user mode. "
1848 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1849 cpu_abort(cs
, "Data TLB exception while in user mode. "
1852 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1853 cpu_abort(cs
, "Instruction TLB exception while in user mode. "
1856 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavail. */
1857 EXCP_DUMP(env
, "No SPE/floating-point instruction allowed\n");
1858 info
.si_signo
= TARGET_SIGILL
;
1860 info
.si_code
= TARGET_ILL_COPROC
;
1861 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1862 queue_signal(env
, info
.si_signo
, &info
);
1864 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data IRQ */
1865 cpu_abort(cs
, "Embedded floating-point data IRQ not handled\n");
1867 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round IRQ */
1868 cpu_abort(cs
, "Embedded floating-point round IRQ not handled\n");
1870 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor IRQ */
1871 cpu_abort(cs
, "Performance monitor exception not handled\n");
1873 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1874 cpu_abort(cs
, "Doorbell interrupt while in user mode. "
1877 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1878 cpu_abort(cs
, "Doorbell critical interrupt while in user mode. "
1881 case POWERPC_EXCP_RESET
: /* System reset exception */
1882 cpu_abort(cs
, "Reset interrupt while in user mode. "
1885 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1886 cpu_abort(cs
, "Data segment exception while in user mode. "
1889 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1890 cpu_abort(cs
, "Instruction segment exception "
1891 "while in user mode. Aborting\n");
1893 /* PowerPC 64 with hypervisor mode support */
1894 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1895 cpu_abort(cs
, "Hypervisor decrementer interrupt "
1896 "while in user mode. Aborting\n");
1898 case POWERPC_EXCP_TRACE
: /* Trace exception */
1900 * we use this exception to emulate step-by-step execution mode.
1903 /* PowerPC 64 with hypervisor mode support */
1904 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1905 cpu_abort(cs
, "Hypervisor data storage exception "
1906 "while in user mode. Aborting\n");
1908 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage excp */
1909 cpu_abort(cs
, "Hypervisor instruction storage exception "
1910 "while in user mode. Aborting\n");
1912 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
1913 cpu_abort(cs
, "Hypervisor data segment exception "
1914 "while in user mode. Aborting\n");
1916 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment excp */
1917 cpu_abort(cs
, "Hypervisor instruction segment exception "
1918 "while in user mode. Aborting\n");
1920 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1921 EXCP_DUMP(env
, "No Altivec instructions allowed\n");
1922 info
.si_signo
= TARGET_SIGILL
;
1924 info
.si_code
= TARGET_ILL_COPROC
;
1925 info
._sifields
._sigfault
._addr
= env
->nip
- 4;
1926 queue_signal(env
, info
.si_signo
, &info
);
1928 case POWERPC_EXCP_PIT
: /* Programmable interval timer IRQ */
1929 cpu_abort(cs
, "Programmable interval timer interrupt "
1930 "while in user mode. Aborting\n");
1932 case POWERPC_EXCP_IO
: /* IO error exception */
1933 cpu_abort(cs
, "IO error exception while in user mode. "
1936 case POWERPC_EXCP_RUNM
: /* Run mode exception */
1937 cpu_abort(cs
, "Run mode exception while in user mode. "
1940 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
1941 cpu_abort(cs
, "Emulation trap exception not handled\n");
1943 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
1944 cpu_abort(cs
, "Instruction fetch TLB exception "
1945 "while in user-mode. Aborting");
1947 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
1948 cpu_abort(cs
, "Data load TLB exception while in user-mode. "
1951 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
1952 cpu_abort(cs
, "Data store TLB exception while in user-mode. "
1955 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
1956 cpu_abort(cs
, "Floating-point assist exception not handled\n");
1958 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1959 cpu_abort(cs
, "Instruction address breakpoint exception "
1962 case POWERPC_EXCP_SMI
: /* System management interrupt */
1963 cpu_abort(cs
, "System management interrupt while in user mode. "
1966 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1967 cpu_abort(cs
, "Thermal interrupt interrupt while in user mode. "
1970 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor IRQ */
1971 cpu_abort(cs
, "Performance monitor exception not handled\n");
1973 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1974 cpu_abort(cs
, "Vector assist exception not handled\n");
1976 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
1977 cpu_abort(cs
, "Soft patch exception not handled\n");
1979 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1980 cpu_abort(cs
, "Maintenance exception while in user mode. "
1983 case POWERPC_EXCP_STOP
: /* stop translation */
1984 /* We did invalidate the instruction cache. Go on */
1986 case POWERPC_EXCP_BRANCH
: /* branch instruction: */
1987 /* We just stopped because of a branch. Go on */
1989 case POWERPC_EXCP_SYSCALL_USER
:
1990 /* system call in user-mode emulation */
1992 * PPC ABI uses overflow flag in cr0 to signal an error
1995 env
->crf
[0] &= ~0x1;
1996 ret
= do_syscall(env
, env
->gpr
[0], env
->gpr
[3], env
->gpr
[4],
1997 env
->gpr
[5], env
->gpr
[6], env
->gpr
[7],
1999 if (ret
== -TARGET_ERESTARTSYS
) {
2003 if (ret
== (target_ulong
)(-TARGET_QEMU_ESIGRETURN
)) {
2004 /* Returning from a successful sigreturn syscall.
2005 Avoid corrupting register state. */
2008 if (ret
> (target_ulong
)(-515)) {
2014 case POWERPC_EXCP_STCX
:
2015 if (do_store_exclusive(env
)) {
2016 info
.si_signo
= TARGET_SIGSEGV
;
2018 info
.si_code
= TARGET_SEGV_MAPERR
;
2019 info
._sifields
._sigfault
._addr
= env
->nip
;
2020 queue_signal(env
, info
.si_signo
, &info
);
2027 sig
= gdb_handlesig(cs
, TARGET_SIGTRAP
);
2029 info
.si_signo
= sig
;
2031 info
.si_code
= TARGET_TRAP_BRKPT
;
2032 queue_signal(env
, info
.si_signo
, &info
);
2036 case EXCP_INTERRUPT
:
2037 /* just indicate that signals should be handled asap */
2040 cpu_abort(cs
, "Unknown exception 0x%d. Aborting\n", trapnr
);
2043 process_pending_signals(env
);
2050 # ifdef TARGET_ABI_MIPSO32
2051 # define MIPS_SYS(name, args) args,
2052 static const uint8_t mips_syscall_args
[] = {
2053 MIPS_SYS(sys_syscall
, 8) /* 4000 */
2054 MIPS_SYS(sys_exit
, 1)
2055 MIPS_SYS(sys_fork
, 0)
2056 MIPS_SYS(sys_read
, 3)
2057 MIPS_SYS(sys_write
, 3)
2058 MIPS_SYS(sys_open
, 3) /* 4005 */
2059 MIPS_SYS(sys_close
, 1)
2060 MIPS_SYS(sys_waitpid
, 3)
2061 MIPS_SYS(sys_creat
, 2)
2062 MIPS_SYS(sys_link
, 2)
2063 MIPS_SYS(sys_unlink
, 1) /* 4010 */
2064 MIPS_SYS(sys_execve
, 0)
2065 MIPS_SYS(sys_chdir
, 1)
2066 MIPS_SYS(sys_time
, 1)
2067 MIPS_SYS(sys_mknod
, 3)
2068 MIPS_SYS(sys_chmod
, 2) /* 4015 */
2069 MIPS_SYS(sys_lchown
, 3)
2070 MIPS_SYS(sys_ni_syscall
, 0)
2071 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_stat */
2072 MIPS_SYS(sys_lseek
, 3)
2073 MIPS_SYS(sys_getpid
, 0) /* 4020 */
2074 MIPS_SYS(sys_mount
, 5)
2075 MIPS_SYS(sys_umount
, 1)
2076 MIPS_SYS(sys_setuid
, 1)
2077 MIPS_SYS(sys_getuid
, 0)
2078 MIPS_SYS(sys_stime
, 1) /* 4025 */
2079 MIPS_SYS(sys_ptrace
, 4)
2080 MIPS_SYS(sys_alarm
, 1)
2081 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_fstat */
2082 MIPS_SYS(sys_pause
, 0)
2083 MIPS_SYS(sys_utime
, 2) /* 4030 */
2084 MIPS_SYS(sys_ni_syscall
, 0)
2085 MIPS_SYS(sys_ni_syscall
, 0)
2086 MIPS_SYS(sys_access
, 2)
2087 MIPS_SYS(sys_nice
, 1)
2088 MIPS_SYS(sys_ni_syscall
, 0) /* 4035 */
2089 MIPS_SYS(sys_sync
, 0)
2090 MIPS_SYS(sys_kill
, 2)
2091 MIPS_SYS(sys_rename
, 2)
2092 MIPS_SYS(sys_mkdir
, 2)
2093 MIPS_SYS(sys_rmdir
, 1) /* 4040 */
2094 MIPS_SYS(sys_dup
, 1)
2095 MIPS_SYS(sys_pipe
, 0)
2096 MIPS_SYS(sys_times
, 1)
2097 MIPS_SYS(sys_ni_syscall
, 0)
2098 MIPS_SYS(sys_brk
, 1) /* 4045 */
2099 MIPS_SYS(sys_setgid
, 1)
2100 MIPS_SYS(sys_getgid
, 0)
2101 MIPS_SYS(sys_ni_syscall
, 0) /* was signal(2) */
2102 MIPS_SYS(sys_geteuid
, 0)
2103 MIPS_SYS(sys_getegid
, 0) /* 4050 */
2104 MIPS_SYS(sys_acct
, 0)
2105 MIPS_SYS(sys_umount2
, 2)
2106 MIPS_SYS(sys_ni_syscall
, 0)
2107 MIPS_SYS(sys_ioctl
, 3)
2108 MIPS_SYS(sys_fcntl
, 3) /* 4055 */
2109 MIPS_SYS(sys_ni_syscall
, 2)
2110 MIPS_SYS(sys_setpgid
, 2)
2111 MIPS_SYS(sys_ni_syscall
, 0)
2112 MIPS_SYS(sys_olduname
, 1)
2113 MIPS_SYS(sys_umask
, 1) /* 4060 */
2114 MIPS_SYS(sys_chroot
, 1)
2115 MIPS_SYS(sys_ustat
, 2)
2116 MIPS_SYS(sys_dup2
, 2)
2117 MIPS_SYS(sys_getppid
, 0)
2118 MIPS_SYS(sys_getpgrp
, 0) /* 4065 */
2119 MIPS_SYS(sys_setsid
, 0)
2120 MIPS_SYS(sys_sigaction
, 3)
2121 MIPS_SYS(sys_sgetmask
, 0)
2122 MIPS_SYS(sys_ssetmask
, 1)
2123 MIPS_SYS(sys_setreuid
, 2) /* 4070 */
2124 MIPS_SYS(sys_setregid
, 2)
2125 MIPS_SYS(sys_sigsuspend
, 0)
2126 MIPS_SYS(sys_sigpending
, 1)
2127 MIPS_SYS(sys_sethostname
, 2)
2128 MIPS_SYS(sys_setrlimit
, 2) /* 4075 */
2129 MIPS_SYS(sys_getrlimit
, 2)
2130 MIPS_SYS(sys_getrusage
, 2)
2131 MIPS_SYS(sys_gettimeofday
, 2)
2132 MIPS_SYS(sys_settimeofday
, 2)
2133 MIPS_SYS(sys_getgroups
, 2) /* 4080 */
2134 MIPS_SYS(sys_setgroups
, 2)
2135 MIPS_SYS(sys_ni_syscall
, 0) /* old_select */
2136 MIPS_SYS(sys_symlink
, 2)
2137 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_lstat */
2138 MIPS_SYS(sys_readlink
, 3) /* 4085 */
2139 MIPS_SYS(sys_uselib
, 1)
2140 MIPS_SYS(sys_swapon
, 2)
2141 MIPS_SYS(sys_reboot
, 3)
2142 MIPS_SYS(old_readdir
, 3)
2143 MIPS_SYS(old_mmap
, 6) /* 4090 */
2144 MIPS_SYS(sys_munmap
, 2)
2145 MIPS_SYS(sys_truncate
, 2)
2146 MIPS_SYS(sys_ftruncate
, 2)
2147 MIPS_SYS(sys_fchmod
, 2)
2148 MIPS_SYS(sys_fchown
, 3) /* 4095 */
2149 MIPS_SYS(sys_getpriority
, 2)
2150 MIPS_SYS(sys_setpriority
, 3)
2151 MIPS_SYS(sys_ni_syscall
, 0)
2152 MIPS_SYS(sys_statfs
, 2)
2153 MIPS_SYS(sys_fstatfs
, 2) /* 4100 */
2154 MIPS_SYS(sys_ni_syscall
, 0) /* was ioperm(2) */
2155 MIPS_SYS(sys_socketcall
, 2)
2156 MIPS_SYS(sys_syslog
, 3)
2157 MIPS_SYS(sys_setitimer
, 3)
2158 MIPS_SYS(sys_getitimer
, 2) /* 4105 */
2159 MIPS_SYS(sys_newstat
, 2)
2160 MIPS_SYS(sys_newlstat
, 2)
2161 MIPS_SYS(sys_newfstat
, 2)
2162 MIPS_SYS(sys_uname
, 1)
2163 MIPS_SYS(sys_ni_syscall
, 0) /* 4110 was iopl(2) */
2164 MIPS_SYS(sys_vhangup
, 0)
2165 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_idle() */
2166 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_vm86 */
2167 MIPS_SYS(sys_wait4
, 4)
2168 MIPS_SYS(sys_swapoff
, 1) /* 4115 */
2169 MIPS_SYS(sys_sysinfo
, 1)
2170 MIPS_SYS(sys_ipc
, 6)
2171 MIPS_SYS(sys_fsync
, 1)
2172 MIPS_SYS(sys_sigreturn
, 0)
2173 MIPS_SYS(sys_clone
, 6) /* 4120 */
2174 MIPS_SYS(sys_setdomainname
, 2)
2175 MIPS_SYS(sys_newuname
, 1)
2176 MIPS_SYS(sys_ni_syscall
, 0) /* sys_modify_ldt */
2177 MIPS_SYS(sys_adjtimex
, 1)
2178 MIPS_SYS(sys_mprotect
, 3) /* 4125 */
2179 MIPS_SYS(sys_sigprocmask
, 3)
2180 MIPS_SYS(sys_ni_syscall
, 0) /* was create_module */
2181 MIPS_SYS(sys_init_module
, 5)
2182 MIPS_SYS(sys_delete_module
, 1)
2183 MIPS_SYS(sys_ni_syscall
, 0) /* 4130 was get_kernel_syms */
2184 MIPS_SYS(sys_quotactl
, 0)
2185 MIPS_SYS(sys_getpgid
, 1)
2186 MIPS_SYS(sys_fchdir
, 1)
2187 MIPS_SYS(sys_bdflush
, 2)
2188 MIPS_SYS(sys_sysfs
, 3) /* 4135 */
2189 MIPS_SYS(sys_personality
, 1)
2190 MIPS_SYS(sys_ni_syscall
, 0) /* for afs_syscall */
2191 MIPS_SYS(sys_setfsuid
, 1)
2192 MIPS_SYS(sys_setfsgid
, 1)
2193 MIPS_SYS(sys_llseek
, 5) /* 4140 */
2194 MIPS_SYS(sys_getdents
, 3)
2195 MIPS_SYS(sys_select
, 5)
2196 MIPS_SYS(sys_flock
, 2)
2197 MIPS_SYS(sys_msync
, 3)
2198 MIPS_SYS(sys_readv
, 3) /* 4145 */
2199 MIPS_SYS(sys_writev
, 3)
2200 MIPS_SYS(sys_cacheflush
, 3)
2201 MIPS_SYS(sys_cachectl
, 3)
2202 MIPS_SYS(sys_sysmips
, 4)
2203 MIPS_SYS(sys_ni_syscall
, 0) /* 4150 */
2204 MIPS_SYS(sys_getsid
, 1)
2205 MIPS_SYS(sys_fdatasync
, 0)
2206 MIPS_SYS(sys_sysctl
, 1)
2207 MIPS_SYS(sys_mlock
, 2)
2208 MIPS_SYS(sys_munlock
, 2) /* 4155 */
2209 MIPS_SYS(sys_mlockall
, 1)
2210 MIPS_SYS(sys_munlockall
, 0)
2211 MIPS_SYS(sys_sched_setparam
, 2)
2212 MIPS_SYS(sys_sched_getparam
, 2)
2213 MIPS_SYS(sys_sched_setscheduler
, 3) /* 4160 */
2214 MIPS_SYS(sys_sched_getscheduler
, 1)
2215 MIPS_SYS(sys_sched_yield
, 0)
2216 MIPS_SYS(sys_sched_get_priority_max
, 1)
2217 MIPS_SYS(sys_sched_get_priority_min
, 1)
2218 MIPS_SYS(sys_sched_rr_get_interval
, 2) /* 4165 */
2219 MIPS_SYS(sys_nanosleep
, 2)
2220 MIPS_SYS(sys_mremap
, 5)
2221 MIPS_SYS(sys_accept
, 3)
2222 MIPS_SYS(sys_bind
, 3)
2223 MIPS_SYS(sys_connect
, 3) /* 4170 */
2224 MIPS_SYS(sys_getpeername
, 3)
2225 MIPS_SYS(sys_getsockname
, 3)
2226 MIPS_SYS(sys_getsockopt
, 5)
2227 MIPS_SYS(sys_listen
, 2)
2228 MIPS_SYS(sys_recv
, 4) /* 4175 */
2229 MIPS_SYS(sys_recvfrom
, 6)
2230 MIPS_SYS(sys_recvmsg
, 3)
2231 MIPS_SYS(sys_send
, 4)
2232 MIPS_SYS(sys_sendmsg
, 3)
2233 MIPS_SYS(sys_sendto
, 6) /* 4180 */
2234 MIPS_SYS(sys_setsockopt
, 5)
2235 MIPS_SYS(sys_shutdown
, 2)
2236 MIPS_SYS(sys_socket
, 3)
2237 MIPS_SYS(sys_socketpair
, 4)
2238 MIPS_SYS(sys_setresuid
, 3) /* 4185 */
2239 MIPS_SYS(sys_getresuid
, 3)
2240 MIPS_SYS(sys_ni_syscall
, 0) /* was sys_query_module */
2241 MIPS_SYS(sys_poll
, 3)
2242 MIPS_SYS(sys_nfsservctl
, 3)
2243 MIPS_SYS(sys_setresgid
, 3) /* 4190 */
2244 MIPS_SYS(sys_getresgid
, 3)
2245 MIPS_SYS(sys_prctl
, 5)
2246 MIPS_SYS(sys_rt_sigreturn
, 0)
2247 MIPS_SYS(sys_rt_sigaction
, 4)
2248 MIPS_SYS(sys_rt_sigprocmask
, 4) /* 4195 */
2249 MIPS_SYS(sys_rt_sigpending
, 2)
2250 MIPS_SYS(sys_rt_sigtimedwait
, 4)
2251 MIPS_SYS(sys_rt_sigqueueinfo
, 3)
2252 MIPS_SYS(sys_rt_sigsuspend
, 0)
2253 MIPS_SYS(sys_pread64
, 6) /* 4200 */
2254 MIPS_SYS(sys_pwrite64
, 6)
2255 MIPS_SYS(sys_chown
, 3)
2256 MIPS_SYS(sys_getcwd
, 2)
2257 MIPS_SYS(sys_capget
, 2)
2258 MIPS_SYS(sys_capset
, 2) /* 4205 */
2259 MIPS_SYS(sys_sigaltstack
, 2)
2260 MIPS_SYS(sys_sendfile
, 4)
2261 MIPS_SYS(sys_ni_syscall
, 0)
2262 MIPS_SYS(sys_ni_syscall
, 0)
2263 MIPS_SYS(sys_mmap2
, 6) /* 4210 */
2264 MIPS_SYS(sys_truncate64
, 4)
2265 MIPS_SYS(sys_ftruncate64
, 4)
2266 MIPS_SYS(sys_stat64
, 2)
2267 MIPS_SYS(sys_lstat64
, 2)
2268 MIPS_SYS(sys_fstat64
, 2) /* 4215 */
2269 MIPS_SYS(sys_pivot_root
, 2)
2270 MIPS_SYS(sys_mincore
, 3)
2271 MIPS_SYS(sys_madvise
, 3)
2272 MIPS_SYS(sys_getdents64
, 3)
2273 MIPS_SYS(sys_fcntl64
, 3) /* 4220 */
2274 MIPS_SYS(sys_ni_syscall
, 0)
2275 MIPS_SYS(sys_gettid
, 0)
2276 MIPS_SYS(sys_readahead
, 5)
2277 MIPS_SYS(sys_setxattr
, 5)
2278 MIPS_SYS(sys_lsetxattr
, 5) /* 4225 */
2279 MIPS_SYS(sys_fsetxattr
, 5)
2280 MIPS_SYS(sys_getxattr
, 4)
2281 MIPS_SYS(sys_lgetxattr
, 4)
2282 MIPS_SYS(sys_fgetxattr
, 4)
2283 MIPS_SYS(sys_listxattr
, 3) /* 4230 */
2284 MIPS_SYS(sys_llistxattr
, 3)
2285 MIPS_SYS(sys_flistxattr
, 3)
2286 MIPS_SYS(sys_removexattr
, 2)
2287 MIPS_SYS(sys_lremovexattr
, 2)
2288 MIPS_SYS(sys_fremovexattr
, 2) /* 4235 */
2289 MIPS_SYS(sys_tkill
, 2)
2290 MIPS_SYS(sys_sendfile64
, 5)
2291 MIPS_SYS(sys_futex
, 6)
2292 MIPS_SYS(sys_sched_setaffinity
, 3)
2293 MIPS_SYS(sys_sched_getaffinity
, 3) /* 4240 */
2294 MIPS_SYS(sys_io_setup
, 2)
2295 MIPS_SYS(sys_io_destroy
, 1)
2296 MIPS_SYS(sys_io_getevents
, 5)
2297 MIPS_SYS(sys_io_submit
, 3)
2298 MIPS_SYS(sys_io_cancel
, 3) /* 4245 */
2299 MIPS_SYS(sys_exit_group
, 1)
2300 MIPS_SYS(sys_lookup_dcookie
, 3)
2301 MIPS_SYS(sys_epoll_create
, 1)
2302 MIPS_SYS(sys_epoll_ctl
, 4)
2303 MIPS_SYS(sys_epoll_wait
, 3) /* 4250 */
2304 MIPS_SYS(sys_remap_file_pages
, 5)
2305 MIPS_SYS(sys_set_tid_address
, 1)
2306 MIPS_SYS(sys_restart_syscall
, 0)
2307 MIPS_SYS(sys_fadvise64_64
, 7)
2308 MIPS_SYS(sys_statfs64
, 3) /* 4255 */
2309 MIPS_SYS(sys_fstatfs64
, 2)
2310 MIPS_SYS(sys_timer_create
, 3)
2311 MIPS_SYS(sys_timer_settime
, 4)
2312 MIPS_SYS(sys_timer_gettime
, 2)
2313 MIPS_SYS(sys_timer_getoverrun
, 1) /* 4260 */
2314 MIPS_SYS(sys_timer_delete
, 1)
2315 MIPS_SYS(sys_clock_settime
, 2)
2316 MIPS_SYS(sys_clock_gettime
, 2)
2317 MIPS_SYS(sys_clock_getres
, 2)
2318 MIPS_SYS(sys_clock_nanosleep
, 4) /* 4265 */
2319 MIPS_SYS(sys_tgkill
, 3)
2320 MIPS_SYS(sys_utimes
, 2)
2321 MIPS_SYS(sys_mbind
, 4)
2322 MIPS_SYS(sys_ni_syscall
, 0) /* sys_get_mempolicy */
2323 MIPS_SYS(sys_ni_syscall
, 0) /* 4270 sys_set_mempolicy */
2324 MIPS_SYS(sys_mq_open
, 4)
2325 MIPS_SYS(sys_mq_unlink
, 1)
2326 MIPS_SYS(sys_mq_timedsend
, 5)
2327 MIPS_SYS(sys_mq_timedreceive
, 5)
2328 MIPS_SYS(sys_mq_notify
, 2) /* 4275 */
2329 MIPS_SYS(sys_mq_getsetattr
, 3)
2330 MIPS_SYS(sys_ni_syscall
, 0) /* sys_vserver */
2331 MIPS_SYS(sys_waitid
, 4)
2332 MIPS_SYS(sys_ni_syscall
, 0) /* available, was setaltroot */
2333 MIPS_SYS(sys_add_key
, 5)
2334 MIPS_SYS(sys_request_key
, 4)
2335 MIPS_SYS(sys_keyctl
, 5)
2336 MIPS_SYS(sys_set_thread_area
, 1)
2337 MIPS_SYS(sys_inotify_init
, 0)
2338 MIPS_SYS(sys_inotify_add_watch
, 3) /* 4285 */
2339 MIPS_SYS(sys_inotify_rm_watch
, 2)
2340 MIPS_SYS(sys_migrate_pages
, 4)
2341 MIPS_SYS(sys_openat
, 4)
2342 MIPS_SYS(sys_mkdirat
, 3)
2343 MIPS_SYS(sys_mknodat
, 4) /* 4290 */
2344 MIPS_SYS(sys_fchownat
, 5)
2345 MIPS_SYS(sys_futimesat
, 3)
2346 MIPS_SYS(sys_fstatat64
, 4)
2347 MIPS_SYS(sys_unlinkat
, 3)
2348 MIPS_SYS(sys_renameat
, 4) /* 4295 */
2349 MIPS_SYS(sys_linkat
, 5)
2350 MIPS_SYS(sys_symlinkat
, 3)
2351 MIPS_SYS(sys_readlinkat
, 4)
2352 MIPS_SYS(sys_fchmodat
, 3)
2353 MIPS_SYS(sys_faccessat
, 3) /* 4300 */
2354 MIPS_SYS(sys_pselect6
, 6)
2355 MIPS_SYS(sys_ppoll
, 5)
2356 MIPS_SYS(sys_unshare
, 1)
2357 MIPS_SYS(sys_splice
, 6)
2358 MIPS_SYS(sys_sync_file_range
, 7) /* 4305 */
2359 MIPS_SYS(sys_tee
, 4)
2360 MIPS_SYS(sys_vmsplice
, 4)
2361 MIPS_SYS(sys_move_pages
, 6)
2362 MIPS_SYS(sys_set_robust_list
, 2)
2363 MIPS_SYS(sys_get_robust_list
, 3) /* 4310 */
2364 MIPS_SYS(sys_kexec_load
, 4)
2365 MIPS_SYS(sys_getcpu
, 3)
2366 MIPS_SYS(sys_epoll_pwait
, 6)
2367 MIPS_SYS(sys_ioprio_set
, 3)
2368 MIPS_SYS(sys_ioprio_get
, 2)
2369 MIPS_SYS(sys_utimensat
, 4)
2370 MIPS_SYS(sys_signalfd
, 3)
2371 MIPS_SYS(sys_ni_syscall
, 0) /* was timerfd */
2372 MIPS_SYS(sys_eventfd
, 1)
2373 MIPS_SYS(sys_fallocate
, 6) /* 4320 */
2374 MIPS_SYS(sys_timerfd_create
, 2)
2375 MIPS_SYS(sys_timerfd_gettime
, 2)
2376 MIPS_SYS(sys_timerfd_settime
, 4)
2377 MIPS_SYS(sys_signalfd4
, 4)
2378 MIPS_SYS(sys_eventfd2
, 2) /* 4325 */
2379 MIPS_SYS(sys_epoll_create1
, 1)
2380 MIPS_SYS(sys_dup3
, 3)
2381 MIPS_SYS(sys_pipe2
, 2)
2382 MIPS_SYS(sys_inotify_init1
, 1)
2383 MIPS_SYS(sys_preadv
, 6) /* 4330 */
2384 MIPS_SYS(sys_pwritev
, 6)
2385 MIPS_SYS(sys_rt_tgsigqueueinfo
, 4)
2386 MIPS_SYS(sys_perf_event_open
, 5)
2387 MIPS_SYS(sys_accept4
, 4)
2388 MIPS_SYS(sys_recvmmsg
, 5) /* 4335 */
2389 MIPS_SYS(sys_fanotify_init
, 2)
2390 MIPS_SYS(sys_fanotify_mark
, 6)
2391 MIPS_SYS(sys_prlimit64
, 4)
2392 MIPS_SYS(sys_name_to_handle_at
, 5)
2393 MIPS_SYS(sys_open_by_handle_at
, 3) /* 4340 */
2394 MIPS_SYS(sys_clock_adjtime
, 2)
2395 MIPS_SYS(sys_syncfs
, 1)
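
/*
 * Illustrative note (not original text): for the o32 ABI this table is
 * indexed by (guest syscall number - 4000) and stores the argument count
 * that the EXCP_SYSCALL handling in cpu_loop() below relies on, roughly:
 *
 *     syscall_num = env->active_tc.gpr[2] - 4000;
 *     nb_args = mips_syscall_args[syscall_num];
 *     // arguments 5..8, if present, are read from the guest stack at
 *     // gpr[29] + 16/20/24/28 before calling do_syscall()
 */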
2400 static int do_store_exclusive(CPUMIPSState
*env
)
2403 target_ulong page_addr
;
2411 page_addr
= addr
& TARGET_PAGE_MASK
;
2414 flags
= page_get_flags(page_addr
);
2415 if ((flags
& PAGE_READ
) == 0) {
2418 reg
= env
->llreg
& 0x1f;
2419 d
= (env
->llreg
& 0x20) != 0;
2421 segv
= get_user_s64(val
, addr
);
2423 segv
= get_user_s32(val
, addr
);
2426 if (val
!= env
->llval
) {
2427 env
->active_tc
.gpr
[reg
] = 0;
2430 segv
= put_user_u64(env
->llnewval
, addr
);
2432 segv
= put_user_u32(env
->llnewval
, addr
);
2435 env
->active_tc
.gpr
[reg
] = 1;
2442 env
->active_tc
.PC
+= 4;
2455 static int do_break(CPUMIPSState
*env
, target_siginfo_t
*info
,
2463 info
->si_signo
= TARGET_SIGFPE
;
2465 info
->si_code
= (code
== BRK_OVERFLOW
) ? FPE_INTOVF
: FPE_INTDIV
;
2466 queue_signal(env
, info
->si_signo
, &*info
);
2470 info
->si_signo
= TARGET_SIGTRAP
;
2472 queue_signal(env
, info
->si_signo
, &*info
);
void cpu_loop(CPUMIPSState *env)
    CPUState *cs = CPU(mips_env_get_cpu(env));
    target_siginfo_t info;
# ifdef TARGET_ABI_MIPSO32
    unsigned int syscall_num;
    trapnr = cpu_mips_exec(cs);
    env->active_tc.PC += 4;
# ifdef TARGET_ABI_MIPSO32
    syscall_num = env->active_tc.gpr[2] - 4000;
    if (syscall_num >= sizeof(mips_syscall_args)) {
    ret = -TARGET_ENOSYS;
    abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;
    nb_args = mips_syscall_args[syscall_num];
    sp_reg = env->active_tc.gpr[29];
    /* these arguments are taken from the stack */
    if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) {
    if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) {
    if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) {
    if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) {
    ret = do_syscall(env, env->active_tc.gpr[2],
                     env->active_tc.gpr[4],
                     env->active_tc.gpr[5],
                     env->active_tc.gpr[6],
                     env->active_tc.gpr[7],
                     arg5, arg6, arg7, arg8);
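/*
 * For reference: under the o32 ABI handled here the syscall number arrives
 * in $v0 (gpr[2], biased by 4000), the first four arguments in $a0-$a3
 * (gpr[4]-gpr[7]), and arguments five to eight are fetched from the user
 * stack at $sp+16..$sp+28, as done above.  The n32/n64 path below instead
 * passes all eight arguments in registers gpr[4]-gpr[11].
 */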
    ret = do_syscall(env, env->active_tc.gpr[2],
                     env->active_tc.gpr[4], env->active_tc.gpr[5],
                     env->active_tc.gpr[6], env->active_tc.gpr[7],
                     env->active_tc.gpr[8], env->active_tc.gpr[9],
                     env->active_tc.gpr[10], env->active_tc.gpr[11]);
    if (ret == -TARGET_ERESTARTSYS) {
    env->active_tc.PC -= 4;
    if (ret == -TARGET_QEMU_ESIGRETURN) {
    /* Returning from a successful sigreturn syscall.
       Avoid clobbering register state. */
    if ((abi_ulong)ret >= (abi_ulong)-1133) {
    env->active_tc.gpr[7] = 1; /* error flag */
    env->active_tc.gpr[7] = 0; /* error flag */
    env->active_tc.gpr[2] = ret;
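/*
 * MIPS reports syscall failure out of band: $a3 (gpr[7]) acts as an error
 * flag, set when the raw result falls in the errno range and cleared
 * otherwise, while the result itself goes back in $v0 (gpr[2]).  The -1133
 * bound presumably corresponds to the largest errno value defined for MIPS.
 */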
    info.si_signo = TARGET_SIGSEGV;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->CP0_BadVAddr;
    queue_signal(env, info.si_signo, &info);
    info.si_signo = TARGET_SIGILL;
    queue_signal(env, info.si_signo, &info);
    case EXCP_INTERRUPT:
    /* just indicate that signals should be handled asap */
    sig = gdb_handlesig(cs, TARGET_SIGTRAP);
    info.si_signo = sig;
    info.si_code = TARGET_TRAP_BRKPT;
    queue_signal(env, info.si_signo, &info);
    if (do_store_exclusive(env)) {
    info.si_signo = TARGET_SIGSEGV;
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->active_tc.PC;
    queue_signal(env, info.si_signo, &info);
    info.si_signo = TARGET_SIGILL;
    info.si_code = TARGET_ILL_ILLOPC;
    queue_signal(env, info.si_signo, &info);
    /* The code below was inspired by the MIPS Linux kernel trap
     * handling code in arch/mips/kernel/traps.c.
    abi_ulong trap_instr;
    if (env->hflags & MIPS_HFLAG_M16) {
    if (env->insn_flags & ASE_MICROMIPS) {
    /* microMIPS mode */
    ret = get_user_u16(trap_instr, env->active_tc.PC);
    if ((trap_instr >> 10) == 0x11) {
    /* 16-bit instruction */
    code = trap_instr & 0xf;
    /* 32-bit instruction */
    ret = get_user_u16(instr_lo,
                       env->active_tc.PC + 2);
    trap_instr = (trap_instr << 16) | instr_lo;
    code = ((trap_instr >> 6) & ((1 << 20) - 1));
    /* Unfortunately, microMIPS also suffers from
       the old assembler bug...  */
    if (code >= (1 << 10)) {
    ret = get_user_u16(trap_instr, env->active_tc.PC);
    code = (trap_instr >> 6) & 0x3f;
    ret = get_user_u32(trap_instr, env->active_tc.PC);
    /* As described in the original Linux kernel code, the
     * below checks on 'code' are to work around an old
    code = ((trap_instr >> 6) & ((1 << 20) - 1));
    if (code >= (1 << 10)) {
    if (do_break(env, &info, code) != 0) {
    abi_ulong trap_instr;
    unsigned int code = 0;
    if (env->hflags & MIPS_HFLAG_M16) {
    /* microMIPS mode */
    ret = get_user_u16(instr[0], env->active_tc.PC) ||
          get_user_u16(instr[1], env->active_tc.PC + 2);
    trap_instr = (instr[0] << 16) | instr[1];
    ret = get_user_u32(trap_instr, env->active_tc.PC);
    /* The immediate versions don't provide a code.  */
    if (!(trap_instr & 0xFC000000)) {
    if (env->hflags & MIPS_HFLAG_M16) {
    /* microMIPS mode */
    code = ((trap_instr >> 12) & ((1 << 4) - 1));
    code = ((trap_instr >> 6) & ((1 << 10) - 1));
    if (do_break(env, &info, code) != 0) {
    EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
    process_pending_signals(env);
#ifdef TARGET_OPENRISC
void cpu_loop(CPUOpenRISCState *env)
    CPUState *cs = CPU(openrisc_env_get_cpu(env));
    trapnr = cpu_openrisc_exec(cs);
    qemu_log_mask(CPU_LOG_INT, "\nReset request, exit, pc is %#x\n", env->pc);
    qemu_log_mask(CPU_LOG_INT, "\nBus error, exit, pc is %#x\n", env->pc);
    gdbsig = TARGET_SIGBUS;
    cpu_dump_state(cs, stderr, fprintf, 0);
    gdbsig = TARGET_SIGSEGV;
    qemu_log_mask(CPU_LOG_INT, "\nTick time interrupt pc is %#x\n", env->pc);
    qemu_log_mask(CPU_LOG_INT, "\nAlignment pc is %#x\n", env->pc);
    gdbsig = TARGET_SIGBUS;
    qemu_log_mask(CPU_LOG_INT, "\nIllegal instruction, pc is %#x\n", env->pc);
    gdbsig = TARGET_SIGILL;
    qemu_log_mask(CPU_LOG_INT, "\nExternal interrupt, pc is %#x\n", env->pc);
    qemu_log_mask(CPU_LOG_INT, "\nTLB miss\n");
    qemu_log_mask(CPU_LOG_INT, "\nRange\n");
    gdbsig = TARGET_SIGSEGV;
    env->pc += 4;   /* 0xc00; */
    ret = do_syscall(env,
                     env->gpr[11], /* return value */
                     env->gpr[3],  /* r3 - r7 are params */
    if (ret == -TARGET_ERESTARTSYS) {
    } else if (ret != -TARGET_QEMU_ESIGRETURN) {
    qemu_log_mask(CPU_LOG_INT, "\nFloating point error\n");
    qemu_log_mask(CPU_LOG_INT, "\nTrap\n");
    gdbsig = TARGET_SIGTRAP;
    qemu_log_mask(CPU_LOG_INT, "\nNR\n");
    EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n",
    gdbsig = TARGET_SIGILL;
    gdb_handlesig(cs, gdbsig);
    if (gdbsig != TARGET_SIGTRAP) {
    process_pending_signals(env);
#endif /* TARGET_OPENRISC */
void cpu_loop(CPUSH4State *env)
    CPUState *cs = CPU(sh_env_get_cpu(env));
    target_siginfo_t info;
    trapnr = cpu_sh4_exec(cs);
    ret = do_syscall(env,
    if (ret == -TARGET_ERESTARTSYS) {
    } else if (ret != -TARGET_QEMU_ESIGRETURN) {
    env->gregs[0] = ret;
    case EXCP_INTERRUPT:
    /* just indicate that signals should be handled asap */
    sig = gdb_handlesig(cs, TARGET_SIGTRAP);
    info.si_signo = sig;
    info.si_code = TARGET_TRAP_BRKPT;
    queue_signal(env, info.si_signo, &info);
    info.si_signo = TARGET_SIGSEGV;
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->tea;
    queue_signal(env, info.si_signo, &info);
    printf("Unhandled trap: 0x%x\n", trapnr);
    cpu_dump_state(cs, stderr, fprintf, 0);
    process_pending_signals(env);
void cpu_loop(CPUCRISState *env)
    CPUState *cs = CPU(cris_env_get_cpu(env));
    target_siginfo_t info;
    trapnr = cpu_cris_exec(cs);
    info.si_signo = TARGET_SIGSEGV;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->pregs[PR_EDA];
    queue_signal(env, info.si_signo, &info);
    case EXCP_INTERRUPT:
    /* just indicate that signals should be handled asap */
    ret = do_syscall(env,
    if (ret == -TARGET_ERESTARTSYS) {
    } else if (ret != -TARGET_QEMU_ESIGRETURN) {
    env->regs[10] = ret;
    sig = gdb_handlesig(cs, TARGET_SIGTRAP);
    info.si_signo = sig;
    info.si_code = TARGET_TRAP_BRKPT;
    queue_signal(env, info.si_signo, &info);
    printf("Unhandled trap: 0x%x\n", trapnr);
    cpu_dump_state(cs, stderr, fprintf, 0);
    process_pending_signals(env);
#ifdef TARGET_MICROBLAZE
void cpu_loop(CPUMBState *env)
    CPUState *cs = CPU(mb_env_get_cpu(env));
    target_siginfo_t info;
    trapnr = cpu_mb_exec(cs);
    info.si_signo = TARGET_SIGSEGV;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = 0;
    queue_signal(env, info.si_signo, &info);
    case EXCP_INTERRUPT:
    /* just indicate that signals should be handled asap */
    /* Return address is 4 bytes after the call. */
    env->sregs[SR_PC] = env->regs[14];
    ret = do_syscall(env,
    if (ret == -TARGET_ERESTARTSYS) {
    /* Wind back to before the syscall. */
    env->sregs[SR_PC] -= 4;
    } else if (ret != -TARGET_QEMU_ESIGRETURN) {
    /* All syscall exits result in guest r14 being equal to the
     * PC we return to, because the kernel syscall exit "rtbd" does
     * this. (This is true even for sigreturn(); note that r14 is
     * not a userspace-usable register, as the kernel may clobber it
    env->regs[14] = env->sregs[SR_PC];
    env->regs[17] = env->sregs[SR_PC] + 4;
    if (env->iflags & D_FLAG) {
    env->sregs[SR_ESR] |= 1 << 12;
    env->sregs[SR_PC] -= 4;
    /* FIXME: if branch was immed, replay the imm as well. */
    env->iflags &= ~(IMM_FLAG | D_FLAG);
    switch (env->sregs[SR_ESR] & 31) {
    case ESR_EC_DIVZERO:
    info.si_signo = TARGET_SIGFPE;
    info.si_code = TARGET_FPE_FLTDIV;
    info._sifields._sigfault._addr = 0;
    queue_signal(env, info.si_signo, &info);
    info.si_signo = TARGET_SIGFPE;
    if (env->sregs[SR_FSR] & FSR_IO) {
    info.si_code = TARGET_FPE_FLTINV;
    if (env->sregs[SR_FSR] & FSR_DZ) {
    info.si_code = TARGET_FPE_FLTDIV;
    info._sifields._sigfault._addr = 0;
    queue_signal(env, info.si_signo, &info);
    printf("Unhandled hw-exception: 0x%x\n",
           env->sregs[SR_ESR] & ESR_EC_MASK);
    cpu_dump_state(cs, stderr, fprintf, 0);
    sig = gdb_handlesig(cs, TARGET_SIGTRAP);
    info.si_signo = sig;
    info.si_code = TARGET_TRAP_BRKPT;
    queue_signal(env, info.si_signo, &info);
    printf("Unhandled trap: 0x%x\n", trapnr);
    cpu_dump_state(cs, stderr, fprintf, 0);
    process_pending_signals(env);
void cpu_loop(CPUM68KState *env)
    CPUState *cs = CPU(m68k_env_get_cpu(env));
    target_siginfo_t info;
    TaskState *ts = cs->opaque;
    trapnr = cpu_m68k_exec(cs);
    if (ts->sim_syscalls) {
    get_user_u16(nr, env->pc + 2);
    do_m68k_simcall(env, nr);
    case EXCP_HALT_INSN:
    /* Semihosting syscall. */
    do_m68k_semihosting(env, env->dregs[0]);
    case EXCP_UNSUPPORTED:
    info.si_signo = TARGET_SIGILL;
    info.si_code = TARGET_ILL_ILLOPN;
    info._sifields._sigfault._addr = env->pc;
    queue_signal(env, info.si_signo, &info);
    ts->sim_syscalls = 0;
    ret = do_syscall(env,
    if (ret == -TARGET_ERESTARTSYS) {
    } else if (ret != -TARGET_QEMU_ESIGRETURN) {
    env->dregs[0] = ret;
    case EXCP_INTERRUPT:
    /* just indicate that signals should be handled asap */
    info.si_signo = TARGET_SIGSEGV;
    /* XXX: check env->error_code */
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = env->mmu.ar;
    queue_signal(env, info.si_signo, &info);
    sig = gdb_handlesig(cs, TARGET_SIGTRAP);
    info.si_signo = sig;
    info.si_code = TARGET_TRAP_BRKPT;
    queue_signal(env, info.si_signo, &info);
    EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
    process_pending_signals(env);
#endif /* TARGET_M68K */
static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)
    target_ulong addr, val, tmp;
    target_siginfo_t info;
    addr = env->lock_addr;
    tmp = env->lock_st_addr;
    env->lock_addr = -1;
    env->lock_st_addr = 0;
    if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
    if (val == env->lock_value) {
    if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {
    info.si_signo = TARGET_SIGSEGV;
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, TARGET_SIGSEGV, &info);
void cpu_loop(CPUAlphaState *env)
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    target_siginfo_t info;
    trapnr = cpu_alpha_exec(cs);
    /* All of the traps imply a transition through PALcode, which
       implies an REI instruction has been executed.  Which means
       that the intr_flag should be cleared.
    fprintf(stderr, "Reset requested. Exit\n");
    fprintf(stderr, "Machine check exception. Exit\n");
    case EXCP_SMP_INTERRUPT:
    case EXCP_CLK_INTERRUPT:
    case EXCP_DEV_INTERRUPT:
    fprintf(stderr, "External interrupt. Exit\n");
    env->lock_addr = -1;
    info.si_signo = TARGET_SIGSEGV;
    info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
                    ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
    info._sifields._sigfault._addr = env->trap_arg0;
    queue_signal(env, info.si_signo, &info);
    env->lock_addr = -1;
    info.si_signo = TARGET_SIGBUS;
    info.si_code = TARGET_BUS_ADRALN;
    info._sifields._sigfault._addr = env->trap_arg0;
    queue_signal(env, info.si_signo, &info);
    env->lock_addr = -1;
    info.si_signo = TARGET_SIGILL;
    info.si_code = TARGET_ILL_ILLOPC;
    info._sifields._sigfault._addr = env->pc;
    queue_signal(env, info.si_signo, &info);
    env->lock_addr = -1;
    info.si_signo = TARGET_SIGFPE;
    info.si_code = TARGET_FPE_FLTINV;
    info._sifields._sigfault._addr = env->pc;
    queue_signal(env, info.si_signo, &info);
    /* No-op.  Linux simply re-enables the FPU. */
    env->lock_addr = -1;
    switch (env->error_code) {
    info.si_signo = TARGET_SIGTRAP;
    info.si_code = TARGET_TRAP_BRKPT;
    info._sifields._sigfault._addr = env->pc;
    queue_signal(env, info.si_signo, &info);
    info.si_signo = TARGET_SIGTRAP;
    info._sifields._sigfault._addr = env->pc;
    queue_signal(env, info.si_signo, &info);
    trapnr = env->ir[IR_V0];
    sysret = do_syscall(env, trapnr,
                        env->ir[IR_A0], env->ir[IR_A1],
                        env->ir[IR_A2], env->ir[IR_A3],
                        env->ir[IR_A4], env->ir[IR_A5],
    if (sysret == -TARGET_ERESTARTSYS) {
    if (sysret == -TARGET_QEMU_ESIGRETURN) {
    /* Syscall writes 0 to V0 to bypass error check, similar
       to how this is handled internal to Linux kernel.
       (Ab)use trapnr temporarily as boolean indicating error.  */
    trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
    env->ir[IR_V0] = (trapnr ? -sysret : sysret);
    env->ir[IR_A3] = trapnr;
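/*
 * As on MIPS above, Alpha signals syscall failure out of band: $a3 (IR_A3)
 * carries the error flag and $v0 (IR_V0) the result, with a failed call
 * returning the positive errno value in $v0.  The "write 0 to V0 first"
 * trick noted in the comment lets individual syscall implementations
 * bypass this error check.
 */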
    /* ??? We can probably elide the code using page_unprotect
       that is checking for self-modifying code.  Instead we
       could simply call tb_flush here.  Until we work out the
       changes required to turn off the extra write protection,
       this can be a no-op.  */
    /* Handled in the translator for usermode. */
    /* Handled in the translator for usermode. */
    info.si_signo = TARGET_SIGFPE;
    switch (env->ir[IR_A0]) {
    case TARGET_GEN_INTOVF:
    info.si_code = TARGET_FPE_INTOVF;
    case TARGET_GEN_INTDIV:
    info.si_code = TARGET_FPE_INTDIV;
    case TARGET_GEN_FLTOVF:
    info.si_code = TARGET_FPE_FLTOVF;
    case TARGET_GEN_FLTUND:
    info.si_code = TARGET_FPE_FLTUND;
    case TARGET_GEN_FLTINV:
    info.si_code = TARGET_FPE_FLTINV;
    case TARGET_GEN_FLTINE:
    info.si_code = TARGET_FPE_FLTRES;
    case TARGET_GEN_ROPRAND:
    info.si_signo = TARGET_SIGTRAP;
    info._sifields._sigfault._addr = env->pc;
    queue_signal(env, info.si_signo, &info);
    info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
    if (info.si_signo) {
    env->lock_addr = -1;
    info.si_code = TARGET_TRAP_BRKPT;
    queue_signal(env, info.si_signo, &info);
    do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
    case EXCP_INTERRUPT:
    /* Just indicate that signals should be handled asap. */
    printf("Unhandled trap: 0x%x\n", trapnr);
    cpu_dump_state(cs, stderr, fprintf, 0);
    process_pending_signals(env);
#endif /* TARGET_ALPHA */
void cpu_loop(CPUS390XState *env)
    CPUState *cs = CPU(s390_env_get_cpu(env));
    target_siginfo_t info;
    trapnr = cpu_s390x_exec(cs);
    case EXCP_INTERRUPT:
    /* Just indicate that signals should be handled asap. */
    n = env->int_svc_code;
    /* syscalls > 255 */
    env->psw.addr += env->int_svc_ilen;
    ret = do_syscall(env, n, env->regs[2], env->regs[3],
                     env->regs[4], env->regs[5],
                     env->regs[6], env->regs[7], 0, 0);
    if (ret == -TARGET_ERESTARTSYS) {
    env->psw.addr -= env->int_svc_ilen;
    } else if (ret != -TARGET_QEMU_ESIGRETURN) {
    sig = gdb_handlesig(cs, TARGET_SIGTRAP);
    n = TARGET_TRAP_BRKPT;
    n = env->int_pgm_code;
    case PGM_PRIVILEGED:
    sig = TARGET_SIGILL;
    n = TARGET_ILL_ILLOPC;
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    sig = TARGET_SIGSEGV;
    /* XXX: check env->error_code */
    n = TARGET_SEGV_MAPERR;
    addr = env->__excp_addr;
    case PGM_SPECIFICATION:
    case PGM_SPECIAL_OP:
    sig = TARGET_SIGILL;
    n = TARGET_ILL_ILLOPN;
    case PGM_FIXPT_OVERFLOW:
    sig = TARGET_SIGFPE;
    n = TARGET_FPE_INTOVF;
    case PGM_FIXPT_DIVIDE:
    sig = TARGET_SIGFPE;
    n = TARGET_FPE_INTDIV;
    n = (env->fpc >> 8) & 0xff;
    /* compare-and-trap */
    /* An IEEE exception, simulated or otherwise. */
    n = TARGET_FPE_FLTINV;
    } else if (n & 0x40) {
    n = TARGET_FPE_FLTDIV;
    } else if (n & 0x20) {
    n = TARGET_FPE_FLTOVF;
    } else if (n & 0x10) {
    n = TARGET_FPE_FLTUND;
    } else if (n & 0x08) {
    n = TARGET_FPE_FLTRES;
    /* ??? Quantum exception; BFP, DFP error. */
    sig = TARGET_SIGFPE;
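/*
 * The byte extracted from the FPC above is the data-exception code (DXC);
 * the chained tests map its IEEE condition bits (invalid operation, divide
 * by zero, overflow, underflow, inexact) onto the matching TARGET_FPE_*
 * codes, falling back to a plain SIGFPE otherwise.
 */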
    fprintf(stderr, "Unhandled program exception: %#x\n", n);
    cpu_dump_state(cs, stderr, fprintf, 0);
    addr = env->psw.addr;
    info.si_signo = sig;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, info.si_signo, &info);
    fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
    cpu_dump_state(cs, stderr, fprintf, 0);
    process_pending_signals(env);
#endif /* TARGET_S390X */
#ifdef TARGET_TILEGX

static void gen_sigill_reg(CPUTLGState *env)
    target_siginfo_t info;
    info.si_signo = TARGET_SIGILL;
    info.si_code = TARGET_ILL_PRVREG;
    info._sifields._sigfault._addr = env->pc;
    queue_signal(env, info.si_signo, &info);

static void do_signal(CPUTLGState *env, int signo, int sigcode)
    target_siginfo_t info;
    info.si_signo = signo;
    info._sifields._sigfault._addr = env->pc;
    if (signo == TARGET_SIGSEGV) {
    /* The passed in sigcode is a dummy; check for a page mapping
       and pass either MAPERR or ACCERR. */
    target_ulong addr = env->excaddr;
    info._sifields._sigfault._addr = addr;
    if (page_check_range(addr, 1, PAGE_VALID) < 0) {
    sigcode = TARGET_SEGV_MAPERR;
    sigcode = TARGET_SEGV_ACCERR;
    info.si_code = sigcode;
    queue_signal(env, info.si_signo, &info);

static void gen_sigsegv_maperr(CPUTLGState *env, target_ulong addr)
    env->excaddr = addr;
    do_signal(env, TARGET_SIGSEGV, 0);

static void set_regval(CPUTLGState *env, uint8_t reg, uint64_t val)
    if (unlikely(reg >= TILEGX_R_COUNT)) {
    gen_sigill_reg(env);
    g_assert_not_reached();
    env->regs[reg] = val;
 * Compare the 8-byte contents of the CmpValue SPR with the 8-byte value in
 * memory at the address held in the first source register. If the values are
 * not equal, then no memory operation is performed. If the values are equal,
 * the 8-byte quantity from the second source register is written into memory
 * at the address held in the first source register. In either case, the result
 * of the instruction is the value read from memory. The compare and write to
 * memory are atomic and thus can be used for synchronization purposes. This
 * instruction only operates for addresses aligned to a 8-byte boundary.
 * Unaligned memory access causes an Unaligned Data Reference interrupt.
 *
 * Functional Description (64-bit)
 *       uint64_t memVal = memoryReadDoubleWord (rf[SrcA]);
 *       rf[Dest] = memVal;
 *       if (memVal == SPR[CmpValueSPR])
 *           memoryWriteDoubleWord (rf[SrcA], rf[SrcB]);
 *
 * Functional Description (32-bit)
 *       uint64_t memVal = signExtend32 (memoryReadWord (rf[SrcA]));
 *       rf[Dest] = memVal;
 *       if (memVal == signExtend32 (SPR[CmpValueSPR]))
 *           memoryWriteWord (rf[SrcA], rf[SrcB]);
 *
 * This function also processes exch and exch4 which need not process SPR.
static void do_exch(CPUTLGState *env, bool quad, bool cmp)
    target_long val, sprval;
    addr = env->atomic_srca;
    if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
    goto sigsegv_maperr;
    sprval = env->spregs[TILEGX_SPR_CMPEXCH];
    sprval = sextract64(env->spregs[TILEGX_SPR_CMPEXCH], 0, 32);
    if (!cmp || val == sprval) {
    target_long valb = env->atomic_srcb;
    if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
    goto sigsegv_maperr;
    set_regval(env, env->atomic_dstr, val);
    gen_sigsegv_maperr(env, addr);
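/*
 * A minimal host-side sketch of the 64-bit cmpexch data flow described in
 * the comment above, for illustration only: it operates on ordinary host
 * memory rather than guest memory and ignores alignment and atomicity,
 * unlike do_exch() which goes through get_user/put_user.
 */
static inline uint64_t cmpexch8_sketch(uint64_t *mem, uint64_t cmp_spr,
                                       uint64_t src_b)
{
    uint64_t mem_val = *mem;        /* the result is always the old value */

    if (mem_val == cmp_spr) {       /* store only if memory matched CmpValueSPR */
        *mem = src_b;
    }
    return mem_val;
}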
static void do_fetch(CPUTLGState *env, int trapnr, bool quad)
    target_long val, valb;
    addr = env->atomic_srca;
    valb = env->atomic_srcb;
    if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
    goto sigsegv_maperr;
    case TILEGX_EXCP_OPCODE_FETCHADD:
    case TILEGX_EXCP_OPCODE_FETCHADD4:
    case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
    case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
    if ((int32_t)valb < 0) {
    case TILEGX_EXCP_OPCODE_FETCHAND:
    case TILEGX_EXCP_OPCODE_FETCHAND4:
    case TILEGX_EXCP_OPCODE_FETCHOR:
    case TILEGX_EXCP_OPCODE_FETCHOR4:
    g_assert_not_reached();
    if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
    goto sigsegv_maperr;
    set_regval(env, env->atomic_dstr, val);
    gen_sigsegv_maperr(env, addr);

void cpu_loop(CPUTLGState *env)
    CPUState *cs = CPU(tilegx_env_get_cpu(env));
    trapnr = cpu_tilegx_exec(cs);
    case TILEGX_EXCP_SYSCALL:
    abi_ulong ret = do_syscall(env, env->regs[TILEGX_R_NR],
                               env->regs[0], env->regs[1],
                               env->regs[2], env->regs[3],
                               env->regs[4], env->regs[5],
                               env->regs[6], env->regs[7]);
    if (ret == -TARGET_ERESTARTSYS) {
    } else if (ret != -TARGET_QEMU_ESIGRETURN) {
    env->regs[TILEGX_R_RE] = ret;
    env->regs[TILEGX_R_ERR] = TILEGX_IS_ERRNO(ret) ? -ret : 0;
    case TILEGX_EXCP_OPCODE_EXCH:
    do_exch(env, true, false);
    case TILEGX_EXCP_OPCODE_EXCH4:
    do_exch(env, false, false);
    case TILEGX_EXCP_OPCODE_CMPEXCH:
    do_exch(env, true, true);
    case TILEGX_EXCP_OPCODE_CMPEXCH4:
    do_exch(env, false, true);
    case TILEGX_EXCP_OPCODE_FETCHADD:
    case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
    case TILEGX_EXCP_OPCODE_FETCHAND:
    case TILEGX_EXCP_OPCODE_FETCHOR:
    do_fetch(env, trapnr, true);
    case TILEGX_EXCP_OPCODE_FETCHADD4:
    case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
    case TILEGX_EXCP_OPCODE_FETCHAND4:
    case TILEGX_EXCP_OPCODE_FETCHOR4:
    do_fetch(env, trapnr, false);
    case TILEGX_EXCP_SIGNAL:
    do_signal(env, env->signo, env->sigcode);
    case TILEGX_EXCP_REG_IDN_ACCESS:
    case TILEGX_EXCP_REG_UDN_ACCESS:
    gen_sigill_reg(env);
    fprintf(stderr, "trapnr is %d[0x%x].\n", trapnr, trapnr);
    g_assert_not_reached();
    process_pending_signals(env);
THREAD CPUState *thread_cpu;

void task_settid(TaskState *ts)
    if (ts->ts_tid == 0) {
    ts->ts_tid = (pid_t)syscall(SYS_gettid);

void stop_all_tasks(void)
     * We trust that when using NPTL, start_exclusive()
     * handles thread stopping correctly.

/* Assumes contents are already zeroed. */
void init_task_state(TaskState *ts)

CPUArchState *cpu_copy(CPUArchState *env)
    CPUState *cpu = ENV_GET_CPU(env);
    CPUState *new_cpu = cpu_init(cpu_model);
    CPUArchState *new_env = new_cpu->env_ptr;
    /* Reset non arch specific state */
    memcpy(new_env, env, sizeof(CPUArchState));
    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_cpu->breakpoints);
    QTAILQ_INIT(&new_cpu->watchpoints);
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
    cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
    cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
static void handle_arg_help(const char *arg)
    usage(EXIT_SUCCESS);

static void handle_arg_log(const char *arg)
    mask = qemu_str_to_log_mask(arg);
    qemu_print_log_usage(stdout);
    qemu_log_needs_buffers();

static void handle_arg_log_filename(const char *arg)
    qemu_set_log_filename(arg, &error_fatal);

static void handle_arg_set_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
    if (envlist_setenv(envlist, token) != 0) {
    usage(EXIT_FAILURE);

static void handle_arg_unset_env(const char *arg)
    char *r, *p, *token;
    r = p = strdup(arg);
    while ((token = strsep(&p, ",")) != NULL) {
    if (envlist_unsetenv(envlist, token) != 0) {
    usage(EXIT_FAILURE);

static void handle_arg_argv0(const char *arg)
    argv0 = strdup(arg);

static void handle_arg_stack_size(const char *arg)
    guest_stack_size = strtoul(arg, &p, 0);
    if (guest_stack_size == 0) {
    usage(EXIT_FAILURE);
    guest_stack_size *= 1024 * 1024;
    } else if (*p == 'k' || *p == 'K') {
    guest_stack_size *= 1024;

static void handle_arg_ld_prefix(const char *arg)
    interp_prefix = strdup(arg);

static void handle_arg_pagesize(const char *arg)
    qemu_host_page_size = atoi(arg);
    if (qemu_host_page_size == 0 ||
        (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
    fprintf(stderr, "page size must be a power of two\n");

static void handle_arg_randseed(const char *arg)
    unsigned long long seed;
    if (parse_uint_full(arg, &seed, 0) != 0 || seed > UINT_MAX) {
    fprintf(stderr, "Invalid seed number: %s\n", arg);

static void handle_arg_gdb(const char *arg)
    gdbstub_port = atoi(arg);

static void handle_arg_uname(const char *arg)
    qemu_uname_release = strdup(arg);

static void handle_arg_cpu(const char *arg)
    cpu_model = strdup(arg);
    if (cpu_model == NULL || is_help_option(cpu_model)) {
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(stdout, &fprintf);

static void handle_arg_guest_base(const char *arg)
    guest_base = strtol(arg, NULL, 0);
    have_guest_base = 1;

static void handle_arg_reserved_va(const char *arg)
    reserved_va = strtoul(arg, &p, 0);
    unsigned long unshifted = reserved_va;
    reserved_va <<= shift;
    if (((reserved_va >> shift) != unshifted)
#if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
        || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
    fprintf(stderr, "Reserved virtual address too big\n");
    fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
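/*
 * Usage note (illustrative): "-R 0x10000000" and, assuming the usual k/M/G
 * suffix handling in the elided switch above, "-R 256M" request the same
 * reservation; values that overflow the shift or exceed the target's
 * virtual address space are rejected by the checks above.
 */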
static void handle_arg_singlestep(const char *arg)

static void handle_arg_strace(const char *arg)

static void handle_arg_version(const char *arg)
    printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION
           ", Copyright (c) 2003-2008 Fabrice Bellard\n");

struct qemu_argument {
    void (*handle_opt)(const char *arg);
    const char *example;

static const struct qemu_argument arg_table[] = {
    {"h", "", false, handle_arg_help,
     "", "print this help"},
    {"help", "", false, handle_arg_help,
    {"g", "QEMU_GDB", true, handle_arg_gdb,
     "port", "wait gdb connection to 'port'"},
    {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix,
     "path", "set the elf interpreter prefix to 'path'"},
    {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size,
     "size", "set the stack size to 'size' bytes"},
    {"cpu", "QEMU_CPU", true, handle_arg_cpu,
     "model", "select CPU (-cpu help for list)"},
    {"E", "QEMU_SET_ENV", true, handle_arg_set_env,
     "var=value", "sets targets environment variable (see below)"},
    {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env,
     "var", "unsets targets environment variable (see below)"},
    {"0", "QEMU_ARGV0", true, handle_arg_argv0,
     "argv0", "forces target process argv[0] to be 'argv0'"},
    {"r", "QEMU_UNAME", true, handle_arg_uname,
     "uname", "set qemu uname release string to 'uname'"},
    {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base,
     "address", "set guest_base address to 'address'"},
    {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va,
     "size", "reserve 'size' bytes for guest virtual address space"},
    {"d", "QEMU_LOG", true, handle_arg_log,
     "item[,...]", "enable logging of specified items "
     "(use '-d help' for a list of items)"},
    {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename,
     "logfile", "write logs to 'logfile' (default stderr)"},
    {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize,
     "pagesize", "set the host page size to 'pagesize'"},
    {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep,
     "", "run in singlestep mode"},
    {"strace", "QEMU_STRACE", false, handle_arg_strace,
     "", "log system calls"},
    {"seed", "QEMU_RAND_SEED", true, handle_arg_randseed,
     "", "Seed for pseudo-random number generator"},
    {"version", "QEMU_VERSION", false, handle_arg_version,
     "", "display version information and exit"},
    {NULL, NULL, false, NULL, NULL, NULL}
static void usage(int exitcode)
    const struct qemu_argument *arginfo;
    printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n"
           "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n"
           "Options and associated environment variables:\n"
    /* Calculate column widths. We must always have at least enough space
     * for the column header.
    maxarglen = strlen("Argument");
    maxenvlen = strlen("Env-variable");
    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
    int arglen = strlen(arginfo->argv);
    if (arginfo->has_arg) {
    arglen += strlen(arginfo->example) + 1;
    if (strlen(arginfo->env) > maxenvlen) {
    maxenvlen = strlen(arginfo->env);
    if (arglen > maxarglen) {
    printf("%-*s %-*s Description\n", maxarglen + 1, "Argument",
           maxenvlen, "Env-variable");
    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
    if (arginfo->has_arg) {
    printf("-%s %-*s %-*s %s\n", arginfo->argv,
           (int)(maxarglen - strlen(arginfo->argv) - 1),
           arginfo->example, maxenvlen, arginfo->env, arginfo->help);
    printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv,
           maxenvlen, arginfo->env,
           "QEMU_LD_PREFIX = %s\n"
           "QEMU_STACK_SIZE = %ld byte\n",
           "You can use -E and -U options or the QEMU_SET_ENV and\n"
           "QEMU_UNSET_ENV environment variables to set and unset\n"
           "environment variables for the target process.\n"
           "It is possible to provide several variables by separating them\n"
           "by commas in getsubopt(3) style. Additionally it is possible to\n"
           "provide the -E and -U options multiple times.\n"
           "The following lines are equivalent:\n"
           "    -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
           "    -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
           "    QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
           "Note that if you provide several changes to a single variable\n"
           "the last change will stay in effect.\n");
static int parse_args(int argc, char **argv)
    const struct qemu_argument *arginfo;
    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
    if (arginfo->env == NULL) {
    r = getenv(arginfo->env);
    arginfo->handle_opt(r);
    if (optind >= argc) {
    if (!strcmp(r, "-")) {
    /* Treat --foo the same as -foo. */
    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
    if (!strcmp(r, arginfo->argv)) {
    if (arginfo->has_arg) {
    if (optind >= argc) {
    (void) fprintf(stderr,
                   "qemu: missing argument for option '%s'\n", r);
    arginfo->handle_opt(argv[optind]);
    arginfo->handle_opt(NULL);
    /* no option matched the current argv */
    if (arginfo->handle_opt == NULL) {
    (void) fprintf(stderr, "qemu: unknown option '%s'\n", r);
    if (optind >= argc) {
    (void) fprintf(stderr, "qemu: no user program specified\n");
    filename = argv[optind];
    exec_path = argv[optind];
int main(int argc, char **argv, char **envp)
    struct target_pt_regs regs1, *regs = &regs1;
    struct image_info info1, *info = &info1;
    struct linux_binprm bprm;
    char **target_environ, **wrk;
    module_call_init(MODULE_INIT_QOM);
    if ((envlist = envlist_create()) == NULL) {
    (void) fprintf(stderr, "Unable to allocate envlist\n");
    /* add current environment into the list */
    for (wrk = environ; *wrk != NULL; wrk++) {
    (void) envlist_setenv(envlist, *wrk);
    /* Read the stack limit from the kernel.  If it's "unlimited",
       then we can do little else besides use the default. */
    if (getrlimit(RLIMIT_STACK, &lim) == 0
        && lim.rlim_cur != RLIM_INFINITY
        && lim.rlim_cur == (target_long)lim.rlim_cur) {
    guest_stack_size = lim.rlim_cur;
    optind = parse_args(argc, argv);
    memset(regs, 0, sizeof(struct target_pt_regs));
    /* Zero out image_info */
    memset(info, 0, sizeof(struct image_info));
    memset(&bprm, 0, sizeof(bprm));
    /* Scan interp_prefix dir for replacement files. */
    init_paths(interp_prefix);
    init_qemu_uname_release();
    if (cpu_model == NULL) {
#if defined(TARGET_I386)
#ifdef TARGET_X86_64
    cpu_model = "qemu64";
    cpu_model = "qemu32";
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_M68K)
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    cpu_model = "TI UltraSparc II";
    cpu_model = "Fujitsu MB86904";
#elif defined(TARGET_MIPS)
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
#elif defined TARGET_OPENRISC
    cpu_model = "or1200";
#elif defined(TARGET_PPC)
# ifdef TARGET_PPC64
    cpu_model = "POWER8";
#elif defined TARGET_SH4
    cpu_model = TYPE_SH7785_CPU;
    /* NOTE: we need to init the CPU at this stage to get
       qemu_host_page_size */
    cpu = cpu_init(cpu_model);
    fprintf(stderr, "Unable to find CPU definition\n");
    if (getenv("QEMU_STRACE")) {
    if (getenv("QEMU_RAND_SEED")) {
    handle_arg_randseed(getenv("QEMU_RAND_SEED"));
    target_environ = envlist_to_environ(envlist, NULL);
    envlist_free(envlist);
     * Now that page sizes are configured in cpu_init() we can do
     * proper page alignment for guest_base.
    guest_base = HOST_PAGE_ALIGN(guest_base);
    if (reserved_va || have_guest_base) {
    guest_base = init_guest_space(guest_base, reserved_va, 0,
    if (guest_base == (unsigned long)-1) {
    fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
            "space for use as guest address space (check your virtual "
            "memory ulimit setting or reserve less using -R option)\n",
    mmap_next_start = reserved_va;
     * Read in mmap_min_addr kernel parameter.  This value is used
     * when loading the ELF image to determine whether guest_base
     * is needed.  It is also used in mmap_find_vma.
    if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
    if (fscanf(fp, "%lu", &tmp) == 1) {
    mmap_min_addr = tmp;
    qemu_log_mask(CPU_LOG_PAGE, "host mmap_min_addr=0x%lx\n", mmap_min_addr);
     * Prepare copy of argv vector for target.
    target_argc = argc - optind;
    target_argv = calloc(target_argc + 1, sizeof(char *));
    if (target_argv == NULL) {
    (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");
     * If argv0 is specified (using '-0' switch) we replace
     * argv[0] pointer with the given one.
    if (argv0 != NULL) {
    target_argv[i++] = strdup(argv0);
    for (; i < target_argc; i++) {
    target_argv[i] = strdup(argv[optind + i]);
    target_argv[target_argc] = NULL;
    ts = g_new0(TaskState, 1);
    init_task_state(ts);
    /* build Task State */
    execfd = qemu_getauxval(AT_EXECFD);
    execfd = open(filename, O_RDONLY);
    printf("Error while loading %s: %s\n", filename, strerror(errno));
    _exit(EXIT_FAILURE);
    ret = loader_exec(execfd, filename, target_argv, target_environ, regs,
    printf("Error while loading %s: %s\n", filename, strerror(-ret));
    _exit(EXIT_FAILURE);
    for (wrk = target_environ; *wrk; wrk++) {
    free(target_environ);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
    qemu_log("guest_base 0x%lx\n", guest_base);
    qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
    qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code);
    qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n",
    qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n",
    qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data);
    qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
    qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk);
    qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry);
    target_set_brk(info->brk);
    /* Now that we've loaded the binary, GUEST_BASE is fixed.  Delay
       generating the prologue until now so that the prologue can take
       the real value of GUEST_BASE into account. */
    tcg_prologue_init(&tcg_ctx);
#if defined(TARGET_I386)
    env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
    env->hflags |= HF_PE_MASK | HF_CPL_MASK;
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
    env->cr[4] |= CR4_OSFXSR_MASK;
    env->hflags |= HF_OSFXSR_MASK;
#ifndef TARGET_ABI32
    /* enable 64 bit mode if possible */
    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
    fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
    env->cr[4] |= CR4_PAE_MASK;
    env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
    env->hflags |= HF_LMA_MASK;
    /* flags setup : we activate the IRQs by default as in user mode */
    env->eflags |= IF_MASK;
    /* linux register setup */
#ifndef TARGET_ABI32
    env->regs[R_EAX] = regs->rax;
    env->regs[R_EBX] = regs->rbx;
    env->regs[R_ECX] = regs->rcx;
    env->regs[R_EDX] = regs->rdx;
    env->regs[R_ESI] = regs->rsi;
    env->regs[R_EDI] = regs->rdi;
    env->regs[R_EBP] = regs->rbp;
    env->regs[R_ESP] = regs->rsp;
    env->eip = regs->rip;
    env->regs[R_EAX] = regs->eax;
    env->regs[R_EBX] = regs->ebx;
    env->regs[R_ECX] = regs->ecx;
    env->regs[R_EDX] = regs->edx;
    env->regs[R_ESI] = regs->esi;
    env->regs[R_EDI] = regs->edi;
    env->regs[R_EBP] = regs->ebp;
    env->regs[R_ESP] = regs->esp;
    env->eip = regs->eip;
    /* linux interrupt setup */
#ifndef TARGET_ABI32
    env->idt.limit = 511;
    env->idt.limit = 255;
    env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
                                PROT_READ|PROT_WRITE,
                                MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    idt_table = g2h(env->idt.base);
    /* linux segment setup */
    uint64_t *gdt_table;
    env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
                                PROT_READ|PROT_WRITE,
                                MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
    gdt_table = g2h(env->gdt.base);
    write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
             DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
             (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
    /* 64 bit code segment */
    write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
             DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
             (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
    write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
             DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
             (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_FS, __USER_DS);
    cpu_x86_load_seg(env, R_GS, __USER_DS);
    /* This hack makes Wine work... */
    env->segs[R_FS].selector = 0;
    cpu_x86_load_seg(env, R_DS, 0);
    cpu_x86_load_seg(env, R_ES, 0);
    cpu_x86_load_seg(env, R_FS, 0);
    cpu_x86_load_seg(env, R_GS, 0);
#elif defined(TARGET_AARCH64)
    if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
            "The selected ARM CPU does not support 64 bit mode\n");
    for (i = 0; i < 31; i++) {
    env->xregs[i] = regs->regs[i];
    env->xregs[31] = regs->sp;
#elif defined(TARGET_ARM)
    cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
    for (i = 0; i < 16; i++) {
    env->regs[i] = regs->uregs[i];
#ifdef TARGET_WORDS_BIGENDIAN
    if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
        && (info->elf_flags & EF_ARM_BE8)) {
    env->uncached_cpsr |= CPSR_E;
    env->cp15.sctlr_el[1] |= SCTLR_E0E;
    env->cp15.sctlr_el[1] |= SCTLR_B;
#elif defined(TARGET_UNICORE32)
    cpu_asr_write(env, regs->uregs[32], 0xffffffff);
    for (i = 0; i < 32; i++) {
    env->regs[i] = regs->uregs[i];
#elif defined(TARGET_SPARC)
    env->npc = regs->npc;
    for (i = 0; i < 8; i++)
    env->gregs[i] = regs->u_regs[i];
    for (i = 0; i < 8; i++)
    env->regwptr[i] = regs->u_regs[i + 8];
#elif defined(TARGET_PPC)
#if defined(TARGET_PPC64)
#if defined(TARGET_ABI32)
    env->msr &= ~((target_ulong)1 << MSR_SF);
    env->msr |= (target_ulong)1 << MSR_SF;
    env->nip = regs->nip;
    for (i = 0; i < 32; i++) {
    env->gpr[i] = regs->gpr[i];
#elif defined(TARGET_M68K)
    env->dregs[0] = regs->d0;
    env->dregs[1] = regs->d1;
    env->dregs[2] = regs->d2;
    env->dregs[3] = regs->d3;
    env->dregs[4] = regs->d4;
    env->dregs[5] = regs->d5;
    env->dregs[6] = regs->d6;
    env->dregs[7] = regs->d7;
    env->aregs[0] = regs->a0;
    env->aregs[1] = regs->a1;
    env->aregs[2] = regs->a2;
    env->aregs[3] = regs->a3;
    env->aregs[4] = regs->a4;
    env->aregs[5] = regs->a5;
    env->aregs[6] = regs->a6;
    env->aregs[7] = regs->usp;
    ts->sim_syscalls = 1;
#elif defined(TARGET_MICROBLAZE)
    env->regs[0] = regs->r0;
    env->regs[1] = regs->r1;
    env->regs[2] = regs->r2;
    env->regs[3] = regs->r3;
    env->regs[4] = regs->r4;
    env->regs[5] = regs->r5;
    env->regs[6] = regs->r6;
    env->regs[7] = regs->r7;
    env->regs[8] = regs->r8;
    env->regs[9] = regs->r9;
    env->regs[10] = regs->r10;
    env->regs[11] = regs->r11;
    env->regs[12] = regs->r12;
    env->regs[13] = regs->r13;
    env->regs[14] = regs->r14;
    env->regs[15] = regs->r15;
    env->regs[16] = regs->r16;
    env->regs[17] = regs->r17;
    env->regs[18] = regs->r18;
    env->regs[19] = regs->r19;
    env->regs[20] = regs->r20;
    env->regs[21] = regs->r21;
    env->regs[22] = regs->r22;
    env->regs[23] = regs->r23;
    env->regs[24] = regs->r24;
    env->regs[25] = regs->r25;
    env->regs[26] = regs->r26;
    env->regs[27] = regs->r27;
    env->regs[28] = regs->r28;
    env->regs[29] = regs->r29;
    env->regs[30] = regs->r30;
    env->regs[31] = regs->r31;
    env->sregs[SR_PC] = regs->pc;
#elif defined(TARGET_MIPS)
    for (i = 0; i < 32; i++) {
    env->active_tc.gpr[i] = regs->regs[i];
    env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
    if (regs->cp0_epc & 1) {
    env->hflags |= MIPS_HFLAG_M16;
#elif defined(TARGET_OPENRISC)
    for (i = 0; i < 32; i++) {
    env->gpr[i] = regs->gpr[i];
#elif defined(TARGET_SH4)
    for (i = 0; i < 16; i++) {
    env->gregs[i] = regs->regs[i];
#elif defined(TARGET_ALPHA)
    for (i = 0; i < 28; i++) {
    env->ir[i] = ((abi_ulong *)regs)[i];
    env->ir[IR_SP] = regs->usp;
#elif defined(TARGET_CRIS)
    env->regs[0] = regs->r0;
    env->regs[1] = regs->r1;
    env->regs[2] = regs->r2;
    env->regs[3] = regs->r3;
    env->regs[4] = regs->r4;
    env->regs[5] = regs->r5;
    env->regs[6] = regs->r6;
    env->regs[7] = regs->r7;
    env->regs[8] = regs->r8;
    env->regs[9] = regs->r9;
    env->regs[10] = regs->r10;
    env->regs[11] = regs->r11;
    env->regs[12] = regs->r12;
    env->regs[13] = regs->r13;
    env->regs[14] = info->start_stack;
    env->regs[15] = regs->acr;
    env->pc = regs->erp;
#elif defined(TARGET_S390X)
    for (i = 0; i < 16; i++) {
    env->regs[i] = regs->gprs[i];
    env->psw.mask = regs->psw.mask;
    env->psw.addr = regs->psw.addr;
#elif defined(TARGET_TILEGX)
    for (i = 0; i < TILEGX_R_COUNT; i++) {
    env->regs[i] = regs->regs[i];
    for (i = 0; i < TILEGX_SPR_COUNT; i++) {
#error unsupported target CPU
#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call. */
    if (gdbserver_start(gdbstub_port) < 0) {
    fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
    gdb_handlesig(cpu, 0);