#endif /* DEBUG_REMAP */

#include "qemu-types.h"

#include "syscall_defs.h"
#include "target_signal.h"
#include "qemu-queue.h"

#if defined(CONFIG_USE_NPTL)
#define THREAD __thread
#else
#define THREAD
#endif
/* This struct is used to hold certain information about the image.
 * Basically, it replicates in user space what would be certain
 * task_struct fields in the kernel
 */
struct image_info {
    abi_ulong start_stack;
    abi_ulong stack_limit;
    abi_ulong code_offset;
    abi_ulong data_offset;
#ifdef CONFIG_USE_FDPIC
    abi_ulong loadmap_addr;
    abi_ulong pt_dynamic_addr;
    struct image_info *other_info;
#endif
};
/* Information about the current linux thread */
struct vm86_saved_state {
    uint32_t eax; /* return code */
    uint16_t cs, ss, ds, es, fs, gs;
};
#include "nwfpe/fpa11.h"

#define MAX_SIGQUEUE_SIZE 1024

struct sigqueue {
    struct sigqueue *next;
    target_siginfo_t info;
};
struct emulated_sigtable {
    int pending;            /* true if signal is pending */
    struct sigqueue *first;
    struct sigqueue info;   /* in order to always have memory for the
                               first signal, we put it here */
};
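/* Illustrative note, not part of the original header: signal.c is expected to
   keep one emulated_sigtable entry per guest signal.  Roughly, queue_signal()
   takes a free struct sigqueue (from the TaskState's first_free list, or the
   embedded 'info' slot above), fills it with the converted siginfo and chains
   it onto 'first'; process_pending_signals() later drains that chain and
   delivers the signals to the guest. */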
/* NOTE: we force a big alignment so that the stack stored after is
   aligned too */
typedef struct TaskState {
    pid_t ts_tid;   /* tid (or pid) of this task */
#ifdef TARGET_UNICORE32
    int swi_errno;
#endif
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
    abi_ulong target_v86;
    struct vm86_saved_state vm86_saved_regs;
    struct target_vm86plus_struct vm86plus;
#endif
#ifdef CONFIG_USE_NPTL
    abi_ulong child_tidptr;
#endif
#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    /* Extra fields for semihosted binaries. */
#endif
    int used; /* non zero if used */
    struct image_info *info;
    struct linux_binprm *bprm;

    struct emulated_sigtable sigtab[TARGET_NSIG];
    struct sigqueue sigqueue_table[MAX_SIGQUEUE_SIZE]; /* siginfo queue */
    struct sigqueue *first_free; /* first free siginfo queue entry */
    int signal_pending;          /* non zero if a signal may be pending */
} __attribute__((aligned(16))) TaskState;
extern char *exec_path;
void init_task_state(TaskState *ts);
void task_settid(TaskState *);
void stop_all_tasks(void);
extern const char *qemu_uname_release;
extern unsigned long mmap_min_addr;
/* ??? See if we can avoid exposing so much of the loader internals. */

/*
 * MAX_ARG_PAGES defines the number of pages allocated for arguments
 * and envelope for the new program. 32 should suffice, this gives
 * a maximum env+arg of 128kB w/4KB pages!
 */
#define MAX_ARG_PAGES 33
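/* Worked out (this note is not in the original header): 32 pages x 4 KiB per
   page = 131072 bytes = 128 kB of combined argument + environment space, as
   the comment above states; the define itself reserves 33 pages. */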
/* Read a good amount of data initially, to hopefully get all the
   program headers loaded. */
#define BPRM_BUF_SIZE 1024
/*
 * This structure is used to hold the arguments that are
 * used when loading binaries.
 */
struct linux_binprm {
    char buf[BPRM_BUF_SIZE] __attribute__((aligned));
    void *page[MAX_ARG_PAGES];
    char *filename;                              /* Name of binary */
    int (*core_dump)(int, const CPUArchState *); /* coredump routine */
};
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop);
abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp,
                              abi_ulong stringp, int push_ptr);
int loader_exec(const char *filename, char **argv, char **envp,
                struct target_pt_regs *regs, struct image_info *infop,
                struct linux_binprm *);
int load_elf_binary(struct linux_binprm *bprm, struct target_pt_regs *regs,
                    struct image_info *info);
int load_flt_binary(struct linux_binprm *bprm, struct target_pt_regs *regs,
                    struct image_info *info);
abi_long memcpy_to_target(abi_ulong dest, const void *src,
                          unsigned long len);
void target_set_brk(abi_ulong new_brk);
abi_long do_brk(abi_ulong new_brk);
void syscall_init(void);
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8);
void gemu_log(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
extern THREAD CPUArchState *thread_env;
void cpu_loop(CPUArchState *env);
char *target_strerror(int err);
int get_osversion(void);
void fork_start(void);
void fork_end(int child);
/* Return true if the proposed guest_base is suitable for the guest.
 * The guest code may leave a page mapped and populate it if the
 * address is suitable.
 */
bool guest_validate_base(unsigned long guest_base);

#include "qemu-log.h"
void print_syscall(int num,
                   abi_long arg1, abi_long arg2, abi_long arg3,
                   abi_long arg4, abi_long arg5, abi_long arg6);
void print_syscall_ret(int num, abi_long arg1);
extern int do_strace;
void process_pending_signals(CPUArchState *cpu_env);
void signal_init(void);
int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info);
void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info);
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo);
int target_to_host_signal(int sig);
int host_to_target_signal(int sig);
long do_sigreturn(CPUArchState *env);
long do_rt_sigreturn(CPUArchState *env);
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp);
#ifdef TARGET_I386
void save_v86_state(CPUX86State *env);
void handle_vm86_trap(CPUX86State *env, int trapno);
void handle_vm86_fault(CPUX86State *env);
int do_vm86(CPUX86State *env, long subfunction, abi_ulong v86_addr);
#elif defined(TARGET_SPARC64)
void sparc64_set_context(CPUSPARCState *env);
void sparc64_get_context(CPUSPARCState *env);
#endif
int target_mprotect(abi_ulong start, abi_ulong len, int prot);
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset);
int target_munmap(abi_ulong start, abi_ulong len);
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr);
int target_msync(abi_ulong start, abi_ulong len, int flags);
extern unsigned long last_brk;
extern abi_ulong mmap_next_start;
void mmap_lock(void);
void mmap_unlock(void);
abi_ulong mmap_find_vma(abi_ulong, abi_ulong);
void cpu_list_lock(void);
void cpu_list_unlock(void);
#if defined(CONFIG_USE_NPTL)
void mmap_fork_start(void);
void mmap_fork_end(int child);
#endif
extern unsigned long guest_stack_size;
#define VERIFY_READ  0
#define VERIFY_WRITE 1 /* implies read access */

static inline int access_ok(int type, abi_ulong addr, abi_ulong size)
{
    return page_check_range((target_ulong)addr, size,
                            (type == VERIFY_READ) ? PAGE_READ
                                                  : (PAGE_READ | PAGE_WRITE)) == 0;
}
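/* Illustrative sketch, not part of the original header: access_ok() can be
   used as a cheap pre-check before touching guest memory, e.g.
       if (!access_ok(VERIFY_WRITE, arg_addr, sizeof(abi_ulong)))
           return -TARGET_EFAULT;
   (arg_addr is a hypothetical guest address.)  lock_user() below performs
   the same check internally. */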
/* NOTE __get_user and __put_user use host pointers and don't check access. */
/* These are usually used to access struct data members once the
 * struct has been locked - usually with lock_user_struct().
 */
#define __put_user(x, hptr)\
({\
    switch (sizeof(*hptr)) {\
    case 1: *(uint8_t *)(hptr) = (uint8_t)(typeof(*hptr))(x); break;\
    case 2: *(uint16_t *)(hptr) = tswap16((uint16_t)(typeof(*hptr))(x)); break;\
    case 4: *(uint32_t *)(hptr) = tswap32((uint32_t)(typeof(*hptr))(x)); break;\
    case 8: *(uint64_t *)(hptr) = tswap64((typeof(*hptr))(x)); break;\
    default: abort();\
    }\
    0;\
})
#define __get_user(x, hptr) \
({\
    switch (sizeof(*hptr)) {\
    case 1: x = (typeof(*hptr))*(uint8_t *)(hptr); break;\
    case 2: x = (typeof(*hptr))tswap16(*(uint16_t *)(hptr)); break;\
    case 4: x = (typeof(*hptr))tswap32(*(uint32_t *)(hptr)); break;\
    case 8: x = (typeof(*hptr))tswap64(*(uint64_t *)(hptr)); break;\
    default: x = 0; abort();\
    }\
    0;\
})
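/* Illustrative sketch, not part of the original header: a caller typically
   locks a guest structure once and then uses __get_user/__put_user on its
   members.  Here tv is a hypothetical host struct timeval and tv_addr a
   hypothetical guest address; target_timeval comes from syscall_defs.h:
       struct target_timeval *ttv;
       if (!lock_user_struct(VERIFY_WRITE, ttv, tv_addr, 0))
           return -TARGET_EFAULT;
       __put_user(tv.tv_sec, &ttv->tv_sec);
       __put_user(tv.tv_usec, &ttv->tv_usec);
       unlock_user_struct(ttv, tv_addr, 1);
*/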
/* put_user()/get_user() take a guest address and check access */
/* These are usually used to access an atomic data type, such as an int,
 * that has been passed by address. These internally perform locking
 * and unlocking on the data type.
 */
#define put_user(x, gaddr, target_type)                                 \
({                                                                      \
    abi_ulong __gaddr = (gaddr);                                        \
    target_type *__hptr;                                                \
    abi_long __ret;                                                     \
    if ((__hptr = lock_user(VERIFY_WRITE, __gaddr, sizeof(target_type), 0))) { \
        __ret = __put_user((x), __hptr);                                \
        unlock_user(__hptr, __gaddr, sizeof(target_type));              \
    } else {                                                            \
        __ret = -TARGET_EFAULT;                                         \
    }                                                                   \
    __ret;                                                              \
})
#define get_user(x, gaddr, target_type)                                 \
({                                                                      \
    abi_ulong __gaddr = (gaddr);                                        \
    target_type *__hptr;                                                \
    abi_long __ret;                                                     \
    if ((__hptr = lock_user(VERIFY_READ, __gaddr, sizeof(target_type), 1))) { \
        __ret = __get_user((x), __hptr);                                \
        unlock_user(__hptr, __gaddr, 0);                                \
    } else {                                                            \
        /* avoid warning */                                             \
        x = 0;                                                          \
        __ret = -TARGET_EFAULT;                                         \
    }                                                                   \
    __ret;                                                              \
})
#define put_user_ual(x, gaddr) put_user((x), (gaddr), abi_ulong)
#define put_user_sal(x, gaddr) put_user((x), (gaddr), abi_long)
#define put_user_u64(x, gaddr) put_user((x), (gaddr), uint64_t)
#define put_user_s64(x, gaddr) put_user((x), (gaddr), int64_t)
#define put_user_u32(x, gaddr) put_user((x), (gaddr), uint32_t)
#define put_user_s32(x, gaddr) put_user((x), (gaddr), int32_t)
#define put_user_u16(x, gaddr) put_user((x), (gaddr), uint16_t)
#define put_user_s16(x, gaddr) put_user((x), (gaddr), int16_t)
#define put_user_u8(x, gaddr)  put_user((x), (gaddr), uint8_t)
#define put_user_s8(x, gaddr)  put_user((x), (gaddr), int8_t)

#define get_user_ual(x, gaddr) get_user((x), (gaddr), abi_ulong)
#define get_user_sal(x, gaddr) get_user((x), (gaddr), abi_long)
#define get_user_u64(x, gaddr) get_user((x), (gaddr), uint64_t)
#define get_user_s64(x, gaddr) get_user((x), (gaddr), int64_t)
#define get_user_u32(x, gaddr) get_user((x), (gaddr), uint32_t)
#define get_user_s32(x, gaddr) get_user((x), (gaddr), int32_t)
#define get_user_u16(x, gaddr) get_user((x), (gaddr), uint16_t)
#define get_user_s16(x, gaddr) get_user((x), (gaddr), int16_t)
#define get_user_u8(x, gaddr)  get_user((x), (gaddr), uint8_t)
#define get_user_s8(x, gaddr)  get_user((x), (gaddr), int8_t)
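/* Illustrative sketch, not part of the original header: the typed helpers
   evaluate to 0 on success and -TARGET_EFAULT on a bad guest address, e.g.
   (arg2 and arg3 being hypothetical guest addresses):
       abi_ulong val;
       if (get_user_ual(val, arg2))
           return -TARGET_EFAULT;
       if (put_user_u32(0, arg3))
           return -TARGET_EFAULT;
*/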
/* copy_from_user() and copy_to_user() are usually used to copy data
 * buffers between the target and host. These internally perform
 * locking/unlocking of the memory.
 */
abi_long copy_from_user(void *hptr, abi_ulong gaddr, size_t len);
abi_long copy_to_user(abi_ulong gaddr, void *hptr, size_t len);
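/* Illustrative sketch, not part of the original header: both return 0 on
   success and -TARGET_EFAULT on failure.  They copy raw bytes only; any
   byte swapping or layout conversion is left to the caller, e.g.
   (guest_addr is a hypothetical guest address):
       char buf[64];
       if (copy_from_user(buf, guest_addr, sizeof(buf)))
           return -TARGET_EFAULT;
*/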
/* Functions for accessing guest memory. The tget and tput functions
   read/write single values, byteswapping as necessary. The lock_user function
   gets a pointer to a contiguous area of guest memory, but does not perform
   any byteswapping. lock_user may return either a pointer to the guest
   memory, or a temporary buffer. */
/* Lock an area of guest memory into the host. If copy is true then the
   host area will have the same contents as the guest. */
static inline void *lock_user(int type, abi_ulong guest_addr, long len, int copy)
{
    if (!access_ok(type, guest_addr, len))
        return NULL;
#ifdef DEBUG_REMAP
    {
        void *addr;
        addr = malloc(len);
        if (copy)
            memcpy(addr, g2h(guest_addr), len);
        else
            memset(addr, 0, len);
        return addr;
    }
#else
    return g2h(guest_addr);
#endif
}
/* Unlock an area of guest memory. The first LEN bytes must be
   flushed back to guest memory. host_ptr = NULL is explicitly
   allowed and does nothing. */
static inline void unlock_user(void *host_ptr, abi_ulong guest_addr,
                               long len)
{
#ifdef DEBUG_REMAP
    if (!host_ptr)
        return;
    if (host_ptr == g2h(guest_addr))
        return;
    if (len > 0)
        memcpy(g2h(guest_addr), host_ptr, len);
    free(host_ptr);
#endif
}
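/* Illustrative sketch, not part of the original header: a typical
   read-modify-write of a guest buffer (guest_addr and len are hypothetical
   caller variables):
       uint8_t *p = lock_user(VERIFY_WRITE, guest_addr, len, 1);
       if (!p)
           return -TARGET_EFAULT;
       p[0] ^= 0xff;                     (modify the data in place)
       unlock_user(p, guest_addr, len);  (flush len bytes back to the guest)
   Pass 0 as the last argument to unlock_user() when nothing was written. */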
/* Return the length of a string in target memory or -TARGET_EFAULT if
   access error. */
abi_long target_strlen(abi_ulong gaddr);
/* Like lock_user but for null terminated strings. */
static inline void *lock_user_string(abi_ulong guest_addr)
{
    abi_long len;

    len = target_strlen(guest_addr);
    if (len < 0)
        return NULL;
    return lock_user(VERIFY_READ, guest_addr, (long)(len + 1), 1);
}
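/* Illustrative sketch, not part of the original header: string arguments
   (for example pathnames) are commonly handled like this, with arg1 being a
   hypothetical guest pointer and get_errno() a hypothetical errno-wrapping
   helper:
       char *p = lock_user_string(arg1);
       if (!p)
           return -TARGET_EFAULT;
       ret = get_errno(unlink(p));
       unlock_user(p, arg1, 0);
*/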
/* Helper macros for locking/unlocking a target struct. */
#define lock_user_struct(type, host_ptr, guest_addr, copy) \
    (host_ptr = lock_user(type, guest_addr, sizeof(*host_ptr), copy))
#define unlock_user_struct(host_ptr, guest_addr, copy) \
    unlock_user(host_ptr, guest_addr, (copy) ? sizeof(*host_ptr) : 0)
#if defined(CONFIG_USE_NPTL)