wxx: Add support for ncurses
[qemu/ar7.git] / linux-user / syscall.c
blob951753143c97c86c6bc95f8af7a9f2c03e05b26a
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include <elf.h>
22 #include <endian.h>
23 #include <grp.h>
24 #include <sys/ipc.h>
25 #include <sys/msg.h>
26 #include <sys/wait.h>
27 #include <sys/mount.h>
28 #include <sys/file.h>
29 #include <sys/fsuid.h>
30 #include <sys/personality.h>
31 #include <sys/prctl.h>
32 #include <sys/resource.h>
33 #include <sys/mman.h>
34 #include <sys/swap.h>
35 #include <linux/capability.h>
36 #include <sched.h>
37 #ifdef __ia64__
38 int __clone2(int (*fn)(void *), void *child_stack_base,
39 size_t stack_size, int flags, void *arg, ...);
40 #endif
41 #include <sys/socket.h>
42 #include <sys/un.h>
43 #include <sys/uio.h>
44 #include <sys/poll.h>
45 #include <sys/times.h>
46 #include <sys/shm.h>
47 #include <sys/sem.h>
48 #include <sys/statfs.h>
49 #include <utime.h>
50 #include <sys/sysinfo.h>
51 #include <sys/signalfd.h>
52 //#include <sys/user.h>
53 #include <netinet/ip.h>
54 #include <netinet/tcp.h>
55 #include <linux/wireless.h>
56 #include <linux/icmp.h>
57 #include "qemu-common.h"
58 #ifdef CONFIG_TIMERFD
59 #include <sys/timerfd.h>
60 #endif
61 #ifdef TARGET_GPROF
62 #include <sys/gmon.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/cdrom.h>
87 #include <linux/hdreg.h>
88 #include <linux/soundcard.h>
89 #include <linux/kd.h>
90 #include <linux/mtio.h>
91 #include <linux/fs.h>
92 #if defined(CONFIG_FIEMAP)
93 #include <linux/fiemap.h>
94 #endif
95 #include <linux/fb.h>
96 #include <linux/vt.h>
97 #include <linux/dm-ioctl.h>
98 #include <linux/reboot.h>
99 #include <linux/route.h>
100 #include <linux/filter.h>
101 #include <linux/blkpg.h>
102 #include "linux_loop.h"
103 #include "uname.h"
105 #include "qemu.h"
107 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
108 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
110 //#define DEBUG
112 //#include <linux/msdos_fs.h>
113 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
114 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

/* Each _syscallN(type, name, ...) emits a static wrapper "type name(...)"
 * that traps straight to the host kernel via syscall(__NR_name, ...),
 * bypassing any glibc wrapper. */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
172 #define __NR_sys_uname __NR_uname
173 #define __NR_sys_getcwd1 __NR_getcwd
174 #define __NR_sys_getdents __NR_getdents
175 #define __NR_sys_getdents64 __NR_getdents64
176 #define __NR_sys_getpriority __NR_getpriority
177 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
178 #define __NR_sys_syslog __NR_syslog
179 #define __NR_sys_tgkill __NR_tgkill
180 #define __NR_sys_tkill __NR_tkill
181 #define __NR_sys_futex __NR_futex
182 #define __NR_sys_inotify_init __NR_inotify_init
183 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
184 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
186 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
187 defined(__s390x__)
188 #define __NR__llseek __NR_lseek
189 #endif
191 /* Newer kernel ports have llseek() instead of _llseek() */
192 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
193 #define TARGET_NR__llseek TARGET_NR_llseek
194 #endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* Fallback when the host kernel headers lack __NR_gettid; like the real
 * wrapper it must report failure as a host errno value. */
static int gettid(void)
{
    return -ENOSYS;
}
#endif
205 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
206 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
207 #endif
208 #if !defined(__NR_getdents) || \
209 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
210 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
211 #endif
212 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
213 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
214 loff_t *, res, uint, wh);
215 #endif
216 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
217 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
218 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
219 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
220 #endif
221 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
222 _syscall2(int,sys_tkill,int,tid,int,sig)
223 #endif
224 #ifdef __NR_exit_group
225 _syscall1(int,exit_group,int,error_code)
226 #endif
227 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
228 _syscall1(int,set_tid_address,int *,tidptr)
229 #endif
230 #if defined(TARGET_NR_futex) && defined(__NR_futex)
231 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
232 const struct timespec *,timeout,int *,uaddr2,int,val3)
233 #endif
234 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
235 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
236 unsigned long *, user_mask_ptr);
237 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
238 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
239 unsigned long *, user_mask_ptr);
240 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
241 void *, arg);
242 _syscall2(int, capget, struct __user_cap_header_struct *, header,
243 struct __user_cap_data_struct *, data);
244 _syscall2(int, capset, struct __user_cap_header_struct *, header,
245 struct __user_cap_data_struct *, data);
246 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
247 _syscall2(int, ioprio_get, int, which, int, who)
248 #endif
249 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
250 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
251 #endif
252 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
253 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
254 #endif
256 static bitmask_transtbl fcntl_flags_tbl[] = {
257 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
258 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
259 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
260 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
261 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
262 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
263 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
264 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
265 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
266 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
267 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
268 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
269 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
270 #if defined(O_DIRECT)
271 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
272 #endif
273 #if defined(O_NOATIME)
274 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
275 #endif
276 #if defined(O_CLOEXEC)
277 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
278 #endif
279 #if defined(O_PATH)
280 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
281 #endif
282 /* Don't terminate the list prematurely on 64-bit host+guest. */
283 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
284 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
285 #endif
286 { 0, 0, 0, 0 }
289 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
290 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
291 typedef struct TargetFdTrans {
292 TargetFdDataFunc host_to_target_data;
293 TargetFdDataFunc target_to_host_data;
294 TargetFdAddrFunc target_to_host_addr;
295 } TargetFdTrans;
297 static TargetFdTrans **target_fd_trans;
299 static unsigned int target_fd_max;
301 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
303 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
304 return target_fd_trans[fd]->host_to_target_data;
306 return NULL;
309 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
311 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
312 return target_fd_trans[fd]->target_to_host_addr;
314 return NULL;
317 static void fd_trans_register(int fd, TargetFdTrans *trans)
319 unsigned int oldmax;
321 if (fd >= target_fd_max) {
322 oldmax = target_fd_max;
323 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
324 target_fd_trans = g_renew(TargetFdTrans *,
325 target_fd_trans, target_fd_max);
326 memset((void *)(target_fd_trans + oldmax), 0,
327 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
329 target_fd_trans[fd] = trans;
332 static void fd_trans_unregister(int fd)
334 if (fd >= 0 && fd < target_fd_max) {
335 target_fd_trans[fd] = NULL;
339 static void fd_trans_dup(int oldfd, int newfd)
341 fd_trans_unregister(newfd);
342 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
343 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd(2)-style helper: fill buf with the cwd and return the byte count
 * including the trailing NUL; on failure return -1 (getcwd sets errno). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
/* Forward to host openat(2), passing the extra mode argument only when
 * O_CREAT requires one. */
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    if (flags & O_CREAT) {
        return openat(dirfd, pathname, flags, mode);
    }
    return openat(dirfd, pathname, flags);
}
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* libc has utimensat(): a NULL pathname means "the fd itself" (futimens). */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL) {
        return futimens(dirfd, times);
    }
    return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
/* No libc wrapper, but the host kernel has the syscall: call it raw. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support: report ENOSYS like the kernel would. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
392 #ifdef CONFIG_INOTIFY
393 #include <sys/inotify.h>
395 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
396 static int sys_inotify_init(void)
398 return (inotify_init());
400 #endif
401 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
402 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
404 return (inotify_add_watch(fd, pathname, mask));
406 #endif
407 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
408 static int sys_inotify_rm_watch(int fd, int32_t wd)
410 return (inotify_rm_watch(fd, wd));
412 #endif
413 #ifdef CONFIG_INOTIFY1
414 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
415 static int sys_inotify_init1(int flags)
417 return (inotify_init1(flags));
419 #endif
420 #endif
421 #else
422 /* Userspace can usually survive runtime without inotify */
423 #undef TARGET_NR_inotify_init
424 #undef TARGET_NR_inotify_init1
425 #undef TARGET_NR_inotify_add_watch
426 #undef TARGET_NR_inotify_rm_watch
427 #endif /* CONFIG_INOTIFY */
429 #if defined(TARGET_NR_ppoll)
430 #ifndef __NR_ppoll
431 # define __NR_ppoll -1
432 #endif
433 #define __NR_sys_ppoll __NR_ppoll
434 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
435 struct timespec *, timeout, const sigset_t *, sigmask,
436 size_t, sigsetsize)
437 #endif
439 #if defined(TARGET_NR_pselect6)
440 #ifndef __NR_pselect6
441 # define __NR_pselect6 -1
442 #endif
443 #define __NR_sys_pselect6 __NR_pselect6
444 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
445 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
446 #endif
448 #if defined(TARGET_NR_prlimit64)
449 #ifndef __NR_prlimit64
450 # define __NR_prlimit64 -1
451 #endif
452 #define __NR_sys_prlimit64 __NR_prlimit64
453 /* The glibc rlimit structure may not be that used by the underlying syscall */
454 struct host_rlimit64 {
455 uint64_t rlim_cur;
456 uint64_t rlim_max;
458 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
459 const struct host_rlimit64 *, new_limit,
460 struct host_rlimit64 *, old_limit)
461 #endif
464 #if defined(TARGET_NR_timer_create)
465 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
466 static timer_t g_posix_timers[32] = { 0, } ;
468 static inline int next_free_host_timer(void)
470 int k ;
471 /* FIXME: Does finding the next free slot require a lock? */
472 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
473 if (g_posix_timers[k] == 0) {
474 g_posix_timers[k] = (timer_t) 1;
475 return k;
478 return -1;
480 #endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers.
 * PPC32 SysV likewise passes 64bit parameters on odd/even register pairs
 * (arg1 starts at r3), which works out the same as ARM/MIPS. */
static inline int regpairs_aligned(void *cpu_env)
{
#if defined(TARGET_ARM)
    return ((CPUARMState *)cpu_env)->eabi == 1;
#elif defined(TARGET_MIPS) || (defined(TARGET_PPC) && !defined(TARGET_PPC64))
    return 1;
#else
    return 0;
#endif
}
498 #define ERRNO_TABLE_SIZE 1200
500 /* target_to_host_errno_table[] is initialized from
501 * host_to_target_errno_table[] in syscall_init(). */
502 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
506 * This list is the union of errno values overridden in asm-<arch>/errno.h
507 * minus the errnos that are not actually generic to all archs.
509 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
510 [EAGAIN] = TARGET_EAGAIN,
511 [EIDRM] = TARGET_EIDRM,
512 [ECHRNG] = TARGET_ECHRNG,
513 [EL2NSYNC] = TARGET_EL2NSYNC,
514 [EL3HLT] = TARGET_EL3HLT,
515 [EL3RST] = TARGET_EL3RST,
516 [ELNRNG] = TARGET_ELNRNG,
517 [EUNATCH] = TARGET_EUNATCH,
518 [ENOCSI] = TARGET_ENOCSI,
519 [EL2HLT] = TARGET_EL2HLT,
520 [EDEADLK] = TARGET_EDEADLK,
521 [ENOLCK] = TARGET_ENOLCK,
522 [EBADE] = TARGET_EBADE,
523 [EBADR] = TARGET_EBADR,
524 [EXFULL] = TARGET_EXFULL,
525 [ENOANO] = TARGET_ENOANO,
526 [EBADRQC] = TARGET_EBADRQC,
527 [EBADSLT] = TARGET_EBADSLT,
528 [EBFONT] = TARGET_EBFONT,
529 [ENOSTR] = TARGET_ENOSTR,
530 [ENODATA] = TARGET_ENODATA,
531 [ETIME] = TARGET_ETIME,
532 [ENOSR] = TARGET_ENOSR,
533 [ENONET] = TARGET_ENONET,
534 [ENOPKG] = TARGET_ENOPKG,
535 [EREMOTE] = TARGET_EREMOTE,
536 [ENOLINK] = TARGET_ENOLINK,
537 [EADV] = TARGET_EADV,
538 [ESRMNT] = TARGET_ESRMNT,
539 [ECOMM] = TARGET_ECOMM,
540 [EPROTO] = TARGET_EPROTO,
541 [EDOTDOT] = TARGET_EDOTDOT,
542 [EMULTIHOP] = TARGET_EMULTIHOP,
543 [EBADMSG] = TARGET_EBADMSG,
544 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
545 [EOVERFLOW] = TARGET_EOVERFLOW,
546 [ENOTUNIQ] = TARGET_ENOTUNIQ,
547 [EBADFD] = TARGET_EBADFD,
548 [EREMCHG] = TARGET_EREMCHG,
549 [ELIBACC] = TARGET_ELIBACC,
550 [ELIBBAD] = TARGET_ELIBBAD,
551 [ELIBSCN] = TARGET_ELIBSCN,
552 [ELIBMAX] = TARGET_ELIBMAX,
553 [ELIBEXEC] = TARGET_ELIBEXEC,
554 [EILSEQ] = TARGET_EILSEQ,
555 [ENOSYS] = TARGET_ENOSYS,
556 [ELOOP] = TARGET_ELOOP,
557 [ERESTART] = TARGET_ERESTART,
558 [ESTRPIPE] = TARGET_ESTRPIPE,
559 [ENOTEMPTY] = TARGET_ENOTEMPTY,
560 [EUSERS] = TARGET_EUSERS,
561 [ENOTSOCK] = TARGET_ENOTSOCK,
562 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
563 [EMSGSIZE] = TARGET_EMSGSIZE,
564 [EPROTOTYPE] = TARGET_EPROTOTYPE,
565 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
566 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
567 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
568 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
569 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
570 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
571 [EADDRINUSE] = TARGET_EADDRINUSE,
572 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
573 [ENETDOWN] = TARGET_ENETDOWN,
574 [ENETUNREACH] = TARGET_ENETUNREACH,
575 [ENETRESET] = TARGET_ENETRESET,
576 [ECONNABORTED] = TARGET_ECONNABORTED,
577 [ECONNRESET] = TARGET_ECONNRESET,
578 [ENOBUFS] = TARGET_ENOBUFS,
579 [EISCONN] = TARGET_EISCONN,
580 [ENOTCONN] = TARGET_ENOTCONN,
581 [EUCLEAN] = TARGET_EUCLEAN,
582 [ENOTNAM] = TARGET_ENOTNAM,
583 [ENAVAIL] = TARGET_ENAVAIL,
584 [EISNAM] = TARGET_EISNAM,
585 [EREMOTEIO] = TARGET_EREMOTEIO,
586 [ESHUTDOWN] = TARGET_ESHUTDOWN,
587 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
588 [ETIMEDOUT] = TARGET_ETIMEDOUT,
589 [ECONNREFUSED] = TARGET_ECONNREFUSED,
590 [EHOSTDOWN] = TARGET_EHOSTDOWN,
591 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
592 [EALREADY] = TARGET_EALREADY,
593 [EINPROGRESS] = TARGET_EINPROGRESS,
594 [ESTALE] = TARGET_ESTALE,
595 [ECANCELED] = TARGET_ECANCELED,
596 [ENOMEDIUM] = TARGET_ENOMEDIUM,
597 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
598 #ifdef ENOKEY
599 [ENOKEY] = TARGET_ENOKEY,
600 #endif
601 #ifdef EKEYEXPIRED
602 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
603 #endif
604 #ifdef EKEYREVOKED
605 [EKEYREVOKED] = TARGET_EKEYREVOKED,
606 #endif
607 #ifdef EKEYREJECTED
608 [EKEYREJECTED] = TARGET_EKEYREJECTED,
609 #endif
610 #ifdef EOWNERDEAD
611 [EOWNERDEAD] = TARGET_EOWNERDEAD,
612 #endif
613 #ifdef ENOTRECOVERABLE
614 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
615 #endif
618 static inline int host_to_target_errno(int err)
620 if(host_to_target_errno_table[err])
621 return host_to_target_errno_table[err];
622 return err;
625 static inline int target_to_host_errno(int err)
627 if (target_to_host_errno_table[err])
628 return target_to_host_errno_table[err];
629 return err;
632 static inline abi_long get_errno(abi_long ret)
634 if (ret == -1)
635 return -host_to_target_errno(errno);
636 else
637 return ret;
/* The top 4096 values of the unsigned range encode negated errnos. */
static inline int is_error(abi_long ret)
{
    return (abi_ulong)(-4096) <= (abi_ulong)ret;
}
645 char *target_strerror(int err)
647 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
648 return NULL;
650 return strerror(target_to_host_errno(err));
653 static inline int host_to_target_sock_type(int host_type)
655 int target_type;
657 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
658 case SOCK_DGRAM:
659 target_type = TARGET_SOCK_DGRAM;
660 break;
661 case SOCK_STREAM:
662 target_type = TARGET_SOCK_STREAM;
663 break;
664 default:
665 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
666 break;
669 #if defined(SOCK_CLOEXEC)
670 if (host_type & SOCK_CLOEXEC) {
671 target_type |= TARGET_SOCK_CLOEXEC;
673 #endif
675 #if defined(SOCK_NONBLOCK)
676 if (host_type & SOCK_NONBLOCK) {
677 target_type |= TARGET_SOCK_NONBLOCK;
679 #endif
681 return target_type;
684 static abi_ulong target_brk;
685 static abi_ulong target_original_brk;
686 static abi_ulong brk_page;
688 void target_set_brk(abi_ulong new_brk)
690 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
691 brk_page = HOST_PAGE_ALIGN(target_brk);
694 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
695 #define DEBUGF_BRK(message, args...)
697 /* do_brk() must return target values and target errnos. */
698 abi_long do_brk(abi_ulong new_brk)
700 abi_long mapped_addr;
701 int new_alloc_size;
703 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
705 if (!new_brk) {
706 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
707 return target_brk;
709 if (new_brk < target_original_brk) {
710 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
711 target_brk);
712 return target_brk;
715 /* If the new brk is less than the highest page reserved to the
716 * target heap allocation, set it and we're almost done... */
717 if (new_brk <= brk_page) {
718 /* Heap contents are initialized to zero, as for anonymous
719 * mapped pages. */
720 if (new_brk > target_brk) {
721 memset(g2h(target_brk), 0, new_brk - target_brk);
723 target_brk = new_brk;
724 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
725 return target_brk;
728 /* We need to allocate more memory after the brk... Note that
729 * we don't use MAP_FIXED because that will map over the top of
730 * any existing mapping (like the one with the host libc or qemu
731 * itself); instead we treat "mapped but at wrong address" as
732 * a failure and unmap again.
734 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
735 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
736 PROT_READ|PROT_WRITE,
737 MAP_ANON|MAP_PRIVATE, 0, 0));
739 if (mapped_addr == brk_page) {
740 /* Heap contents are initialized to zero, as for anonymous
741 * mapped pages. Technically the new pages are already
742 * initialized to zero since they *are* anonymous mapped
743 * pages, however we have to take care with the contents that
744 * come from the remaining part of the previous page: it may
745 * contains garbage data due to a previous heap usage (grown
746 * then shrunken). */
747 memset(g2h(target_brk), 0, brk_page - target_brk);
749 target_brk = new_brk;
750 brk_page = HOST_PAGE_ALIGN(target_brk);
751 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
752 target_brk);
753 return target_brk;
754 } else if (mapped_addr != -1) {
755 /* Mapped but at wrong address, meaning there wasn't actually
756 * enough space for this brk.
758 target_munmap(mapped_addr, new_alloc_size);
759 mapped_addr = -1;
760 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
762 else {
763 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
766 #if defined(TARGET_ALPHA)
767 /* We (partially) emulate OSF/1 on Alpha, which requires we
768 return a proper errno, not an unchanged brk value. */
769 return -TARGET_ENOMEM;
770 #endif
771 /* For everything else, return the previous break. */
772 return target_brk;
775 static inline abi_long copy_from_user_fdset(fd_set *fds,
776 abi_ulong target_fds_addr,
777 int n)
779 int i, nw, j, k;
780 abi_ulong b, *target_fds;
782 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
783 if (!(target_fds = lock_user(VERIFY_READ,
784 target_fds_addr,
785 sizeof(abi_ulong) * nw,
786 1)))
787 return -TARGET_EFAULT;
789 FD_ZERO(fds);
790 k = 0;
791 for (i = 0; i < nw; i++) {
792 /* grab the abi_ulong */
793 __get_user(b, &target_fds[i]);
794 for (j = 0; j < TARGET_ABI_BITS; j++) {
795 /* check the bit inside the abi_ulong */
796 if ((b >> j) & 1)
797 FD_SET(k, fds);
798 k++;
802 unlock_user(target_fds, target_fds_addr, 0);
804 return 0;
807 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
808 abi_ulong target_fds_addr,
809 int n)
811 if (target_fds_addr) {
812 if (copy_from_user_fdset(fds, target_fds_addr, n))
813 return -TARGET_EFAULT;
814 *fds_ptr = fds;
815 } else {
816 *fds_ptr = NULL;
818 return 0;
821 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
822 const fd_set *fds,
823 int n)
825 int i, nw, j, k;
826 abi_long v;
827 abi_ulong *target_fds;
829 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
830 if (!(target_fds = lock_user(VERIFY_WRITE,
831 target_fds_addr,
832 sizeof(abi_ulong) * nw,
833 0)))
834 return -TARGET_EFAULT;
836 k = 0;
837 for (i = 0; i < nw; i++) {
838 v = 0;
839 for (j = 0; j < TARGET_ABI_BITS; j++) {
840 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
841 k++;
843 __put_user(v, &target_fds[i]);
846 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
848 return 0;
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a host clock_t tick count into the target's HZ units. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
866 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
867 const struct rusage *rusage)
869 struct target_rusage *target_rusage;
871 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
872 return -TARGET_EFAULT;
873 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
874 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
875 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
876 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
877 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
878 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
879 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
880 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
881 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
882 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
883 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
884 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
885 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
886 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
887 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
888 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
889 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
890 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
891 unlock_user_struct(target_rusage, target_addr, 1);
893 return 0;
896 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
898 abi_ulong target_rlim_swap;
899 rlim_t result;
901 target_rlim_swap = tswapal(target_rlim);
902 if (target_rlim_swap == TARGET_RLIM_INFINITY)
903 return RLIM_INFINITY;
905 result = target_rlim_swap;
906 if (target_rlim_swap != (rlim_t)result)
907 return RLIM_INFINITY;
909 return result;
912 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
914 abi_ulong target_rlim_swap;
915 abi_ulong result;
917 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
918 target_rlim_swap = TARGET_RLIM_INFINITY;
919 else
920 target_rlim_swap = rlim;
921 result = tswapal(target_rlim_swap);
923 return result;
926 static inline int target_to_host_resource(int code)
928 switch (code) {
929 case TARGET_RLIMIT_AS:
930 return RLIMIT_AS;
931 case TARGET_RLIMIT_CORE:
932 return RLIMIT_CORE;
933 case TARGET_RLIMIT_CPU:
934 return RLIMIT_CPU;
935 case TARGET_RLIMIT_DATA:
936 return RLIMIT_DATA;
937 case TARGET_RLIMIT_FSIZE:
938 return RLIMIT_FSIZE;
939 case TARGET_RLIMIT_LOCKS:
940 return RLIMIT_LOCKS;
941 case TARGET_RLIMIT_MEMLOCK:
942 return RLIMIT_MEMLOCK;
943 case TARGET_RLIMIT_MSGQUEUE:
944 return RLIMIT_MSGQUEUE;
945 case TARGET_RLIMIT_NICE:
946 return RLIMIT_NICE;
947 case TARGET_RLIMIT_NOFILE:
948 return RLIMIT_NOFILE;
949 case TARGET_RLIMIT_NPROC:
950 return RLIMIT_NPROC;
951 case TARGET_RLIMIT_RSS:
952 return RLIMIT_RSS;
953 case TARGET_RLIMIT_RTPRIO:
954 return RLIMIT_RTPRIO;
955 case TARGET_RLIMIT_SIGPENDING:
956 return RLIMIT_SIGPENDING;
957 case TARGET_RLIMIT_STACK:
958 return RLIMIT_STACK;
959 default:
960 return code;
964 static inline abi_long copy_from_user_timeval(struct timeval *tv,
965 abi_ulong target_tv_addr)
967 struct target_timeval *target_tv;
969 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
970 return -TARGET_EFAULT;
972 __get_user(tv->tv_sec, &target_tv->tv_sec);
973 __get_user(tv->tv_usec, &target_tv->tv_usec);
975 unlock_user_struct(target_tv, target_tv_addr, 0);
977 return 0;
980 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
981 const struct timeval *tv)
983 struct target_timeval *target_tv;
985 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
986 return -TARGET_EFAULT;
988 __put_user(tv->tv_sec, &target_tv->tv_sec);
989 __put_user(tv->tv_usec, &target_tv->tv_usec);
991 unlock_user_struct(target_tv, target_tv_addr, 1);
993 return 0;
996 static inline abi_long copy_from_user_timezone(struct timezone *tz,
997 abi_ulong target_tz_addr)
999 struct target_timezone *target_tz;
1001 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1002 return -TARGET_EFAULT;
1005 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1006 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1008 unlock_user_struct(target_tz, target_tz_addr, 0);
1010 return 0;
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a guest struct mq_attr into host form; 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write a host struct mq_attr out to guest memory; 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    /* A zero guest address for any set (or the timeout) means NULL. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr)) {
            return -TARGET_EFAULT;
        }
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        /* Copy surviving sets (and the updated timeout) back out. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
#endif
/* Thin wrapper over the host pipe2(); reports ENOSYS when the build
 * host lacks pipe2 support (CONFIG_PIPE2 unset). Returns the raw host
 * result / host errno — the caller converts to target conventions. */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Implement pipe()/pipe2() for the guest.
 * flags != 0 forces the pipe2 path; is_pipe2 selects the legacy ABI
 * quirks below. Returns a target errno on failure.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* These targets return the second fd in a register and the first
         * as the syscall result instead of writing through pipedes. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
/* Convert a guest ip_mreq / ip_mreqn (IP multicast membership request)
 * into a host struct ip_mreqn. The two layouts share their first two
 * members; imr_ifindex is only present (and only converted) when the
 * caller passed the larger ip_mreqn length.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    /* in_addr values stay in network byte order — no swap needed. */
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
/* Convert a guest sockaddr into a host sockaddr, fixing up family-
 * specific quirks (AF_UNIX path termination, AF_PACKET field swaps).
 * An fd-specific translator registered via fd_trans takes precedence.
 * The caller must provide a host buffer of at least len + 1 bytes
 * (the AF_UNIX fixup below may extend len by one).
 * Returns 0 on success or a target errno.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;        /* overwrite with host byte order */
    if (sa_family == AF_PACKET) {
        /* sockaddr_ll carries two multi-byte fields beyond the family. */
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
/* Copy a host sockaddr out to guest memory, swapping sa_family to
 * target byte order. The remaining bytes are copied verbatim (network-
 * order fields such as sin_port/sin_addr need no conversion).
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Must come after the memcpy so the swapped value is not clobbered. */
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* Convert the ancillary-data (control message) chain of a guest msghdr
 * into host format, walking guest and host cmsg cursors in lockstep.
 * Understands SCM_RIGHTS (fd arrays) and SCM_CREDENTIALS; any other
 * payload is copied verbatim with a warning. On return
 * msgh->msg_controllen holds the space actually used.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest control buffer.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length = guest cmsg_len minus the guest header size. */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Array of file descriptors: swap each 32-bit fd. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the ancillary-data chain of a host msghdr back into the
 * guest's control buffer, handling SCM_RIGHTS, SO_TIMESTAMP and
 * SCM_CREDENTIALS specially. Truncation because the guest buffer is
 * too small is reported via MSG_CTRUNC in msg_flags, mirroring the
 * kernel's put_cmsg() behaviour. On return the guest msg_controllen
 * holds the number of bytes actually written.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest control buffer.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: raw copy, zero-padding if the target
             * representation is larger than the host one. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
1459 /* do_setsockopt() Must return target values and target errnos. */
1460 static abi_long do_setsockopt(int sockfd, int level, int optname,
1461 abi_ulong optval_addr, socklen_t optlen)
1463 abi_long ret;
1464 int val;
1465 struct ip_mreqn *ip_mreq;
1466 struct ip_mreq_source *ip_mreq_source;
1468 switch(level) {
1469 case SOL_TCP:
1470 /* TCP options all take an 'int' value. */
1471 if (optlen < sizeof(uint32_t))
1472 return -TARGET_EINVAL;
1474 if (get_user_u32(val, optval_addr))
1475 return -TARGET_EFAULT;
1476 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1477 break;
1478 case SOL_IP:
1479 switch(optname) {
1480 case IP_TOS:
1481 case IP_TTL:
1482 case IP_HDRINCL:
1483 case IP_ROUTER_ALERT:
1484 case IP_RECVOPTS:
1485 case IP_RETOPTS:
1486 case IP_PKTINFO:
1487 case IP_MTU_DISCOVER:
1488 case IP_RECVERR:
1489 case IP_RECVTOS:
1490 #ifdef IP_FREEBIND
1491 case IP_FREEBIND:
1492 #endif
1493 case IP_MULTICAST_TTL:
1494 case IP_MULTICAST_LOOP:
1495 val = 0;
1496 if (optlen >= sizeof(uint32_t)) {
1497 if (get_user_u32(val, optval_addr))
1498 return -TARGET_EFAULT;
1499 } else if (optlen >= 1) {
1500 if (get_user_u8(val, optval_addr))
1501 return -TARGET_EFAULT;
1503 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1504 break;
1505 case IP_ADD_MEMBERSHIP:
1506 case IP_DROP_MEMBERSHIP:
1507 if (optlen < sizeof (struct target_ip_mreq) ||
1508 optlen > sizeof (struct target_ip_mreqn))
1509 return -TARGET_EINVAL;
1511 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1512 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1513 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1514 break;
1516 case IP_BLOCK_SOURCE:
1517 case IP_UNBLOCK_SOURCE:
1518 case IP_ADD_SOURCE_MEMBERSHIP:
1519 case IP_DROP_SOURCE_MEMBERSHIP:
1520 if (optlen != sizeof (struct target_ip_mreq_source))
1521 return -TARGET_EINVAL;
1523 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1524 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1525 unlock_user (ip_mreq_source, optval_addr, 0);
1526 break;
1528 default:
1529 goto unimplemented;
1531 break;
1532 case SOL_IPV6:
1533 switch (optname) {
1534 case IPV6_MTU_DISCOVER:
1535 case IPV6_MTU:
1536 case IPV6_V6ONLY:
1537 case IPV6_RECVPKTINFO:
1538 val = 0;
1539 if (optlen < sizeof(uint32_t)) {
1540 return -TARGET_EINVAL;
1542 if (get_user_u32(val, optval_addr)) {
1543 return -TARGET_EFAULT;
1545 ret = get_errno(setsockopt(sockfd, level, optname,
1546 &val, sizeof(val)));
1547 break;
1548 default:
1549 goto unimplemented;
1551 break;
1552 case SOL_RAW:
1553 switch (optname) {
1554 case ICMP_FILTER:
1555 /* struct icmp_filter takes an u32 value */
1556 if (optlen < sizeof(uint32_t)) {
1557 return -TARGET_EINVAL;
1560 if (get_user_u32(val, optval_addr)) {
1561 return -TARGET_EFAULT;
1563 ret = get_errno(setsockopt(sockfd, level, optname,
1564 &val, sizeof(val)));
1565 break;
1567 default:
1568 goto unimplemented;
1570 break;
1571 case TARGET_SOL_SOCKET:
1572 switch (optname) {
1573 case TARGET_SO_RCVTIMEO:
1575 struct timeval tv;
1577 optname = SO_RCVTIMEO;
1579 set_timeout:
1580 if (optlen != sizeof(struct target_timeval)) {
1581 return -TARGET_EINVAL;
1584 if (copy_from_user_timeval(&tv, optval_addr)) {
1585 return -TARGET_EFAULT;
1588 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1589 &tv, sizeof(tv)));
1590 return ret;
1592 case TARGET_SO_SNDTIMEO:
1593 optname = SO_SNDTIMEO;
1594 goto set_timeout;
1595 case TARGET_SO_ATTACH_FILTER:
1597 struct target_sock_fprog *tfprog;
1598 struct target_sock_filter *tfilter;
1599 struct sock_fprog fprog;
1600 struct sock_filter *filter;
1601 int i;
1603 if (optlen != sizeof(*tfprog)) {
1604 return -TARGET_EINVAL;
1606 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1607 return -TARGET_EFAULT;
1609 if (!lock_user_struct(VERIFY_READ, tfilter,
1610 tswapal(tfprog->filter), 0)) {
1611 unlock_user_struct(tfprog, optval_addr, 1);
1612 return -TARGET_EFAULT;
1615 fprog.len = tswap16(tfprog->len);
1616 filter = g_try_new(struct sock_filter, fprog.len);
1617 if (filter == NULL) {
1618 unlock_user_struct(tfilter, tfprog->filter, 1);
1619 unlock_user_struct(tfprog, optval_addr, 1);
1620 return -TARGET_ENOMEM;
1622 for (i = 0; i < fprog.len; i++) {
1623 filter[i].code = tswap16(tfilter[i].code);
1624 filter[i].jt = tfilter[i].jt;
1625 filter[i].jf = tfilter[i].jf;
1626 filter[i].k = tswap32(tfilter[i].k);
1628 fprog.filter = filter;
1630 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1631 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1632 g_free(filter);
1634 unlock_user_struct(tfilter, tfprog->filter, 1);
1635 unlock_user_struct(tfprog, optval_addr, 1);
1636 return ret;
1638 case TARGET_SO_BINDTODEVICE:
1640 char *dev_ifname, *addr_ifname;
1642 if (optlen > IFNAMSIZ - 1) {
1643 optlen = IFNAMSIZ - 1;
1645 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1646 if (!dev_ifname) {
1647 return -TARGET_EFAULT;
1649 optname = SO_BINDTODEVICE;
1650 addr_ifname = alloca(IFNAMSIZ);
1651 memcpy(addr_ifname, dev_ifname, optlen);
1652 addr_ifname[optlen] = 0;
1653 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1654 addr_ifname, optlen));
1655 unlock_user (dev_ifname, optval_addr, 0);
1656 return ret;
1658 /* Options with 'int' argument. */
1659 case TARGET_SO_DEBUG:
1660 optname = SO_DEBUG;
1661 break;
1662 case TARGET_SO_REUSEADDR:
1663 optname = SO_REUSEADDR;
1664 break;
1665 case TARGET_SO_TYPE:
1666 optname = SO_TYPE;
1667 break;
1668 case TARGET_SO_ERROR:
1669 optname = SO_ERROR;
1670 break;
1671 case TARGET_SO_DONTROUTE:
1672 optname = SO_DONTROUTE;
1673 break;
1674 case TARGET_SO_BROADCAST:
1675 optname = SO_BROADCAST;
1676 break;
1677 case TARGET_SO_SNDBUF:
1678 optname = SO_SNDBUF;
1679 break;
1680 case TARGET_SO_SNDBUFFORCE:
1681 optname = SO_SNDBUFFORCE;
1682 break;
1683 case TARGET_SO_RCVBUF:
1684 optname = SO_RCVBUF;
1685 break;
1686 case TARGET_SO_RCVBUFFORCE:
1687 optname = SO_RCVBUFFORCE;
1688 break;
1689 case TARGET_SO_KEEPALIVE:
1690 optname = SO_KEEPALIVE;
1691 break;
1692 case TARGET_SO_OOBINLINE:
1693 optname = SO_OOBINLINE;
1694 break;
1695 case TARGET_SO_NO_CHECK:
1696 optname = SO_NO_CHECK;
1697 break;
1698 case TARGET_SO_PRIORITY:
1699 optname = SO_PRIORITY;
1700 break;
1701 #ifdef SO_BSDCOMPAT
1702 case TARGET_SO_BSDCOMPAT:
1703 optname = SO_BSDCOMPAT;
1704 break;
1705 #endif
1706 case TARGET_SO_PASSCRED:
1707 optname = SO_PASSCRED;
1708 break;
1709 case TARGET_SO_PASSSEC:
1710 optname = SO_PASSSEC;
1711 break;
1712 case TARGET_SO_TIMESTAMP:
1713 optname = SO_TIMESTAMP;
1714 break;
1715 case TARGET_SO_RCVLOWAT:
1716 optname = SO_RCVLOWAT;
1717 break;
1718 break;
1719 default:
1720 goto unimplemented;
1722 if (optlen < sizeof(uint32_t))
1723 return -TARGET_EINVAL;
1725 if (get_user_u32(val, optval_addr))
1726 return -TARGET_EFAULT;
1727 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1728 break;
1729 default:
1730 unimplemented:
1731 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1732 ret = -TARGET_ENOPROTOOPT;
1734 return ret;
/* do_getsockopt() Must return target values and target errnos.
 *
 * Translates a guest getsockopt(): performs the host call, then converts
 * the returned option value and length back to guest conventions.
 * Most options funnel through the shared int_case tail, which reads an
 * int from the host and writes 1 or 4 bytes to the guest depending on
 * the guest-supplied length.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            /* NOTE(review): a partial (truncated) target_ucred write is
             * not handled specially — the full struct is written even
             * when len < sizeof(*tcr); only the reported len is clamped. */
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* lv and val are both 4 bytes, so sizeof(lv) works as the host
         * buffer length here even though val is what's being filled. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            /* SOCK_* constants differ between host and target. */
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* Guest asked for a single byte and the value fits. */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Build a host struct iovec array from a guest iovec array, locking each
 * guest buffer into host memory.
 *
 * Error reporting is via errno (this mimics the libc convention since the
 * function returns a pointer): NULL with errno==0 means count was zero;
 * NULL with errno set means failure. Bad buffers after the first produce
 * a partial vector (NULL base / zero length) rather than an error, to
 * emulate the kernel's partial-write behaviour. Callers release the
 * result with unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: unlock every buffer locked so far (entries with a zero
     * guest length were never locked). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
/* Release an iovec previously built by lock_iovec(): unlock each guest
 * buffer (writing back vec[i].iov_len bytes when copy is set, i.e. for
 * read-style operations) and free the host vector. Re-reads the guest
 * iovec array to recover each buffer's guest address. */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         int count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                /* lock_iovec stopped locking at the first negative
                 * length, so stop unlocking there too. */
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
/* Convert a guest socket type (base SOCK_* value plus TARGET_SOCK_CLOEXEC
 * / TARGET_SOCK_NONBLOCK flags) into the host encoding, in place.
 * Returns 0 on success, or -TARGET_EINVAL when the host cannot express a
 * requested flag at all. A missing SOCK_NONBLOCK with O_NONBLOCK present
 * is not an error: sock_flags_fixup() emulates it after socket creation.
 */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        /* Other SOCK_* values are assumed to match the host encoding. */
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
/* Try to emulate socket type flags after socket creation.
 *
 * On hosts without SOCK_NONBLOCK, emulate TARGET_SOCK_NONBLOCK by
 * setting O_NONBLOCK via fcntl. On any failure the fd is closed and
 * -TARGET_EINVAL returned; otherwise the fd is returned unchanged.
 *
 * Fix vs. original: the F_GETFL result was used unchecked — if fcntl
 * failed it returned -1, and (O_NONBLOCK | -1) would have been passed
 * to F_SETFL. Now a failed F_GETFL is treated as an error too.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (flags == -1 || fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
/* Sockaddr translator for obsolete SOCK_PACKET fds (sockaddr_pkt):
 * copy the guest sockaddr verbatim and swap only sa_family, since
 * spkt_protocol stays in network (big-endian) byte order.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static abi_long packet_target_to_host_sockaddr(void *host_addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    struct sockaddr *addr = host_addr;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr) {
        return -TARGET_EFAULT;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = tswap16(target_saddr->sa_family);
    /* spkt_protocol is big-endian */

    unlock_user(target_saddr, target_addr, 0);
    return 0;
}
/* fd translator registered for SOCK_PACKET sockets by do_socket():
 * only the sockaddr conversion differs from the generic path. */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    /* Keep the guest's original type: sock_flags_fixup needs the raw
     * TARGET_SOCK_* flag bits after type is converted in place. */
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Netlink sockets are not emulated here. */
    if (domain == PF_NETLINK)
        return -TARGET_EAFNOSUPPORT;

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        /* packet socket protocols are specified in network byte order */
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        }
    }
    return ret;
}
2141 /* do_bind() Must return target values and target errnos. */
2142 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2143 socklen_t addrlen)
2145 void *addr;
2146 abi_long ret;
2148 if ((int)addrlen < 0) {
2149 return -TARGET_EINVAL;
2152 addr = alloca(addrlen+1);
2154 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2155 if (ret)
2156 return ret;
2158 return get_errno(bind(sockfd, addr, addrlen));
2161 /* do_connect() Must return target values and target errnos. */
2162 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2163 socklen_t addrlen)
2165 void *addr;
2166 abi_long ret;
2168 if ((int)addrlen < 0) {
2169 return -TARGET_EINVAL;
2172 addr = alloca(addrlen+1);
2174 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2175 if (ret)
2176 return ret;
2178 return get_errno(connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation for an already-locked guest msghdr:
 * converts the name, iovec and control data, performs the host
 * sendmsg() or recvmsg() (send selects direction), and for receives
 * converts the results back to guest format.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        /* +1 for the possible AF_UNIX NUL fixup in the converter. */
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Twice the guest control size: host cmsg headers/alignment can be
     * larger than the target's (see target_to_host_cmsg). */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        /* lock_iovec reports failure via errno. */
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            /* Preserve the byte count across the conversion calls. */
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
/* sendmsg/recvmsg entry point: lock the guest msghdr (read-only for
 * sends, writable for receives) and delegate to
 * do_sendrecvmsg_locked(). Returns target values and target errnos. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
2266 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2267 * so it might not have this *mmsg-specific flag either.
2269 #ifndef MSG_WAITFORONE
2270 #define MSG_WAITFORONE 0x10000
2271 #endif
/* sendmmsg/recvmmsg emulation: loop do_sendrecvmsg_locked() over the
 * guest mmsghdr array, recording each datagram's byte count in msg_len.
 * Stops at the first error; if any datagrams succeeded their count is
 * returned instead of the error (the kernel's documented behaviour). */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Write back only the entries actually processed. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    /* Guaranteed by the callers (see comment above). */
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif
/* do_accept4() Must return target values and target errnos.
 * Emulates accept4(2): translates the SOCK_* flag bits, accepts the
 * connection on the host, and copies the peer sockaddr (if requested)
 * back into guest memory.
 */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    /* SOCK_NONBLOCK/SOCK_CLOEXEC share encoding with O_* flags. */
    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    /* NULL addr: caller doesn't want the peer address at all. */
    if (target_addr == 0) {
        return get_errno(accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_getpeername() Must return target values and target errnos.
 * Fetches the remote address of a connected socket and writes it (plus the
 * updated length) back to guest memory.
 */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    /* Negative length (as signed) is rejected like the kernel does. */
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_getsockname() Must return target values and target errnos.
 * Same shape as do_getpeername() but for the local socket address.
 */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_socketpair() Must return target values and target errnos.
 * Creates the pair on the host and stores the two fds as s32 into the
 * guest's int[2] array at target_tab_addr.
 */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    /* Translate SOCK_* type and embedded NONBLOCK/CLOEXEC bits in place. */
    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos.
 * Handles both sendto(2) (target_addr != 0) and send(2) (target_addr == 0).
 * The payload is locked read-only; the destination sockaddr, if present,
 * is converted to host layout on the stack.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        /* +1 slack byte — presumably for AF_UNIX path termination in
         * target_to_host_sockaddr(); confirm against that helper. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(send(fd, host_msg, len, flags));
    }
    /* Send path never modifies the buffer: unlock with length 0. */
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos.
 * Handles both recvfrom(2) (target_addr != 0) and recv(2).  On success the
 * received bytes are copied back to the guest via unlock_user(len); every
 * failure path unlocks with length 0 so nothing is copied back.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(qemu_recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            /* Also report the (possibly truncated) source address. */
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 * Demultiplexer for the legacy socketcall(2) syscall: the per-call argument
 * count table ac[] tells us how many abi_longs to fetch from the guest
 * argument block at vptr before dispatching to the individual do_* helper.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        /* plain accept == accept4 with zero flags */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
/* Fixed-size bookkeeping table of guest shmat() mappings, used by
 * do_shmat()/do_shmdt() to recover the mapping size at detach time.
 */
#define N_SHM_REGIONS 32

static struct shm_region {
    abi_ulong start;   /* guest virtual address of the attached segment */
    abi_ulong size;    /* segment size in bytes (from shmctl IPC_STAT) */
    bool in_use;       /* slot occupancy flag */
} shm_regions[N_SHM_REGIONS];
/* Guest-layout semid_ds.  The padding words mirror the 32-bit kernel ABI;
 * PPC64 has no pad words around the time fields.
 */
struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;       /* last semop time */
#if !defined(TARGET_PPC64)
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;       /* last change time */
#if !defined(TARGET_PPC64)
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;       /* number of semaphores in set */
    abi_ulong __unused3;
    abi_ulong __unused4;
};
/* Copy a guest ipc_perm (embedded at the start of a guest semid_ds at
 * target_addr) into a host struct ipc_perm, byteswapping each field.
 * mode/__seq are 32-bit on some targets and 16-bit elsewhere.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Inverse of target_to_host_ipc_perm(): write a host ipc_perm into the
 * guest semid_ds at target_addr.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Convert a guest semid_ds to host layout (perm struct plus the
 * nsems/otime/ctime fields).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* The ipc_perm helper re-locks the same struct; harmless but note it. */
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Inverse conversion: host semid_ds back into guest memory. */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Guest-layout seminfo, field-for-field identical to the kernel's struct
 * seminfo (all plain ints), used by semctl IPC_INFO/SEM_INFO.
 */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

/* Copy a host seminfo into guest memory (semctl IPC_INFO/SEM_INFO reply).
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    /* __put_user does the byteswap per field */
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/* Host-side semctl argument union (glibc does not always define it). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest-side view: every pointer member is an abi_ulong guest address. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
/* For semctl SETALL: allocate *host_array and fill it from the guest's
 * unsigned short array at target_addr.  The set size (nsems) is queried
 * from the kernel via IPC_STAT.  On success the caller owns *host_array
 * (it is freed later by host_to_target_semarray()).
 * Returns 0, or a negative target errno.
 */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
2796 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2797 unsigned short **host_array)
2799 int nsems;
2800 unsigned short *array;
2801 union semun semun;
2802 struct semid_ds semid_ds;
2803 int i, ret;
2805 semun.buf = &semid_ds;
2807 ret = semctl(semid, 0, IPC_STAT, semun);
2808 if (ret == -1)
2809 return get_errno(ret);
2811 nsems = semid_ds.sem_nsems;
2813 array = lock_user(VERIFY_WRITE, target_addr,
2814 nsems*sizeof(unsigned short), 0);
2815 if (!array)
2816 return -TARGET_EFAULT;
2818 for(i=0; i<nsems; i++) {
2819 __put_user((*host_array)[i], &array[i]);
2821 g_free(*host_array);
2822 unlock_user(array, target_addr, 1);
2824 return 0;
/* Emulate semctl(2).  target_arg is the raw guest word that holds the
 * semun union; its interpretation depends on cmd.  Returns target errnos.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* strip IPC_64 and friends; host semctl gets the bare command */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* round-trip the whole unsigned short array through a host copy */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* these commands ignore the semun argument entirely */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest-layout sembuf; same field widths as the host struct sembuf. */
struct target_sembuf {
    unsigned short sem_num;   /* semaphore index in set */
    short sem_op;             /* operation (negative = acquire) */
    short sem_flg;            /* IPC_NOWAIT, SEM_UNDO */
};

/* Copy nsops guest sembuf entries into a caller-provided host array,
 * byteswapping each field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
/* Emulate semop(2): convert the guest sembuf array then forward to the
 * host.  NOTE(review): sops is a VLA sized by the guest-controlled nsops
 * with no upper bound here — the kernel caps at SEMOPM; large values risk
 * stack overflow.  Consider bounding before the VLA.
 */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(semop(semid, sops, nsops));
}
/* Guest-layout msqid_ds; 32-bit ABIs carry pad words after each time_t. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;       /* last msgsnd time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;       /* last msgrcv time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;       /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;    /* current bytes in queue */
    abi_ulong msg_qnum;        /* messages in queue */
    abi_ulong msg_qbytes;      /* max bytes allowed in queue */
    abi_ulong msg_lspid;       /* pid of last msgsnd */
    abi_ulong msg_lrpid;       /* pid of last msgrcv */
    abi_ulong __unused4;
    abi_ulong __unused5;
};

/* Convert a guest msqid_ds at target_addr into host layout.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

/* Inverse conversion: host msqid_ds written back to guest memory. */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
/* Guest-layout msginfo for msgctl IPC_INFO/MSG_INFO. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

/* Copy a host msginfo into guest memory.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
/* Emulate msgctl(2); ptr is the guest address of the command-specific
 * buffer.  Returns target errnos.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* strip IPC_64 flag bits for the host call */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* round-trip msqid_ds through a host copy in both directions */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* kernel ABI: msginfo is returned through the msqid_ds pointer */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-layout msgbuf header; mtext is the variable-length payload. */
struct target_msgbuf {
    abi_long mtype;    /* message type, must be > 0 */
    char mtext[1];     /* payload follows (flexible-array idiom, pre-C99) */
};

/* Emulate msgsnd(2): copy type+payload into a host msgbuf and forward.
 * Returns target errnos.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype field preceding mtext */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
3095 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3096 unsigned int msgsz, abi_long msgtyp,
3097 int msgflg)
3099 struct target_msgbuf *target_mb;
3100 char *target_mtext;
3101 struct msgbuf *host_mb;
3102 abi_long ret = 0;
3104 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3105 return -TARGET_EFAULT;
3107 host_mb = g_malloc(msgsz+sizeof(long));
3108 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3110 if (ret > 0) {
3111 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3112 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3113 if (!target_mtext) {
3114 ret = -TARGET_EFAULT;
3115 goto end;
3117 memcpy(target_mb->mtext, host_mb->mtext, ret);
3118 unlock_user(target_mtext, target_mtext_addr, ret);
3121 target_mb->mtype = tswapal(host_mb->mtype);
3123 end:
3124 if (target_mb)
3125 unlock_user_struct(target_mb, msgp, 1);
3126 g_free(host_mb);
3127 return ret;
/* Convert a guest shmid_ds at target_addr into host layout.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Inverse conversion: host shmid_ds written back to guest memory. */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Guest-layout shminfo for shmctl IPC_INFO. */
struct target_shminfo {
    abi_ulong shmmax;   /* max segment size */
    abi_ulong shmmin;   /* min segment size */
    abi_ulong shmmni;   /* max number of segments */
    abi_ulong shmseg;   /* max segments per process */
    abi_ulong shmall;   /* max total shared memory (pages) */
};

/* Copy a host shminfo into guest memory.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
/* Guest-layout shm_info for shmctl SHM_INFO. */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;          /* total allocated shm (pages) */
    abi_ulong shm_rss;          /* resident shm (pages) */
    abi_ulong shm_swp;          /* swapped shm (pages) */
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

/* Copy a host shm_info into guest memory.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
/* Emulate shmctl(2); buf is the guest address of the command-specific
 * structure.  Returns target errnos.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* strip IPC_64 flag bits for the host call */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* kernel ABI: info structs are returned through the shmid_ds ptr */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
/* Emulate shmat(2).  If the guest supplies an address we attach at the
 * corresponding host address; otherwise we pick a free guest VA range via
 * mmap_find_vma() and force the attach there with SHM_REMAP.  On success
 * the mapping is recorded in shm_regions[] (so do_shmdt can size the
 * detach) and the guest page flags are set.  Runs under mmap_lock.
 */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: replace whatever reservation is at mmap_start */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Record the mapping in the first free bookkeeping slot.
     * NOTE(review): if all N_SHM_REGIONS slots are busy the attach still
     * succeeds but do_shmdt() won't be able to clear page flags for it. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
/* Emulate shmdt(2): release the bookkeeping slot recorded by do_shmat()
 * (clearing guest page flags over the segment) and detach on the host.
 */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos.
 * Demultiplexer for the legacy ipc(2) syscall: the low 16 bits of 'call'
 * select the operation, the high 16 bits carry an ABI version used by the
 * msgrcv/shmat compatibility paths.
 */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old ABI: ptr points to a {msgp, msgtyp} indirection block */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* attach address is returned through *third */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

    /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
3432 /* kernel structure types definitions */
/* First expansion of syscall_types.h: generate one STRUCT_<name> enum
 * constant per kernel structure that the thunk layer knows about. */
3434 #define STRUCT(name, ...) STRUCT_ ## name,
3435 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3436 enum {
3437 #include "syscall_types.h"
3438 STRUCT_MAX
3440 #undef STRUCT
3441 #undef STRUCT_SPECIAL
/* Second expansion: generate the field-type descriptor array
 * struct_<name>_def[] for each (non-special) structure. */
3443 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3444 #define STRUCT_SPECIAL(name)
3445 #include "syscall_types.h"
3446 #undef STRUCT
3447 #undef STRUCT_SPECIAL
3449 typedef struct IOCTLEntry IOCTLEntry;
/* Signature for custom per-ioctl handlers (the IOCTL_SPECIAL entries). */
3451 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3452 int fd, int cmd, abi_long arg);
/* One row of the ioctl translation table: target command number, host
 * command number, printable name, access direction, optional custom
 * handler, and the thunk type description of the argument. */
3454 struct IOCTLEntry {
3455 int target_cmd;
3456 unsigned int host_cmd;
3457 const char *name;
3458 int access;
3459 do_ioctl_fn *do_ioctl;
3460 const argtype arg_type[5];
/* Access-direction flags for IOCTLEntry.access. */
3463 #define IOC_R 0x0001
3464 #define IOC_W 0x0002
3465 #define IOC_RW (IOC_R | IOC_W)
/* Size of the on-stack scratch buffer used for ioctl argument conversion. */
3467 #define MAX_STRUCT_SIZE 4096
3469 #ifdef CONFIG_FIEMAP
3470 /* So fiemap access checks don't overflow on 32 bit systems.
3471 * This is very slightly smaller than the limit imposed by
3472 * the underlying kernel.
3474 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3475 / sizeof(struct fiemap_extent))
/* Custom handler for FS_IOC_FIEMAP: the argument is a variable-sized
 * struct (header plus fm_extent_count trailing extents), so the generic
 * fixed-size thunk conversion cannot be used. */
3477 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3478 int fd, int cmd, abi_long arg)
3480 /* The parameter for this ioctl is a struct fiemap followed
3481 * by an array of struct fiemap_extent whose size is set
3482 * in fiemap->fm_extent_count. The array is filled in by the
3483 * ioctl.
3485 int target_size_in, target_size_out;
3486 struct fiemap *fm;
3487 const argtype *arg_type = ie->arg_type;
3488 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3489 void *argptr, *p;
3490 abi_long ret;
3491 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3492 uint32_t outbufsz;
3493 int free_fm = 0;
3495 assert(arg_type[0] == TYPE_PTR);
3496 assert(ie->access == IOC_RW);
3497 arg_type++;
/* Convert the guest's fiemap header into buf_temp. */
3498 target_size_in = thunk_type_size(arg_type, 0);
3499 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3500 if (!argptr) {
3501 return -TARGET_EFAULT;
3503 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3504 unlock_user(argptr, arg, 0);
3505 fm = (struct fiemap *)buf_temp;
/* Bound the extent count so the size computation below cannot overflow. */
3506 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3507 return -TARGET_EINVAL;
3510 outbufsz = sizeof (*fm) +
3511 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3513 if (outbufsz > MAX_STRUCT_SIZE) {
3514 /* We can't fit all the extents into the fixed size buffer.
3515 * Allocate one that is large enough and use it instead.
3517 fm = g_try_malloc(outbufsz);
3518 if (!fm) {
3519 return -TARGET_ENOMEM;
3521 memcpy(fm, buf_temp, sizeof(struct fiemap));
3522 free_fm = 1;
3524 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3525 if (!is_error(ret)) {
3526 target_size_out = target_size_in;
3527 /* An extent_count of 0 means we were only counting the extents
3528 * so there are no structs to copy
3530 if (fm->fm_extent_count != 0) {
3531 target_size_out += fm->fm_mapped_extents * extent_size;
3533 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3534 if (!argptr) {
3535 ret = -TARGET_EFAULT;
3536 } else {
3537 /* Convert the struct fiemap */
3538 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3539 if (fm->fm_extent_count != 0) {
3540 p = argptr + target_size_in;
3541 /* ...and then all the struct fiemap_extents */
3542 for (i = 0; i < fm->fm_mapped_extents; i++) {
3543 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3544 THUNK_TARGET);
3545 p += extent_size;
3548 unlock_user(argptr, arg, target_size_out);
3551 if (free_fm) {
3552 g_free(fm);
3554 return ret;
3556 #endif
3558 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3559 int fd, int cmd, abi_long arg)
3561 const argtype *arg_type = ie->arg_type;
3562 int target_size;
3563 void *argptr;
3564 int ret;
3565 struct ifconf *host_ifconf;
3566 uint32_t outbufsz;
3567 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3568 int target_ifreq_size;
3569 int nb_ifreq;
3570 int free_buf = 0;
3571 int i;
3572 int target_ifc_len;
3573 abi_long target_ifc_buf;
3574 int host_ifc_len;
3575 char *host_ifc_buf;
3577 assert(arg_type[0] == TYPE_PTR);
3578 assert(ie->access == IOC_RW);
3580 arg_type++;
3581 target_size = thunk_type_size(arg_type, 0);
3583 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3584 if (!argptr)
3585 return -TARGET_EFAULT;
3586 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3587 unlock_user(argptr, arg, 0);
3589 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3590 target_ifc_len = host_ifconf->ifc_len;
3591 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3593 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3594 nb_ifreq = target_ifc_len / target_ifreq_size;
3595 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3597 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3598 if (outbufsz > MAX_STRUCT_SIZE) {
3599 /* We can't fit all the extents into the fixed size buffer.
3600 * Allocate one that is large enough and use it instead.
3602 host_ifconf = malloc(outbufsz);
3603 if (!host_ifconf) {
3604 return -TARGET_ENOMEM;
3606 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3607 free_buf = 1;
3609 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3611 host_ifconf->ifc_len = host_ifc_len;
3612 host_ifconf->ifc_buf = host_ifc_buf;
3614 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3615 if (!is_error(ret)) {
3616 /* convert host ifc_len to target ifc_len */
3618 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3619 target_ifc_len = nb_ifreq * target_ifreq_size;
3620 host_ifconf->ifc_len = target_ifc_len;
3622 /* restore target ifc_buf */
3624 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3626 /* copy struct ifconf to target user */
3628 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3629 if (!argptr)
3630 return -TARGET_EFAULT;
3631 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3632 unlock_user(argptr, arg, target_size);
3634 /* copy ifreq[] to target user */
3636 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3637 for (i = 0; i < nb_ifreq ; i++) {
3638 thunk_convert(argptr + i * target_ifreq_size,
3639 host_ifc_buf + i * sizeof(struct ifreq),
3640 ifreq_arg_type, THUNK_TARGET);
3642 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3645 if (free_buf) {
3646 free(host_ifconf);
3649 return ret;
/* Custom handler for the device-mapper DM_* ioctls.  Every DM ioctl passes
 * a struct dm_ioctl header followed by a command-specific variable-sized
 * payload at data_start, so each command's payload is converted by hand:
 * guest->host before the ioctl, host->guest for the result. */
3652 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3653 int cmd, abi_long arg)
3655 void *argptr;
3656 struct dm_ioctl *host_dm;
3657 abi_long guest_data;
3658 uint32_t guest_data_size;
3659 int target_size;
3660 const argtype *arg_type = ie->arg_type;
3661 abi_long ret;
3662 void *big_buf = NULL;
3663 char *host_data;
/* Convert the fixed dm_ioctl header into buf_temp first. */
3665 arg_type++;
3666 target_size = thunk_type_size(arg_type, 0);
3667 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3668 if (!argptr) {
3669 ret = -TARGET_EFAULT;
3670 goto out;
3672 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3673 unlock_user(argptr, arg, 0);
3675 /* buf_temp is too small, so fetch things into a bigger buffer */
/* data_size*2 leaves room for the host payload after the header. */
3676 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3677 memcpy(big_buf, buf_temp, target_size);
3678 buf_temp = big_buf;
3679 host_dm = big_buf;
3681 guest_data = arg + host_dm->data_start;
3682 if ((guest_data - arg) < 0) {
/* NOTE(review): host errno constant used here; arguably should be
 * -TARGET_EINVAL like the other error paths — confirm before changing. */
3683 ret = -EINVAL;
3684 goto out;
3686 guest_data_size = host_dm->data_size - host_dm->data_start;
3687 host_data = (char*)host_dm + host_dm->data_start;
/* NOTE(review): this lock_user() result is not checked for NULL before
 * the memcpy/convert below — a bad guest pointer would fault. */
3689 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
/* Convert the command-specific input payload guest -> host. */
3690 switch (ie->host_cmd) {
3691 case DM_REMOVE_ALL:
3692 case DM_LIST_DEVICES:
3693 case DM_DEV_CREATE:
3694 case DM_DEV_REMOVE:
3695 case DM_DEV_SUSPEND:
3696 case DM_DEV_STATUS:
3697 case DM_DEV_WAIT:
3698 case DM_TABLE_STATUS:
3699 case DM_TABLE_CLEAR:
3700 case DM_TABLE_DEPS:
3701 case DM_LIST_VERSIONS:
3702 /* no input data */
3703 break;
3704 case DM_DEV_RENAME:
3705 case DM_DEV_SET_GEOMETRY:
3706 /* data contains only strings */
3707 memcpy(host_data, argptr, guest_data_size);
3708 break;
3709 case DM_TARGET_MSG:
3710 memcpy(host_data, argptr, guest_data_size);
/* Leading u64 sector number needs byte-swapping. */
3711 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3712 break;
3713 case DM_TABLE_LOAD:
/* Payload is a chain of dm_target_spec structs, each followed by a
 * parameter string; 'next' links to the following spec. */
3715 void *gspec = argptr;
3716 void *cur_data = host_data;
3717 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3718 int spec_size = thunk_type_size(arg_type, 0);
3719 int i;
3721 for (i = 0; i < host_dm->target_count; i++) {
3722 struct dm_target_spec *spec = cur_data;
3723 uint32_t next;
3724 int slen;
3726 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3727 slen = strlen((char*)gspec + spec_size) + 1;
3728 next = spec->next;
3729 spec->next = sizeof(*spec) + slen;
3730 strcpy((char*)&spec[1], gspec + spec_size);
3731 gspec += next;
3732 cur_data += spec->next;
3734 break;
3736 default:
3737 ret = -TARGET_EINVAL;
3738 unlock_user(argptr, guest_data, 0);
3739 goto out;
3741 unlock_user(argptr, guest_data, 0);
3743 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3744 if (!is_error(ret)) {
/* Convert the command-specific result payload host -> guest. */
3745 guest_data = arg + host_dm->data_start;
3746 guest_data_size = host_dm->data_size - host_dm->data_start;
3747 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3748 switch (ie->host_cmd) {
3749 case DM_REMOVE_ALL:
3750 case DM_DEV_CREATE:
3751 case DM_DEV_REMOVE:
3752 case DM_DEV_RENAME:
3753 case DM_DEV_SUSPEND:
3754 case DM_DEV_STATUS:
3755 case DM_TABLE_LOAD:
3756 case DM_TABLE_CLEAR:
3757 case DM_TARGET_MSG:
3758 case DM_DEV_SET_GEOMETRY:
3759 /* no return data */
3760 break;
3761 case DM_LIST_DEVICES:
/* Walk the chain of dm_name_list records, converting each and
 * truncating with DM_BUFFER_FULL_FLAG if the guest buffer is short. */
3763 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3764 uint32_t remaining_data = guest_data_size;
3765 void *cur_data = argptr;
3766 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3767 int nl_size = 12; /* can't use thunk_size due to alignment */
3769 while (1) {
3770 uint32_t next = nl->next;
3771 if (next) {
3772 nl->next = nl_size + (strlen(nl->name) + 1);
3774 if (remaining_data < nl->next) {
3775 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3776 break;
3778 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3779 strcpy(cur_data + nl_size, nl->name);
3780 cur_data += nl->next;
3781 remaining_data -= nl->next;
3782 if (!next) {
3783 break;
3785 nl = (void*)nl + next;
3787 break;
3789 case DM_DEV_WAIT:
3790 case DM_TABLE_STATUS:
/* Result is target_count dm_target_spec records plus strings. */
3792 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3793 void *cur_data = argptr;
3794 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3795 int spec_size = thunk_type_size(arg_type, 0);
3796 int i;
3798 for (i = 0; i < host_dm->target_count; i++) {
3799 uint32_t next = spec->next;
3800 int slen = strlen((char*)&spec[1]) + 1;
3801 spec->next = (cur_data - argptr) + spec_size + slen;
3802 if (guest_data_size < spec->next) {
3803 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3804 break;
3806 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3807 strcpy(cur_data + spec_size, (char*)&spec[1]);
3808 cur_data = argptr + spec->next;
3809 spec = (void*)host_dm + host_dm->data_start + next;
3811 break;
3813 case DM_TABLE_DEPS:
/* Result: a u32 count followed by an array of u64 device numbers. */
3815 void *hdata = (void*)host_dm + host_dm->data_start;
3816 int count = *(uint32_t*)hdata;
3817 uint64_t *hdev = hdata + 8;
3818 uint64_t *gdev = argptr + 8;
3819 int i;
3821 *(uint32_t*)argptr = tswap32(count);
3822 for (i = 0; i < count; i++) {
3823 *gdev = tswap64(*hdev);
3824 gdev++;
3825 hdev++;
3827 break;
3829 case DM_LIST_VERSIONS:
/* Walk the chain of dm_target_versions records, as for LIST_DEVICES. */
3831 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3832 uint32_t remaining_data = guest_data_size;
3833 void *cur_data = argptr;
3834 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3835 int vers_size = thunk_type_size(arg_type, 0);
3837 while (1) {
3838 uint32_t next = vers->next;
3839 if (next) {
3840 vers->next = vers_size + (strlen(vers->name) + 1);
3842 if (remaining_data < vers->next) {
3843 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3844 break;
3846 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3847 strcpy(cur_data + vers_size, vers->name);
3848 cur_data += vers->next;
3849 remaining_data -= vers->next;
3850 if (!next) {
3851 break;
3853 vers = (void*)vers + next;
3855 break;
3857 default:
3858 unlock_user(argptr, guest_data, 0);
3859 ret = -TARGET_EINVAL;
3860 goto out;
3862 unlock_user(argptr, guest_data, guest_data_size);
/* Finally write the (possibly flag-updated) dm_ioctl header back. */
3864 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3865 if (!argptr) {
3866 ret = -TARGET_EFAULT;
3867 goto out;
3869 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3870 unlock_user(argptr, arg, target_size);
3872 out:
3873 g_free(big_buf);
3874 return ret;
3877 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3878 int cmd, abi_long arg)
3880 void *argptr;
3881 int target_size;
3882 const argtype *arg_type = ie->arg_type;
3883 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
3884 abi_long ret;
3886 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
3887 struct blkpg_partition host_part;
3889 /* Read and convert blkpg */
3890 arg_type++;
3891 target_size = thunk_type_size(arg_type, 0);
3892 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3893 if (!argptr) {
3894 ret = -TARGET_EFAULT;
3895 goto out;
3897 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3898 unlock_user(argptr, arg, 0);
3900 switch (host_blkpg->op) {
3901 case BLKPG_ADD_PARTITION:
3902 case BLKPG_DEL_PARTITION:
3903 /* payload is struct blkpg_partition */
3904 break;
3905 default:
3906 /* Unknown opcode */
3907 ret = -TARGET_EINVAL;
3908 goto out;
3911 /* Read and convert blkpg->data */
3912 arg = (abi_long)(uintptr_t)host_blkpg->data;
3913 target_size = thunk_type_size(part_arg_type, 0);
3914 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3915 if (!argptr) {
3916 ret = -TARGET_EFAULT;
3917 goto out;
3919 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
3920 unlock_user(argptr, arg, 0);
3922 /* Swizzle the data pointer to our local copy and call! */
3923 host_blkpg->data = &host_part;
3924 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
3926 out:
3927 return ret;
3930 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3931 int fd, int cmd, abi_long arg)
3933 const argtype *arg_type = ie->arg_type;
3934 const StructEntry *se;
3935 const argtype *field_types;
3936 const int *dst_offsets, *src_offsets;
3937 int target_size;
3938 void *argptr;
3939 abi_ulong *target_rt_dev_ptr;
3940 unsigned long *host_rt_dev_ptr;
3941 abi_long ret;
3942 int i;
3944 assert(ie->access == IOC_W);
3945 assert(*arg_type == TYPE_PTR);
3946 arg_type++;
3947 assert(*arg_type == TYPE_STRUCT);
3948 target_size = thunk_type_size(arg_type, 0);
3949 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3950 if (!argptr) {
3951 return -TARGET_EFAULT;
3953 arg_type++;
3954 assert(*arg_type == (int)STRUCT_rtentry);
3955 se = struct_entries + *arg_type++;
3956 assert(se->convert[0] == NULL);
3957 /* convert struct here to be able to catch rt_dev string */
3958 field_types = se->field_types;
3959 dst_offsets = se->field_offsets[THUNK_HOST];
3960 src_offsets = se->field_offsets[THUNK_TARGET];
3961 for (i = 0; i < se->nb_fields; i++) {
3962 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3963 assert(*field_types == TYPE_PTRVOID);
3964 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3965 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3966 if (*target_rt_dev_ptr != 0) {
3967 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3968 tswapal(*target_rt_dev_ptr));
3969 if (!*host_rt_dev_ptr) {
3970 unlock_user(argptr, arg, 0);
3971 return -TARGET_EFAULT;
3973 } else {
3974 *host_rt_dev_ptr = 0;
3976 field_types++;
3977 continue;
3979 field_types = thunk_convert(buf_temp + dst_offsets[i],
3980 argptr + src_offsets[i],
3981 field_types, THUNK_HOST);
3983 unlock_user(argptr, arg, 0);
3985 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3986 if (*host_rt_dev_ptr != 0) {
3987 unlock_user((void *)*host_rt_dev_ptr,
3988 *target_rt_dev_ptr, 0);
3990 return ret;
3993 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
3994 int fd, int cmd, abi_long arg)
3996 int sig = target_to_host_signal(arg);
3997 return get_errno(ioctl(fd, ie->host_cmd, sig));
/* Master table of supported ioctls, generated from ioctls.h.  IOCTL()
 * rows use the generic thunk-based conversion; IOCTL_SPECIAL() rows name
 * a custom do_ioctl_fn handler.  Terminated by a target_cmd == 0 entry. */
4000 static IOCTLEntry ioctl_entries[] = {
4001 #define IOCTL(cmd, access, ...) \
4002 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4003 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4004 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4005 #include "ioctls.h"
4006 { 0, 0, },
4009 /* ??? Implement proper locking for ioctls. */
4010 /* do_ioctl() Must return target values and target errnos. */
/* Generic ioctl dispatcher: looks the target command up in ioctl_entries,
 * delegates to a custom handler if one is registered, otherwise performs
 * the thunk-based argument conversion dictated by the entry's access
 * flags (IOC_R: copy result out; IOC_W: copy argument in; IOC_RW: both). */
4011 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4013 const IOCTLEntry *ie;
4014 const argtype *arg_type;
4015 abi_long ret;
4016 uint8_t buf_temp[MAX_STRUCT_SIZE];
4017 int target_size;
4018 void *argptr;
/* Linear search; the table ends with a target_cmd == 0 sentinel. */
4020 ie = ioctl_entries;
4021 for(;;) {
4022 if (ie->target_cmd == 0) {
4023 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4024 return -TARGET_ENOSYS;
4026 if (ie->target_cmd == cmd)
4027 break;
4028 ie++;
4030 arg_type = ie->arg_type;
4031 #if defined(DEBUG)
4032 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
4033 #endif
/* Entries with a custom handler bypass the generic conversion entirely. */
4034 if (ie->do_ioctl) {
4035 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4038 switch(arg_type[0]) {
4039 case TYPE_NULL:
4040 /* no argument */
4041 ret = get_errno(ioctl(fd, ie->host_cmd));
4042 break;
4043 case TYPE_PTRVOID:
4044 case TYPE_INT:
/* Scalar argument: pass through unchanged. */
4045 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
4046 break;
4047 case TYPE_PTR:
4048 arg_type++;
4049 target_size = thunk_type_size(arg_type, 0);
4050 switch(ie->access) {
4051 case IOC_R:
/* Read-only result: run the ioctl, then convert host -> guest. */
4052 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4053 if (!is_error(ret)) {
4054 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4055 if (!argptr)
4056 return -TARGET_EFAULT;
4057 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4058 unlock_user(argptr, arg, target_size);
4060 break;
4061 case IOC_W:
/* Write-only argument: convert guest -> host, then run the ioctl. */
4062 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4063 if (!argptr)
4064 return -TARGET_EFAULT;
4065 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4066 unlock_user(argptr, arg, 0);
4067 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4068 break;
4069 default:
4070 case IOC_RW:
/* Bidirectional: convert in, run the ioctl, convert the result out. */
4071 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4072 if (!argptr)
4073 return -TARGET_EFAULT;
4074 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4075 unlock_user(argptr, arg, 0);
4076 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4077 if (!is_error(ret)) {
4078 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4079 if (!argptr)
4080 return -TARGET_EFAULT;
4081 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4082 unlock_user(argptr, arg, target_size);
4084 break;
4086 break;
4087 default:
4088 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4089 (long)cmd, arg_type[0]);
4090 ret = -TARGET_ENOSYS;
4091 break;
4093 return ret;
/* termios c_iflag translation table: {target_mask, target_bits,
 * host_mask, host_bits}; zero-terminated.  Input-mode flags map 1:1. */
4096 static const bitmask_transtbl iflag_tbl[] = {
4097 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4098 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4099 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4100 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4101 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4102 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4103 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4104 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4105 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4106 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4107 { TARGET_IXON, TARGET_IXON, IXON, IXON },
4108 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4109 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4110 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4111 { 0, 0, 0, 0 }
/* termios c_oflag translation table.  The delay fields (NLDLY, CRDLY,
 * TABDLY, ...) are multi-bit, so each value gets its own row under the
 * same mask. */
4114 static const bitmask_transtbl oflag_tbl[] = {
4115 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4116 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4117 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4118 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4119 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4120 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4121 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4122 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4123 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4124 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4125 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4126 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4127 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4128 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4129 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4130 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4131 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4132 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4133 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4134 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4135 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4136 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4137 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4138 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4139 { 0, 0, 0, 0 }
/* termios c_cflag translation table.  CBAUD and CSIZE are multi-bit
 * fields: one row per baud rate / character size under the same mask. */
4142 static const bitmask_transtbl cflag_tbl[] = {
4143 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4144 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4145 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4146 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4147 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4148 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4149 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4150 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4151 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4152 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4153 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4154 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4155 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4156 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4157 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4158 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4159 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4160 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4161 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4162 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4163 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4164 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4165 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4166 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4167 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4168 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4169 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4170 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4171 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4172 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4173 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4174 { 0, 0, 0, 0 }
/* termios c_lflag translation table: local-mode flags map 1:1. */
4177 static const bitmask_transtbl lflag_tbl[] = {
4178 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4179 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4180 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4181 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4182 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4183 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4184 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4185 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4186 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4187 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4188 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4189 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4190 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4191 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4192 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4193 { 0, 0, 0, 0 }
4196 static void target_to_host_termios (void *dst, const void *src)
4198 struct host_termios *host = dst;
4199 const struct target_termios *target = src;
4201 host->c_iflag =
4202 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4203 host->c_oflag =
4204 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4205 host->c_cflag =
4206 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4207 host->c_lflag =
4208 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4209 host->c_line = target->c_line;
4211 memset(host->c_cc, 0, sizeof(host->c_cc));
4212 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4213 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4214 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4215 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4216 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4217 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4218 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4219 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4220 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4221 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4222 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4223 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4224 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4225 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4226 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4227 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4228 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
4231 static void host_to_target_termios (void *dst, const void *src)
4233 struct target_termios *target = dst;
4234 const struct host_termios *host = src;
4236 target->c_iflag =
4237 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4238 target->c_oflag =
4239 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4240 target->c_cflag =
4241 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4242 target->c_lflag =
4243 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4244 target->c_line = host->c_line;
4246 memset(target->c_cc, 0, sizeof(target->c_cc));
4247 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4248 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4249 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4250 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4251 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4252 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4253 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4254 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4255 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
4256 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
4257 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
4258 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
4259 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
4260 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
4261 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
4262 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
4263 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor for struct termios: converted via the custom
 * host<->target routines above rather than a field-type list. */
4266 static const StructEntry struct_termios_def = {
4267 .convert = { host_to_target_termios, target_to_host_termios },
4268 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
4269 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* mmap() flag translation table (target MAP_* bits -> host MAP_* bits);
 * zero-terminated like the termios tables above. */
4272 static bitmask_transtbl mmap_flags_tbl[] = {
4273 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4274 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4275 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4276 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
4277 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
4278 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
4279 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
4280 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4281 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
4282 MAP_NORESERVE },
4283 { 0, 0, 0, 0 }
4286 #if defined(TARGET_I386)
4288 /* NOTE: there is really one LDT for all the threads */
/* Backing store for the emulated LDT, lazily allocated by write_ldt(). */
4289 static uint8_t *ldt_table;
/* modify_ldt(func=0): copy the emulated LDT out to guest memory at
 * 'ptr', up to 'bytecount' bytes.  Returns the number of bytes copied
 * (0 if no LDT exists yet) or -TARGET_EFAULT. */
4291 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4293 int size;
4294 void *p;
4296 if (!ldt_table)
4297 return 0;
4298 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
/* Clamp the copy to the guest-supplied buffer size. */
4299 if (size > bytecount)
4300 size = bytecount;
4301 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4302 if (!p)
4303 return -TARGET_EFAULT;
4304 /* ??? Should this by byteswapped? */
4305 memcpy(p, ldt_table, size);
4306 unlock_user(p, ptr, size);
4307 return size;
4310 /* XXX: add locking support */
/* modify_ldt(func=1 or 0x11): install one LDT entry from the guest's
 * struct modify_ldt_ldt_s at 'ptr'.  'oldmode' selects the legacy entry
 * format (no 'useable' bit, stricter validation).  The entry is packed
 * into the two 32-bit descriptor words exactly as the Linux kernel does.
 * Returns 0 on success or a negative target errno. */
4311 static abi_long write_ldt(CPUX86State *env,
4312 abi_ulong ptr, unsigned long bytecount, int oldmode)
4314 struct target_modify_ldt_ldt_s ldt_info;
4315 struct target_modify_ldt_ldt_s *target_ldt_info;
4316 int seg_32bit, contents, read_exec_only, limit_in_pages;
4317 int seg_not_present, useable, lm;
4318 uint32_t *lp, entry_1, entry_2;
4320 if (bytecount != sizeof(ldt_info))
4321 return -TARGET_EINVAL;
/* Fetch and byte-swap the guest's descriptor request. */
4322 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4323 return -TARGET_EFAULT;
4324 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4325 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4326 ldt_info.limit = tswap32(target_ldt_info->limit);
4327 ldt_info.flags = tswap32(target_ldt_info->flags);
4328 unlock_user_struct(target_ldt_info, ptr, 0);
4330 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4331 return -TARGET_EINVAL;
/* Unpack the flags word (same bit layout as the kernel's user_desc). */
4332 seg_32bit = ldt_info.flags & 1;
4333 contents = (ldt_info.flags >> 1) & 3;
4334 read_exec_only = (ldt_info.flags >> 3) & 1;
4335 limit_in_pages = (ldt_info.flags >> 4) & 1;
4336 seg_not_present = (ldt_info.flags >> 5) & 1;
4337 useable = (ldt_info.flags >> 6) & 1;
4338 #ifdef TARGET_ABI32
4339 lm = 0;
4340 #else
4341 lm = (ldt_info.flags >> 7) & 1;
4342 #endif
4343 if (contents == 3) {
4344 if (oldmode)
4345 return -TARGET_EINVAL;
4346 if (seg_not_present == 0)
4347 return -TARGET_EINVAL;
4349 /* allocate the LDT */
/* Lazily allocate guest-visible backing for the LDT on first write. */
4350 if (!ldt_table) {
4351 env->ldt.base = target_mmap(0,
4352 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4353 PROT_READ|PROT_WRITE,
4354 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4355 if (env->ldt.base == -1)
4356 return -TARGET_ENOMEM;
4357 memset(g2h(env->ldt.base), 0,
4358 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4359 env->ldt.limit = 0xffff;
4360 ldt_table = g2h(env->ldt.base);
4363 /* NOTE: same code as Linux kernel */
4364 /* Allow LDTs to be cleared by the user. */
4365 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4366 if (oldmode ||
4367 (contents == 0 &&
4368 read_exec_only == 1 &&
4369 seg_32bit == 0 &&
4370 limit_in_pages == 0 &&
4371 seg_not_present == 1 &&
4372 useable == 0 )) {
4373 entry_1 = 0;
4374 entry_2 = 0;
4375 goto install;
/* Pack the descriptor into the two hardware 32-bit words. */
4379 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4380 (ldt_info.limit & 0x0ffff);
4381 entry_2 = (ldt_info.base_addr & 0xff000000) |
4382 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4383 (ldt_info.limit & 0xf0000) |
4384 ((read_exec_only ^ 1) << 9) |
4385 (contents << 10) |
4386 ((seg_not_present ^ 1) << 15) |
4387 (seg_32bit << 22) |
4388 (limit_in_pages << 23) |
4389 (lm << 21) |
4390 0x7000;
4391 if (!oldmode)
4392 entry_2 |= (useable << 20);
4394 /* Install the new entry ... */
4395 install:
4396 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4397 lp[0] = tswap32(entry_1);
4398 lp[1] = tswap32(entry_2);
4399 return 0;
/* specific and weird i386 syscalls */
/*
 * Emulate the modify_ldt(2) syscall for an i386 guest.
 * func selects the operation: 0 = read the LDT, 1 = write an entry in
 * the legacy format, 0x11 = write an entry in the new format.
 * Returns a target errno on failure.
 */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        /* oldmode=1: legacy struct layout without the flags extensions */
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
4425 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * Emulate set_thread_area(2) for a 32-bit i386 guest: install a TLS
 * descriptor into the emulated GDT.  An entry_number of -1 asks us to
 * pick the first free TLS slot and report it back to the guest.
 * The descriptor encoding below mirrors the Linux kernel's.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    /* VERIFY_WRITE: we may write the chosen entry_number back below. */
    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Find a free TLS slot and tell the guest which one we used. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;

    /* Unpack the packed flags word (same layout as the kernel's
     * struct user_desc bitfields). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/flags into the two 32-bit halves of a segment
     * descriptor; 0x7000 sets the DPL=3 and present-related bits the
     * kernel uses for user descriptors. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/*
 * Emulate get_thread_area(2): decode the TLS descriptor at the guest's
 * requested GDT slot back into a struct user_desc-style record.
 * Inverse of do_set_thread_area()'s packing.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Extract the individual descriptor bits (see the packing in
     * do_set_thread_area for the bit positions). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
4557 #endif /* TARGET_I386 && TARGET_ABI32 */
4559 #ifndef TARGET_ABI32
4560 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4562 abi_long ret = 0;
4563 abi_ulong val;
4564 int idx;
4566 switch(code) {
4567 case TARGET_ARCH_SET_GS:
4568 case TARGET_ARCH_SET_FS:
4569 if (code == TARGET_ARCH_SET_GS)
4570 idx = R_GS;
4571 else
4572 idx = R_FS;
4573 cpu_x86_load_seg(env, idx, 0);
4574 env->segs[idx].base = addr;
4575 break;
4576 case TARGET_ARCH_GET_GS:
4577 case TARGET_ARCH_GET_FS:
4578 if (code == TARGET_ARCH_GET_GS)
4579 idx = R_GS;
4580 else
4581 idx = R_FS;
4582 val = env->segs[idx].base;
4583 if (put_user(val, addr, abi_ulong))
4584 ret = -TARGET_EFAULT;
4585 break;
4586 default:
4587 ret = -TARGET_EINVAL;
4588 break;
4590 return ret;
4592 #endif
4594 #endif /* defined(TARGET_I386) */
#define NEW_STACK_SIZE 0x40000

/* Serializes thread creation so child startup appears atomic to signals
 * and other threads (see do_fork / clone_func). */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Handshake record passed from do_fork() to the new thread. */
typedef struct {
    CPUArchState *env;          /* CPU state for the child thread */
    pthread_mutex_t mutex;      /* protects the startup handshake */
    pthread_cond_t cond;        /* signalled once the child is running */
    pthread_t thread;
    uint32_t tid;               /* host TID, filled in by the child */
    abi_ulong child_tidptr;     /* guest addr for CLONE_CHILD_SETTID */
    abi_ulong parent_tidptr;    /* guest addr for CLONE_PARENT_SETTID */
    sigset_t sigmask;           /* parent's signal mask, restored in child */
} new_thread_info;
/*
 * Entry point of a thread created for an emulated clone(CLONE_VM).
 * Publishes the child's TID, unblocks signals, signals readiness to the
 * parent, then waits for the parent to release clone_lock before
 * entering the guest CPU loop.  The statement order here is the
 * handshake protocol with do_fork() -- do not reorder.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    /* Write the TID to the guest addresses requested via clone flags. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests become a host
 * pthread running a copied CPU state; anything else becomes a host
 * fork().  Returns the child TID/PID on success, -1 or a target errno
 * on failure.
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        /* Keep the NPTL flags for local handling; strip them from what
         * the rest of the emulation sees. */
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        /* NOTE(review): the return values of the three pthread_attr_*
         * calls are assigned but never checked; only pthread_create's
         * result is acted on.  Presumably deliberate best-effort --
         * confirm before "fixing". */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
            return -TARGET_EINVAL;
        }
        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            rcu_after_fork();
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
/*
 * Translate a target fcntl(2) command number into the host's.
 * Commands listed in the first group are passed through unchanged;
 * unknown commands yield -TARGET_EINVAL, which do_fcntl() checks for
 * before issuing the host call.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK;
    case TARGET_F_SETLK:
        return F_SETLK;
    case TARGET_F_SETLKW:
        return F_SETLKW;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        return F_SETOWN_EX;
#endif
    default:
        return -TARGET_EINVAL;
    }
    /* Unreachable: kept as a defensive fallback after the switch. */
    return -TARGET_EINVAL;
}
/* Build a bitmask_transtbl row mapping TARGET_<a> to the host <a>;
 * the -1 entries are all-ones masks so the whole value is compared. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
/* Translation table for struct flock l_type values (zero-terminated). */
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
/*
 * Emulate fcntl(2).  Struct-valued commands (F_GETLK/F_SETLK*,
 * F_*OWN_EX and the 64-bit lock variants) marshal the guest struct
 * into the host form, issue the host fcntl, and copy results back.
 * Scalar commands translate the argument bitmask where needed.
 * Returns the host result or a target errno.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            /* Copy the (possibly updated) lock description back. */
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type =
                host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        /* NOTE(review): the >> 1 shift on l_type mirrors the 64-bit
         * target flock layout; confirm against target_flock64. */
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type =
                host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* Translate the returned O_* flag bits to target values. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        /* Plain integer argument: no translation needed. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

/* Clamp a 32-bit uid to the legacy 16-bit range; overflow maps to the
 * kernel's conventional 65534 value. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}

static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}

/* Widen a 16-bit uid, preserving -1 as the "unchanged" sentinel. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}

static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit id ABI: all conversions are identities. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/*
 * One-time emulation setup: register thunk struct layouts, build the
 * target->host errno reverse table, and patch the size field of ioctl
 * numbers whose size depends on the target struct layout.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

/* Including syscall_types.h with these macros defined registers every
 * struct layout with the thunk machinery. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            /* Replace the all-ones placeholder with the real size of
             * the pointed-to target struct. */
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Combine the two 32-bit registers a 32-bit guest uses to pass a
 * 64-bit offset; which register holds the high half depends on the
 * guest's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already fits in one register; word1 unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* Emulate truncate64: reassemble the 64-bit length from the guest's
 * register pair and issue the host call. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    abi_long w0 = arg2;
    abi_long w1 = arg3;

    /* ABIs that align 64-bit register pairs shift the pair up one slot. */
    if (regpairs_aligned(cpu_env)) {
        w0 = arg3;
        w1 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(w0, w1)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* Emulate ftruncate64: same register-pair handling as truncate64 but
 * operating on a file descriptor. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    abi_long w0 = arg2;
    abi_long w1 = arg3;

    /* ABIs that align 64-bit register pairs shift the pair up one slot. */
    if (regpairs_aligned(cpu_env)) {
        w0 = arg3;
        w1 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(w0, w1)));
}
#endif
/*
 * Copy a struct timespec from guest memory at target_addr into
 * *host_ts, byteswapping as needed.  Returns -TARGET_EFAULT if the
 * guest address is unreadable.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapal(target_ts->tv_sec);
    host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
/*
 * Copy *host_ts out to guest memory at target_addr, byteswapping as
 * needed.  Returns -TARGET_EFAULT if the guest address is unwritable.
 */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapal(host_ts->tv_sec);
    target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
/*
 * Copy a struct itimerspec (interval + value timespecs) from guest
 * memory into *host_itspec.  Returns -TARGET_EFAULT on a bad address.
 */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
                                                 abi_ulong target_addr)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    host_itspec->it_interval.tv_sec =
        tswapal(target_itspec->it_interval.tv_sec);
    host_itspec->it_interval.tv_nsec =
        tswapal(target_itspec->it_interval.tv_nsec);
    host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
    host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);

    /* NOTE(review): copy=1 on a VERIFY_READ lock writes the unmodified
     * struct back to the guest; 0 would appear sufficient -- confirm. */
    unlock_user_struct(target_itspec, target_addr, 1);
    return 0;
}
/*
 * Copy *host_its out to the guest struct itimerspec at target_addr.
 * Returns -TARGET_EFAULT on a bad address.
 */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
    target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);

    target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
    target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);

    /* NOTE(review): copy=0 here relies on lock_user_struct having
     * handed back a direct pointer into guest memory -- confirm this
     * is correct when DEBUG_REMAP-style bounce buffers are in use. */
    unlock_user_struct(target_itspec, target_addr, 0);
    return 0;
}
5196 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5197 abi_ulong target_addr)
5199 struct target_sigevent *target_sevp;
5201 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
5202 return -TARGET_EFAULT;
5205 /* This union is awkward on 64 bit systems because it has a 32 bit
5206 * integer and a pointer in it; we follow the conversion approach
5207 * used for handling sigval types in signal.c so the guest should get
5208 * the correct value back even if we did a 64 bit byteswap and it's
5209 * using the 32 bit integer.
5211 host_sevp->sigev_value.sival_ptr =
5212 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
5213 host_sevp->sigev_signo =
5214 target_to_host_signal(tswap32(target_sevp->sigev_signo));
5215 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
5216 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
5218 unlock_user_struct(target_sevp, target_addr, 1);
5219 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall(2) flag bits into host MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_flags = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        host_flags |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        host_flags |= MCL_FUTURE;
    }
    return host_flags;
}
#endif
/*
 * Copy a host struct stat out to the guest's stat64-style structure
 * at target_addr.  ARM EABI guests use a distinct layout; other
 * targets use target_stat64 or plain target_stat depending on
 * TARGET_HAS_STRUCT_STAT64.  Returns -TARGET_EFAULT on a bad address.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Targets with the broken layout carry the inode twice. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
5300 /* ??? Using host futex calls even when target atomic operations
5301 are not really atomic probably breaks things. However implementing
5302 futexes locally would make futexes shared between multiple processes
5303 tricky. However they're probably useless because guest atomic
5304 operations won't work either. */
5305 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
5306 target_ulong uaddr2, int val3)
5308 struct timespec ts, *pts;
5309 int base_op;
5311 /* ??? We assume FUTEX_* constants are the same on both host
5312 and target. */
5313 #ifdef FUTEX_CMD_MASK
5314 base_op = op & FUTEX_CMD_MASK;
5315 #else
5316 base_op = op;
5317 #endif
5318 switch (base_op) {
5319 case FUTEX_WAIT:
5320 case FUTEX_WAIT_BITSET:
5321 if (timeout) {
5322 pts = &ts;
5323 target_to_host_timespec(pts, timeout);
5324 } else {
5325 pts = NULL;
5327 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
5328 pts, NULL, val3));
5329 case FUTEX_WAKE:
5330 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5331 case FUTEX_FD:
5332 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5333 case FUTEX_REQUEUE:
5334 case FUTEX_CMP_REQUEUE:
5335 case FUTEX_WAKE_OP:
5336 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5337 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5338 But the prototype takes a `struct timespec *'; insert casts
5339 to satisfy the compiler. We do not need to tswap TIMEOUT
5340 since it's not compared to guest memory. */
5341 pts = (struct timespec *)(uintptr_t) timeout;
5342 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
5343 g2h(uaddr2),
5344 (base_op == FUTEX_CMP_REQUEUE
5345 ? tswap32(val3)
5346 : val3)));
5347 default:
5348 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): read the guest's handle_bytes size,
 * run the host call into a temporary host file_handle, then copy the
 * (opaque) handle plus the mount id back to guest memory.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of the guest struct. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): size is guest-controlled; total_size could wrap for
     * absurd values -- confirm an upper bound is enforced elsewhere. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): copy the guest's file_handle into a
 * host-order duplicate and issue the host call with translated open
 * flags.  Returns the new fd or a target errno.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of the guest struct. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Duplicate, then fix up the two header fields to host order; the
     * handle payload itself is opaque and copied verbatim. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
5439 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
5441 /* signalfd siginfo conversion */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    /* Convert a host signalfd_siginfo record to target signal numbering
     * and byte order.
     *
     * NOTE(review): the only visible caller converts in place
     * (tinfo == info); the BUS_MCEERR_* test below reads the
     * not-yet-swapped destination fields and relies on that aliasing.
     */
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */
#ifdef BUS_MCEERR_AO
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Bug fix: read errno from the source struct, not the destination.
     * Equivalent for the in-place caller, correct when buffers differ. */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
5481 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
5483 int i;
5485 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
5486 host_to_target_signalfd_siginfo(buf + i, buf + i);
5489 return len;
5492 static TargetFdTrans target_signalfd_trans = {
5493 .host_to_target_data = host_to_target_data_signalfd,
5496 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
5498 int host_flags;
5499 target_sigset_t *target_mask;
5500 sigset_t host_mask;
5501 abi_long ret;
5503 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
5504 return -TARGET_EINVAL;
5506 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
5507 return -TARGET_EFAULT;
5510 target_to_host_sigset(&host_mask, target_mask);
5512 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
5514 ret = get_errno(signalfd(fd, &host_mask, host_flags));
5515 if (ret >= 0) {
5516 fd_trans_register(ret, &target_signalfd_trans);
5519 unlock_user_struct(target_mask, mask, 0);
5521 return ret;
5523 #endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal lives in the low 7 bits. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }

    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8..15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }

    /* Normal exit statuses need no translation. */
    return status;
}
static int open_self_cmdline(void *cpu_env, int fd)
{
    /* Fill fd with the contents of /proc/self/cmdline, minus the first
     * argument (the path to the qemu binary itself), so the guest sees
     * its own command line.  Returns 0 on success, negative on error.
     */
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            close(fd_orig);
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command.
               Bug fix: only scan the nb_read bytes actually read; the
               previous sizeof(buf) bound could inspect uninitialized
               stack bytes on a short read. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                close(fd_orig);
                return -1;
            }
        }
    }

    return close(fd_orig);
}
5585 static int open_self_maps(void *cpu_env, int fd)
5587 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5588 TaskState *ts = cpu->opaque;
5589 FILE *fp;
5590 char *line = NULL;
5591 size_t len = 0;
5592 ssize_t read;
5594 fp = fopen("/proc/self/maps", "r");
5595 if (fp == NULL) {
5596 return -EACCES;
5599 while ((read = getline(&line, &len, fp)) != -1) {
5600 int fields, dev_maj, dev_min, inode;
5601 uint64_t min, max, offset;
5602 char flag_r, flag_w, flag_x, flag_p;
5603 char path[512] = "";
5604 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5605 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5606 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5608 if ((fields < 10) || (fields > 11)) {
5609 continue;
5611 if (h2g_valid(min)) {
5612 int flags = page_get_flags(h2g(min));
5613 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
5614 if (page_check_range(h2g(min), max - min, flags) == -1) {
5615 continue;
5617 if (h2g(min) == ts->info->stack_limit) {
5618 pstrcpy(path, sizeof(path), " [stack]");
5620 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5621 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5622 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
5623 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5624 path[0] ? " " : "", path);
5628 free(line);
5629 fclose(fp);
5631 return 0;
5634 static int open_self_stat(void *cpu_env, int fd)
5636 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5637 TaskState *ts = cpu->opaque;
5638 abi_ulong start_stack = ts->info->start_stack;
5639 int i;
5641 for (i = 0; i < 44; i++) {
5642 char buf[128];
5643 int len;
5644 uint64_t val = 0;
5646 if (i == 0) {
5647 /* pid */
5648 val = getpid();
5649 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5650 } else if (i == 1) {
5651 /* app name */
5652 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5653 } else if (i == 27) {
5654 /* stack bottom */
5655 val = start_stack;
5656 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5657 } else {
5658 /* for the rest, there is MasterCard */
5659 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5662 len = strlen(buf);
5663 if (write(fd, buf, len) != len) {
5664 return -1;
5668 return 0;
5671 static int open_self_auxv(void *cpu_env, int fd)
5673 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5674 TaskState *ts = cpu->opaque;
5675 abi_ulong auxv = ts->info->saved_auxv;
5676 abi_ulong len = ts->info->auxv_len;
5677 char *ptr;
5680 * Auxiliary vector is stored in target process stack.
5681 * read in whole auxv vector and copy it to file
5683 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5684 if (ptr != NULL) {
5685 while (len > 0) {
5686 ssize_t r;
5687 r = write(fd, ptr, len);
5688 if (r <= 0) {
5689 break;
5691 len -= r;
5692 ptr += r;
5694 lseek(fd, 0, SEEK_SET);
5695 unlock_user(ptr, auxv, len);
5698 return 0;
static int is_proc_myself(const char *filename, const char *entry)
{
    /* Return 1 if filename names /proc/<current-process>/<entry>, where
     * the process component is either "self" or our own numeric pid;
     * return 0 otherwise. */
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* Accept an explicit "<pid>/" only if it is our own pid. */
        char myself[80];
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(rest, myself, strlen(myself)) != 0) {
            return 0;
        }
        rest += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
5725 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
static int is_proc(const char *filename, const char *entry)
{
    /* Exact full-path matcher for faked /proc entries. */
    return !strcmp(filename, entry);
}
5731 static int open_net_route(void *cpu_env, int fd)
5733 FILE *fp;
5734 char *line = NULL;
5735 size_t len = 0;
5736 ssize_t read;
5738 fp = fopen("/proc/net/route", "r");
5739 if (fp == NULL) {
5740 return -EACCES;
5743 /* read header */
5745 read = getline(&line, &len, fp);
5746 dprintf(fd, "%s", line);
5748 /* read routes */
5750 while ((read = getline(&line, &len, fp)) != -1) {
5751 char iface[16];
5752 uint32_t dest, gw, mask;
5753 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5754 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5755 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5756 &mask, &mtu, &window, &irtt);
5757 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5758 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5759 metric, tswap32(mask), mtu, window, irtt);
5762 free(line);
5763 fclose(fp);
5765 return 0;
5767 #endif
5769 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
5771 struct fake_open {
5772 const char *filename;
5773 int (*fill)(void *cpu_env, int fd);
5774 int (*cmp)(const char *s1, const char *s2);
5776 const struct fake_open *fake_open;
5777 static const struct fake_open fakes[] = {
5778 { "maps", open_self_maps, is_proc_myself },
5779 { "stat", open_self_stat, is_proc_myself },
5780 { "auxv", open_self_auxv, is_proc_myself },
5781 { "cmdline", open_self_cmdline, is_proc_myself },
5782 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5783 { "/proc/net/route", open_net_route, is_proc },
5784 #endif
5785 { NULL, NULL, NULL }
5788 if (is_proc_myself(pathname, "exe")) {
5789 int execfd = qemu_getauxval(AT_EXECFD);
5790 return execfd ? execfd : get_errno(sys_openat(dirfd, exec_path, flags, mode));
5793 for (fake_open = fakes; fake_open->filename; fake_open++) {
5794 if (fake_open->cmp(pathname, fake_open->filename)) {
5795 break;
5799 if (fake_open->filename) {
5800 const char *tmpdir;
5801 char filename[PATH_MAX];
5802 int fd, r;
5804 /* create temporary file to map stat to */
5805 tmpdir = getenv("TMPDIR");
5806 if (!tmpdir)
5807 tmpdir = "/tmp";
5808 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5809 fd = mkstemp(filename);
5810 if (fd < 0) {
5811 return fd;
5813 unlink(filename);
5815 if ((r = fake_open->fill(cpu_env, fd))) {
5816 close(fd);
5817 return r;
5819 lseek(fd, 0, SEEK_SET);
5821 return fd;
5824 return get_errno(sys_openat(dirfd, path(pathname), flags, mode));
5827 #define TIMER_MAGIC 0x0caf0000
5828 #define TIMER_MAGIC_MASK 0xffff0000
5830 /* Convert QEMU provided timer ID back to internal 16bit index format */
5831 static target_timer_t get_timer_id(abi_long arg)
5833 target_timer_t timerid = arg;
5835 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
5836 return -TARGET_EINVAL;
5839 timerid &= 0xffff;
5841 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
5842 return -TARGET_EINVAL;
5845 return timerid;
5848 /* do_syscall() should always have a single exit point at the end so
5849 that actions, such as logging of syscall results, can be performed.
5850 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5851 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5852 abi_long arg2, abi_long arg3, abi_long arg4,
5853 abi_long arg5, abi_long arg6, abi_long arg7,
5854 abi_long arg8)
5856 CPUState *cpu = ENV_GET_CPU(cpu_env);
5857 abi_long ret;
5858 struct stat st;
5859 struct statfs stfs;
5860 void *p;
5862 #ifdef DEBUG
5863 gemu_log("syscall %d", num);
5864 #endif
5865 if(do_strace)
5866 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5868 switch(num) {
5869 case TARGET_NR_exit:
5870 /* In old applications this may be used to implement _exit(2).
5871 However in threaded applictions it is used for thread termination,
5872 and _exit_group is used for application termination.
5873 Do thread termination if we have more then one thread. */
5874 /* FIXME: This probably breaks if a signal arrives. We should probably
5875 be disabling signals. */
5876 if (CPU_NEXT(first_cpu)) {
5877 TaskState *ts;
5879 cpu_list_lock();
5880 /* Remove the CPU from the list. */
5881 QTAILQ_REMOVE(&cpus, cpu, node);
5882 cpu_list_unlock();
5883 ts = cpu->opaque;
5884 if (ts->child_tidptr) {
5885 put_user_u32(0, ts->child_tidptr);
5886 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5887 NULL, NULL, 0);
5889 thread_cpu = NULL;
5890 object_unref(OBJECT(cpu));
5891 g_free(ts);
5892 rcu_unregister_thread();
5893 pthread_exit(NULL);
5895 #ifdef TARGET_GPROF
5896 _mcleanup();
5897 #endif
5898 gdb_exit(cpu_env, arg1);
5899 _exit(arg1);
5900 ret = 0; /* avoid warning */
5901 break;
5902 case TARGET_NR_read:
5903 if (arg3 == 0)
5904 ret = 0;
5905 else {
5906 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5907 goto efault;
5908 ret = get_errno(read(arg1, p, arg3));
5909 if (ret >= 0 &&
5910 fd_trans_host_to_target_data(arg1)) {
5911 ret = fd_trans_host_to_target_data(arg1)(p, ret);
5913 unlock_user(p, arg2, ret);
5915 break;
5916 case TARGET_NR_write:
5917 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5918 goto efault;
5919 ret = get_errno(write(arg1, p, arg3));
5920 unlock_user(p, arg2, 0);
5921 break;
5922 #ifdef TARGET_NR_open
5923 case TARGET_NR_open:
5924 if (!(p = lock_user_string(arg1)))
5925 goto efault;
5926 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
5927 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5928 arg3));
5929 fd_trans_unregister(ret);
5930 unlock_user(p, arg1, 0);
5931 break;
5932 #endif
5933 case TARGET_NR_openat:
5934 if (!(p = lock_user_string(arg2)))
5935 goto efault;
5936 ret = get_errno(do_openat(cpu_env, arg1, p,
5937 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5938 arg4));
5939 fd_trans_unregister(ret);
5940 unlock_user(p, arg2, 0);
5941 break;
5942 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5943 case TARGET_NR_name_to_handle_at:
5944 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
5945 break;
5946 #endif
5947 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5948 case TARGET_NR_open_by_handle_at:
5949 ret = do_open_by_handle_at(arg1, arg2, arg3);
5950 fd_trans_unregister(ret);
5951 break;
5952 #endif
5953 case TARGET_NR_close:
5954 fd_trans_unregister(arg1);
5955 ret = get_errno(close(arg1));
5956 break;
5957 case TARGET_NR_brk:
5958 ret = do_brk(arg1);
5959 break;
5960 #ifdef TARGET_NR_fork
5961 case TARGET_NR_fork:
5962 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5963 break;
5964 #endif
5965 #ifdef TARGET_NR_waitpid
5966 case TARGET_NR_waitpid:
5968 int status;
5969 ret = get_errno(waitpid(arg1, &status, arg3));
5970 if (!is_error(ret) && arg2 && ret
5971 && put_user_s32(host_to_target_waitstatus(status), arg2))
5972 goto efault;
5974 break;
5975 #endif
5976 #ifdef TARGET_NR_waitid
5977 case TARGET_NR_waitid:
5979 siginfo_t info;
5980 info.si_pid = 0;
5981 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5982 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5983 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5984 goto efault;
5985 host_to_target_siginfo(p, &info);
5986 unlock_user(p, arg3, sizeof(target_siginfo_t));
5989 break;
5990 #endif
5991 #ifdef TARGET_NR_creat /* not on alpha */
5992 case TARGET_NR_creat:
5993 if (!(p = lock_user_string(arg1)))
5994 goto efault;
5995 ret = get_errno(creat(p, arg2));
5996 fd_trans_unregister(ret);
5997 unlock_user(p, arg1, 0);
5998 break;
5999 #endif
6000 #ifdef TARGET_NR_link
6001 case TARGET_NR_link:
6003 void * p2;
6004 p = lock_user_string(arg1);
6005 p2 = lock_user_string(arg2);
6006 if (!p || !p2)
6007 ret = -TARGET_EFAULT;
6008 else
6009 ret = get_errno(link(p, p2));
6010 unlock_user(p2, arg2, 0);
6011 unlock_user(p, arg1, 0);
6013 break;
6014 #endif
6015 #if defined(TARGET_NR_linkat)
6016 case TARGET_NR_linkat:
6018 void * p2 = NULL;
6019 if (!arg2 || !arg4)
6020 goto efault;
6021 p = lock_user_string(arg2);
6022 p2 = lock_user_string(arg4);
6023 if (!p || !p2)
6024 ret = -TARGET_EFAULT;
6025 else
6026 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6027 unlock_user(p, arg2, 0);
6028 unlock_user(p2, arg4, 0);
6030 break;
6031 #endif
6032 #ifdef TARGET_NR_unlink
6033 case TARGET_NR_unlink:
6034 if (!(p = lock_user_string(arg1)))
6035 goto efault;
6036 ret = get_errno(unlink(p));
6037 unlock_user(p, arg1, 0);
6038 break;
6039 #endif
6040 #if defined(TARGET_NR_unlinkat)
6041 case TARGET_NR_unlinkat:
6042 if (!(p = lock_user_string(arg2)))
6043 goto efault;
6044 ret = get_errno(unlinkat(arg1, p, arg3));
6045 unlock_user(p, arg2, 0);
6046 break;
6047 #endif
6048 case TARGET_NR_execve:
6050 char **argp, **envp;
6051 int argc, envc;
6052 abi_ulong gp;
6053 abi_ulong guest_argp;
6054 abi_ulong guest_envp;
6055 abi_ulong addr;
6056 char **q;
6057 int total_size = 0;
6059 argc = 0;
6060 guest_argp = arg2;
6061 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6062 if (get_user_ual(addr, gp))
6063 goto efault;
6064 if (!addr)
6065 break;
6066 argc++;
6068 envc = 0;
6069 guest_envp = arg3;
6070 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6071 if (get_user_ual(addr, gp))
6072 goto efault;
6073 if (!addr)
6074 break;
6075 envc++;
6078 argp = alloca((argc + 1) * sizeof(void *));
6079 envp = alloca((envc + 1) * sizeof(void *));
6081 for (gp = guest_argp, q = argp; gp;
6082 gp += sizeof(abi_ulong), q++) {
6083 if (get_user_ual(addr, gp))
6084 goto execve_efault;
6085 if (!addr)
6086 break;
6087 if (!(*q = lock_user_string(addr)))
6088 goto execve_efault;
6089 total_size += strlen(*q) + 1;
6091 *q = NULL;
6093 for (gp = guest_envp, q = envp; gp;
6094 gp += sizeof(abi_ulong), q++) {
6095 if (get_user_ual(addr, gp))
6096 goto execve_efault;
6097 if (!addr)
6098 break;
6099 if (!(*q = lock_user_string(addr)))
6100 goto execve_efault;
6101 total_size += strlen(*q) + 1;
6103 *q = NULL;
6105 if (!(p = lock_user_string(arg1)))
6106 goto execve_efault;
6107 ret = get_errno(execve(p, argp, envp));
6108 unlock_user(p, arg1, 0);
6110 goto execve_end;
6112 execve_efault:
6113 ret = -TARGET_EFAULT;
6115 execve_end:
6116 for (gp = guest_argp, q = argp; *q;
6117 gp += sizeof(abi_ulong), q++) {
6118 if (get_user_ual(addr, gp)
6119 || !addr)
6120 break;
6121 unlock_user(*q, addr, 0);
6123 for (gp = guest_envp, q = envp; *q;
6124 gp += sizeof(abi_ulong), q++) {
6125 if (get_user_ual(addr, gp)
6126 || !addr)
6127 break;
6128 unlock_user(*q, addr, 0);
6131 break;
6132 case TARGET_NR_chdir:
6133 if (!(p = lock_user_string(arg1)))
6134 goto efault;
6135 ret = get_errno(chdir(p));
6136 unlock_user(p, arg1, 0);
6137 break;
6138 #ifdef TARGET_NR_time
6139 case TARGET_NR_time:
6141 time_t host_time;
6142 ret = get_errno(time(&host_time));
6143 if (!is_error(ret)
6144 && arg1
6145 && put_user_sal(host_time, arg1))
6146 goto efault;
6148 break;
6149 #endif
6150 #ifdef TARGET_NR_mknod
6151 case TARGET_NR_mknod:
6152 if (!(p = lock_user_string(arg1)))
6153 goto efault;
6154 ret = get_errno(mknod(p, arg2, arg3));
6155 unlock_user(p, arg1, 0);
6156 break;
6157 #endif
6158 #if defined(TARGET_NR_mknodat)
6159 case TARGET_NR_mknodat:
6160 if (!(p = lock_user_string(arg2)))
6161 goto efault;
6162 ret = get_errno(mknodat(arg1, p, arg3, arg4));
6163 unlock_user(p, arg2, 0);
6164 break;
6165 #endif
6166 #ifdef TARGET_NR_chmod
6167 case TARGET_NR_chmod:
6168 if (!(p = lock_user_string(arg1)))
6169 goto efault;
6170 ret = get_errno(chmod(p, arg2));
6171 unlock_user(p, arg1, 0);
6172 break;
6173 #endif
6174 #ifdef TARGET_NR_break
6175 case TARGET_NR_break:
6176 goto unimplemented;
6177 #endif
6178 #ifdef TARGET_NR_oldstat
6179 case TARGET_NR_oldstat:
6180 goto unimplemented;
6181 #endif
6182 case TARGET_NR_lseek:
6183 ret = get_errno(lseek(arg1, arg2, arg3));
6184 break;
6185 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
6186 /* Alpha specific */
6187 case TARGET_NR_getxpid:
6188 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
6189 ret = get_errno(getpid());
6190 break;
6191 #endif
6192 #ifdef TARGET_NR_getpid
6193 case TARGET_NR_getpid:
6194 ret = get_errno(getpid());
6195 break;
6196 #endif
6197 case TARGET_NR_mount:
6199 /* need to look at the data field */
6200 void *p2, *p3;
6202 if (arg1) {
6203 p = lock_user_string(arg1);
6204 if (!p) {
6205 goto efault;
6207 } else {
6208 p = NULL;
6211 p2 = lock_user_string(arg2);
6212 if (!p2) {
6213 if (arg1) {
6214 unlock_user(p, arg1, 0);
6216 goto efault;
6219 if (arg3) {
6220 p3 = lock_user_string(arg3);
6221 if (!p3) {
6222 if (arg1) {
6223 unlock_user(p, arg1, 0);
6225 unlock_user(p2, arg2, 0);
6226 goto efault;
6228 } else {
6229 p3 = NULL;
6232 /* FIXME - arg5 should be locked, but it isn't clear how to
6233 * do that since it's not guaranteed to be a NULL-terminated
6234 * string.
6236 if (!arg5) {
6237 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
6238 } else {
6239 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
6241 ret = get_errno(ret);
6243 if (arg1) {
6244 unlock_user(p, arg1, 0);
6246 unlock_user(p2, arg2, 0);
6247 if (arg3) {
6248 unlock_user(p3, arg3, 0);
6251 break;
6252 #ifdef TARGET_NR_umount
6253 case TARGET_NR_umount:
6254 if (!(p = lock_user_string(arg1)))
6255 goto efault;
6256 ret = get_errno(umount(p));
6257 unlock_user(p, arg1, 0);
6258 break;
6259 #endif
6260 #ifdef TARGET_NR_stime /* not on alpha */
6261 case TARGET_NR_stime:
6263 time_t host_time;
6264 if (get_user_sal(host_time, arg1))
6265 goto efault;
6266 ret = get_errno(stime(&host_time));
6268 break;
6269 #endif
6270 case TARGET_NR_ptrace:
6271 goto unimplemented;
6272 #ifdef TARGET_NR_alarm /* not on alpha */
6273 case TARGET_NR_alarm:
6274 ret = alarm(arg1);
6275 break;
6276 #endif
6277 #ifdef TARGET_NR_oldfstat
6278 case TARGET_NR_oldfstat:
6279 goto unimplemented;
6280 #endif
6281 #ifdef TARGET_NR_pause /* not on alpha */
6282 case TARGET_NR_pause:
6283 ret = get_errno(pause());
6284 break;
6285 #endif
6286 #ifdef TARGET_NR_utime
6287 case TARGET_NR_utime:
6289 struct utimbuf tbuf, *host_tbuf;
6290 struct target_utimbuf *target_tbuf;
6291 if (arg2) {
6292 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
6293 goto efault;
6294 tbuf.actime = tswapal(target_tbuf->actime);
6295 tbuf.modtime = tswapal(target_tbuf->modtime);
6296 unlock_user_struct(target_tbuf, arg2, 0);
6297 host_tbuf = &tbuf;
6298 } else {
6299 host_tbuf = NULL;
6301 if (!(p = lock_user_string(arg1)))
6302 goto efault;
6303 ret = get_errno(utime(p, host_tbuf));
6304 unlock_user(p, arg1, 0);
6306 break;
6307 #endif
6308 #ifdef TARGET_NR_utimes
6309 case TARGET_NR_utimes:
6311 struct timeval *tvp, tv[2];
6312 if (arg2) {
6313 if (copy_from_user_timeval(&tv[0], arg2)
6314 || copy_from_user_timeval(&tv[1],
6315 arg2 + sizeof(struct target_timeval)))
6316 goto efault;
6317 tvp = tv;
6318 } else {
6319 tvp = NULL;
6321 if (!(p = lock_user_string(arg1)))
6322 goto efault;
6323 ret = get_errno(utimes(p, tvp));
6324 unlock_user(p, arg1, 0);
6326 break;
6327 #endif
6328 #if defined(TARGET_NR_futimesat)
6329 case TARGET_NR_futimesat:
6331 struct timeval *tvp, tv[2];
6332 if (arg3) {
6333 if (copy_from_user_timeval(&tv[0], arg3)
6334 || copy_from_user_timeval(&tv[1],
6335 arg3 + sizeof(struct target_timeval)))
6336 goto efault;
6337 tvp = tv;
6338 } else {
6339 tvp = NULL;
6341 if (!(p = lock_user_string(arg2)))
6342 goto efault;
6343 ret = get_errno(futimesat(arg1, path(p), tvp));
6344 unlock_user(p, arg2, 0);
6346 break;
6347 #endif
6348 #ifdef TARGET_NR_stty
6349 case TARGET_NR_stty:
6350 goto unimplemented;
6351 #endif
6352 #ifdef TARGET_NR_gtty
6353 case TARGET_NR_gtty:
6354 goto unimplemented;
6355 #endif
6356 #ifdef TARGET_NR_access
6357 case TARGET_NR_access:
6358 if (!(p = lock_user_string(arg1)))
6359 goto efault;
6360 ret = get_errno(access(path(p), arg2));
6361 unlock_user(p, arg1, 0);
6362 break;
6363 #endif
6364 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6365 case TARGET_NR_faccessat:
6366 if (!(p = lock_user_string(arg2)))
6367 goto efault;
6368 ret = get_errno(faccessat(arg1, p, arg3, 0));
6369 unlock_user(p, arg2, 0);
6370 break;
6371 #endif
6372 #ifdef TARGET_NR_nice /* not on alpha */
6373 case TARGET_NR_nice:
6374 ret = get_errno(nice(arg1));
6375 break;
6376 #endif
6377 #ifdef TARGET_NR_ftime
6378 case TARGET_NR_ftime:
6379 goto unimplemented;
6380 #endif
6381 case TARGET_NR_sync:
6382 sync();
6383 ret = 0;
6384 break;
6385 case TARGET_NR_kill:
6386 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
6387 break;
6388 #ifdef TARGET_NR_rename
6389 case TARGET_NR_rename:
6391 void *p2;
6392 p = lock_user_string(arg1);
6393 p2 = lock_user_string(arg2);
6394 if (!p || !p2)
6395 ret = -TARGET_EFAULT;
6396 else
6397 ret = get_errno(rename(p, p2));
6398 unlock_user(p2, arg2, 0);
6399 unlock_user(p, arg1, 0);
6401 break;
6402 #endif
6403 #if defined(TARGET_NR_renameat)
6404 case TARGET_NR_renameat:
6406 void *p2;
6407 p = lock_user_string(arg2);
6408 p2 = lock_user_string(arg4);
6409 if (!p || !p2)
6410 ret = -TARGET_EFAULT;
6411 else
6412 ret = get_errno(renameat(arg1, p, arg3, p2));
6413 unlock_user(p2, arg4, 0);
6414 unlock_user(p, arg2, 0);
6416 break;
6417 #endif
6418 #ifdef TARGET_NR_mkdir
6419 case TARGET_NR_mkdir:
6420 if (!(p = lock_user_string(arg1)))
6421 goto efault;
6422 ret = get_errno(mkdir(p, arg2));
6423 unlock_user(p, arg1, 0);
6424 break;
6425 #endif
6426 #if defined(TARGET_NR_mkdirat)
6427 case TARGET_NR_mkdirat:
6428 if (!(p = lock_user_string(arg2)))
6429 goto efault;
6430 ret = get_errno(mkdirat(arg1, p, arg3));
6431 unlock_user(p, arg2, 0);
6432 break;
6433 #endif
6434 #ifdef TARGET_NR_rmdir
6435 case TARGET_NR_rmdir:
6436 if (!(p = lock_user_string(arg1)))
6437 goto efault;
6438 ret = get_errno(rmdir(p));
6439 unlock_user(p, arg1, 0);
6440 break;
6441 #endif
6442 case TARGET_NR_dup:
6443 ret = get_errno(dup(arg1));
6444 if (ret >= 0) {
6445 fd_trans_dup(arg1, ret);
6447 break;
6448 #ifdef TARGET_NR_pipe
6449 case TARGET_NR_pipe:
6450 ret = do_pipe(cpu_env, arg1, 0, 0);
6451 break;
6452 #endif
6453 #ifdef TARGET_NR_pipe2
6454 case TARGET_NR_pipe2:
6455 ret = do_pipe(cpu_env, arg1,
6456 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
6457 break;
6458 #endif
6459 case TARGET_NR_times:
6461 struct target_tms *tmsp;
6462 struct tms tms;
6463 ret = get_errno(times(&tms));
6464 if (arg1) {
6465 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
6466 if (!tmsp)
6467 goto efault;
6468 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
6469 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
6470 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
6471 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
6473 if (!is_error(ret))
6474 ret = host_to_target_clock_t(ret);
6476 break;
6477 #ifdef TARGET_NR_prof
6478 case TARGET_NR_prof:
6479 goto unimplemented;
6480 #endif
6481 #ifdef TARGET_NR_signal
6482 case TARGET_NR_signal:
6483 goto unimplemented;
6484 #endif
6485 case TARGET_NR_acct:
6486 if (arg1 == 0) {
6487 ret = get_errno(acct(NULL));
6488 } else {
6489 if (!(p = lock_user_string(arg1)))
6490 goto efault;
6491 ret = get_errno(acct(path(p)));
6492 unlock_user(p, arg1, 0);
6494 break;
6495 #ifdef TARGET_NR_umount2
6496 case TARGET_NR_umount2:
6497 if (!(p = lock_user_string(arg1)))
6498 goto efault;
6499 ret = get_errno(umount2(p, arg2));
6500 unlock_user(p, arg1, 0);
6501 break;
6502 #endif
6503 #ifdef TARGET_NR_lock
6504 case TARGET_NR_lock:
6505 goto unimplemented;
6506 #endif
6507 case TARGET_NR_ioctl:
6508 ret = do_ioctl(arg1, arg2, arg3);
6509 break;
6510 case TARGET_NR_fcntl:
6511 ret = do_fcntl(arg1, arg2, arg3);
6512 break;
6513 #ifdef TARGET_NR_mpx
6514 case TARGET_NR_mpx:
6515 goto unimplemented;
6516 #endif
6517 case TARGET_NR_setpgid:
6518 ret = get_errno(setpgid(arg1, arg2));
6519 break;
6520 #ifdef TARGET_NR_ulimit
6521 case TARGET_NR_ulimit:
6522 goto unimplemented;
6523 #endif
6524 #ifdef TARGET_NR_oldolduname
6525 case TARGET_NR_oldolduname:
6526 goto unimplemented;
6527 #endif
6528 case TARGET_NR_umask:
6529 ret = get_errno(umask(arg1));
6530 break;
6531 case TARGET_NR_chroot:
6532 if (!(p = lock_user_string(arg1)))
6533 goto efault;
6534 ret = get_errno(chroot(p));
6535 unlock_user(p, arg1, 0);
6536 break;
6537 #ifdef TARGET_NR_ustat
6538 case TARGET_NR_ustat:
6539 goto unimplemented;
6540 #endif
6541 #ifdef TARGET_NR_dup2
6542 case TARGET_NR_dup2:
6543 ret = get_errno(dup2(arg1, arg2));
6544 if (ret >= 0) {
6545 fd_trans_dup(arg1, arg2);
6547 break;
6548 #endif
6549 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6550 case TARGET_NR_dup3:
6551 ret = get_errno(dup3(arg1, arg2, arg3));
6552 if (ret >= 0) {
6553 fd_trans_dup(arg1, arg2);
6555 break;
6556 #endif
6557 #ifdef TARGET_NR_getppid /* not on alpha */
6558 case TARGET_NR_getppid:
6559 ret = get_errno(getppid());
6560 break;
6561 #endif
6562 #ifdef TARGET_NR_getpgrp
6563 case TARGET_NR_getpgrp:
6564 ret = get_errno(getpgrp());
6565 break;
6566 #endif
6567 case TARGET_NR_setsid:
6568 ret = get_errno(setsid());
6569 break;
6570 #ifdef TARGET_NR_sigaction
6571 case TARGET_NR_sigaction:
6573 #if defined(TARGET_ALPHA)
6574 struct target_sigaction act, oact, *pact = 0;
6575 struct target_old_sigaction *old_act;
6576 if (arg2) {
6577 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6578 goto efault;
6579 act._sa_handler = old_act->_sa_handler;
6580 target_siginitset(&act.sa_mask, old_act->sa_mask);
6581 act.sa_flags = old_act->sa_flags;
6582 act.sa_restorer = 0;
6583 unlock_user_struct(old_act, arg2, 0);
6584 pact = &act;
6586 ret = get_errno(do_sigaction(arg1, pact, &oact));
6587 if (!is_error(ret) && arg3) {
6588 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6589 goto efault;
6590 old_act->_sa_handler = oact._sa_handler;
6591 old_act->sa_mask = oact.sa_mask.sig[0];
6592 old_act->sa_flags = oact.sa_flags;
6593 unlock_user_struct(old_act, arg3, 1);
6595 #elif defined(TARGET_MIPS)
6596 struct target_sigaction act, oact, *pact, *old_act;
6598 if (arg2) {
6599 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6600 goto efault;
6601 act._sa_handler = old_act->_sa_handler;
6602 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
6603 act.sa_flags = old_act->sa_flags;
6604 unlock_user_struct(old_act, arg2, 0);
6605 pact = &act;
6606 } else {
6607 pact = NULL;
6610 ret = get_errno(do_sigaction(arg1, pact, &oact));
6612 if (!is_error(ret) && arg3) {
6613 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6614 goto efault;
6615 old_act->_sa_handler = oact._sa_handler;
6616 old_act->sa_flags = oact.sa_flags;
6617 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
6618 old_act->sa_mask.sig[1] = 0;
6619 old_act->sa_mask.sig[2] = 0;
6620 old_act->sa_mask.sig[3] = 0;
6621 unlock_user_struct(old_act, arg3, 1);
6623 #else
6624 struct target_old_sigaction *old_act;
6625 struct target_sigaction act, oact, *pact;
6626 if (arg2) {
6627 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6628 goto efault;
6629 act._sa_handler = old_act->_sa_handler;
6630 target_siginitset(&act.sa_mask, old_act->sa_mask);
6631 act.sa_flags = old_act->sa_flags;
6632 act.sa_restorer = old_act->sa_restorer;
6633 unlock_user_struct(old_act, arg2, 0);
6634 pact = &act;
6635 } else {
6636 pact = NULL;
6638 ret = get_errno(do_sigaction(arg1, pact, &oact));
6639 if (!is_error(ret) && arg3) {
6640 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6641 goto efault;
6642 old_act->_sa_handler = oact._sa_handler;
6643 old_act->sa_mask = oact.sa_mask.sig[0];
6644 old_act->sa_flags = oact.sa_flags;
6645 old_act->sa_restorer = oact.sa_restorer;
6646 unlock_user_struct(old_act, arg3, 1);
6648 #endif
6650 break;
6651 #endif
6652 case TARGET_NR_rt_sigaction:
6654 #if defined(TARGET_ALPHA)
6655 struct target_sigaction act, oact, *pact = 0;
6656 struct target_rt_sigaction *rt_act;
6657 /* ??? arg4 == sizeof(sigset_t). */
6658 if (arg2) {
6659 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6660 goto efault;
6661 act._sa_handler = rt_act->_sa_handler;
6662 act.sa_mask = rt_act->sa_mask;
6663 act.sa_flags = rt_act->sa_flags;
6664 act.sa_restorer = arg5;
6665 unlock_user_struct(rt_act, arg2, 0);
6666 pact = &act;
6668 ret = get_errno(do_sigaction(arg1, pact, &oact));
6669 if (!is_error(ret) && arg3) {
6670 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6671 goto efault;
6672 rt_act->_sa_handler = oact._sa_handler;
6673 rt_act->sa_mask = oact.sa_mask;
6674 rt_act->sa_flags = oact.sa_flags;
6675 unlock_user_struct(rt_act, arg3, 1);
6677 #else
6678 struct target_sigaction *act;
6679 struct target_sigaction *oact;
6681 if (arg2) {
6682 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6683 goto efault;
6684 } else
6685 act = NULL;
6686 if (arg3) {
6687 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6688 ret = -TARGET_EFAULT;
6689 goto rt_sigaction_fail;
6691 } else
6692 oact = NULL;
6693 ret = get_errno(do_sigaction(arg1, act, oact));
6694 rt_sigaction_fail:
6695 if (act)
6696 unlock_user_struct(act, arg2, 0);
6697 if (oact)
6698 unlock_user_struct(oact, arg3, 1);
6699 #endif
6701 break;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            /* Old-style "get signal mask as a word": query the current
             * blocked set (how==0, set==NULL) and return it converted to
             * the target's old (single-word) sigset encoding. */
            sigset_t cur_set;
            abi_ulong target_set;
            do_sigprocmask(0, NULL, &cur_set);
            host_to_target_old_sigset(&target_set, &cur_set);
            ret = target_set;
        }
        break;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            /* Old-style "set signal mask": the new mask is OR-ed with the
             * current one (historical Linux ssetmask semantics) and the
             * previous mask is returned in the old single-word encoding. */
            sigset_t set, oset, cur_set;
            abi_ulong target_set = arg1;
            do_sigprocmask(0, NULL, &cur_set);
            target_to_host_old_sigset(&set, &target_set);
            /* NOTE(review): sigorset with the current mask makes this
             * additive; confirm against kernel ssetmask behaviour. */
            sigorset(&set, &set, &cur_set);
            do_sigprocmask(SIG_SETMASK, &set, &oset);
            host_to_target_old_sigset(&target_set, &oset);
            ret = target_set;
        }
        break;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
            /* Old (pre-rt) sigprocmask using the single-word sigset
             * encoding.  Alpha passes the mask by value in arg2 and
             * returns the old mask in the result register; other targets
             * pass guest pointers in arg2/arg3. */
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                ret = -TARGET_EINVAL;
                goto fail;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = get_errno(do_sigprocmask(how, &set, &oldset));
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    ret = -TARGET_EINVAL;
                    goto fail;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    goto efault;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                /* No new set: query-only call; 'how' is ignored. */
                how = 0;
                set_ptr = NULL;
            }
            ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        break;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            /* rt variant: same flow as sigprocmask but uses the full
             * target_sigset_t encoding (target_to_host_sigset). */
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            if (arg2) {
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    ret = -TARGET_EINVAL;
                    goto fail;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    goto efault;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                /* No new set: query-only call; 'how' is ignored. */
                how = 0;
                set_ptr = NULL;
            }
            ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        break;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            /* Old-style sigpending: fetch the host pending set and write
             * it to the guest in the old single-word encoding. */
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        break;
#endif
    case TARGET_NR_rt_sigpending:
        {
            /* rt variant: identical except for the full sigset encoding. */
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        break;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            /* Block with a temporary mask until a signal arrives.  Alpha
             * passes the mask by value in arg1; other targets pass a
             * guest pointer to an old-encoding sigset. */
            sigset_t set;
#if defined(TARGET_ALPHA)
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&set, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_old_sigset(&set, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(sigsuspend(&set));
        }
        break;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            /* rt variant: full sigset encoding read from guest memory. */
            sigset_t set;
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(sigsuspend(&set));
        }
        break;
    case TARGET_NR_rt_sigtimedwait:
        {
            /* Wait for a signal in 'set' with an optional timeout (arg3),
             * optionally writing the delivered siginfo to arg2.  The
             * successful return value is a signal number and must be
             * translated back to the target numbering. */
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                /* NOTE(review): target_to_host_timespec return value is
                 * not checked here — a bad timeout pointer is silently
                 * ignored rather than faulting. */
                target_to_host_timespec(puts, arg3);
            } else {
                puts = NULL;
            }
            ret = get_errno(sigtimedwait(&set, &uinfo, puts));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        goto efault;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        break;
6919 case TARGET_NR_rt_sigqueueinfo:
6921 siginfo_t uinfo;
6922 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6923 goto efault;
6924 target_to_host_siginfo(&uinfo, p);
6925 unlock_user(p, arg1, 0);
6926 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6928 break;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret is the guest return register value; no transcoding
         * needs to be done — do_sigreturn restores guest state directly. */
        ret = do_sigreturn(cpu_env);
        break;
#endif
    case TARGET_NR_rt_sigreturn:
        /* NOTE: ret is the guest return register value; no transcoding
         * needs to be done — do_rt_sigreturn restores guest state directly. */
        ret = do_rt_sigreturn(cpu_env);
        break;
    case TARGET_NR_sethostname:
        /* arg1 = guest pointer to the new name, arg2 = its length. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_setrlimit:
        {
            /* Translate the resource id and rlimit values (which differ
             * in width/encoding between targets) before calling the host. */
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                goto efault;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            ret = get_errno(setrlimit(resource, &rlim));
        }
        break;
    case TARGET_NR_getrlimit:
        {
            /* Inverse of setrlimit: fetch host limits and convert the
             * values back to the target encoding. */
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    goto efault;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        break;
    case TARGET_NR_getrusage:
        {
            /* host_to_target_rusage does the struct translation and the
             * guest-memory write; it returns an error code itself. */
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        break;
    case TARGET_NR_gettimeofday:
        {
            /* Timezone argument (arg2) is ignored, matching common
             * practice; only the timeval is written back. */
            struct timeval tv;
            ret = get_errno(gettimeofday(&tv, NULL));
            if (!is_error(ret)) {
                if (copy_to_user_timeval(arg1, &tv))
                    goto efault;
            }
        }
        break;
    case TARGET_NR_settimeofday:
        {
            /* Both the timeval (arg1) and timezone (arg2) are optional;
             * NULL pointers are passed through to the host call. */
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    goto efault;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    goto efault;
                }
                ptz = &tz;
            }

            ret = get_errno(settimeofday(ptv, ptz));
        }
        break;
7015 #if defined(TARGET_NR_select)
7016 case TARGET_NR_select:
7017 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7018 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7019 #else
7021 struct target_sel_arg_struct *sel;
7022 abi_ulong inp, outp, exp, tvp;
7023 long nsel;
7025 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
7026 goto efault;
7027 nsel = tswapal(sel->n);
7028 inp = tswapal(sel->inp);
7029 outp = tswapal(sel->outp);
7030 exp = tswapal(sel->exp);
7031 tvp = tswapal(sel->tvp);
7032 unlock_user_struct(sel, arg1, 0);
7033 ret = do_select(nsel, inp, outp, exp, tvp);
7035 #endif
7036 break;
7037 #endif
7038 #ifdef TARGET_NR_pselect6
7039 case TARGET_NR_pselect6:
7041 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
7042 fd_set rfds, wfds, efds;
7043 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
7044 struct timespec ts, *ts_ptr;
7047 * The 6th arg is actually two args smashed together,
7048 * so we cannot use the C library.
7050 sigset_t set;
7051 struct {
7052 sigset_t *set;
7053 size_t size;
7054 } sig, *sig_ptr;
7056 abi_ulong arg_sigset, arg_sigsize, *arg7;
7057 target_sigset_t *target_sigset;
7059 n = arg1;
7060 rfd_addr = arg2;
7061 wfd_addr = arg3;
7062 efd_addr = arg4;
7063 ts_addr = arg5;
7065 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
7066 if (ret) {
7067 goto fail;
7069 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
7070 if (ret) {
7071 goto fail;
7073 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
7074 if (ret) {
7075 goto fail;
7079 * This takes a timespec, and not a timeval, so we cannot
7080 * use the do_select() helper ...
7082 if (ts_addr) {
7083 if (target_to_host_timespec(&ts, ts_addr)) {
7084 goto efault;
7086 ts_ptr = &ts;
7087 } else {
7088 ts_ptr = NULL;
7091 /* Extract the two packed args for the sigset */
7092 if (arg6) {
7093 sig_ptr = &sig;
7094 sig.size = _NSIG / 8;
7096 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
7097 if (!arg7) {
7098 goto efault;
7100 arg_sigset = tswapal(arg7[0]);
7101 arg_sigsize = tswapal(arg7[1]);
7102 unlock_user(arg7, arg6, 0);
7104 if (arg_sigset) {
7105 sig.set = &set;
7106 if (arg_sigsize != sizeof(*target_sigset)) {
7107 /* Like the kernel, we enforce correct size sigsets */
7108 ret = -TARGET_EINVAL;
7109 goto fail;
7111 target_sigset = lock_user(VERIFY_READ, arg_sigset,
7112 sizeof(*target_sigset), 1);
7113 if (!target_sigset) {
7114 goto efault;
7116 target_to_host_sigset(&set, target_sigset);
7117 unlock_user(target_sigset, arg_sigset, 0);
7118 } else {
7119 sig.set = NULL;
7121 } else {
7122 sig_ptr = NULL;
7125 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
7126 ts_ptr, sig_ptr));
7128 if (!is_error(ret)) {
7129 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
7130 goto efault;
7131 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
7132 goto efault;
7133 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
7134 goto efault;
7136 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
7137 goto efault;
7140 break;
7141 #endif
7142 #ifdef TARGET_NR_symlink
7143 case TARGET_NR_symlink:
7145 void *p2;
7146 p = lock_user_string(arg1);
7147 p2 = lock_user_string(arg2);
7148 if (!p || !p2)
7149 ret = -TARGET_EFAULT;
7150 else
7151 ret = get_errno(symlink(p, p2));
7152 unlock_user(p2, arg2, 0);
7153 unlock_user(p, arg1, 0);
7155 break;
7156 #endif
7157 #if defined(TARGET_NR_symlinkat)
7158 case TARGET_NR_symlinkat:
7160 void *p2;
7161 p = lock_user_string(arg1);
7162 p2 = lock_user_string(arg3);
7163 if (!p || !p2)
7164 ret = -TARGET_EFAULT;
7165 else
7166 ret = get_errno(symlinkat(p, arg2, p2));
7167 unlock_user(p2, arg3, 0);
7168 unlock_user(p, arg1, 0);
7170 break;
7171 #endif
7172 #ifdef TARGET_NR_oldlstat
7173 case TARGET_NR_oldlstat:
7174 goto unimplemented;
7175 #endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            /* readlink(2) with one special case: reading /proc/self/exe
             * must report the emulated binary's path (exec_path), not
             * QEMU's own. */
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        break;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            /* readlinkat(2); like the readlink arm above, /proc/self/exe
             * is redirected to the emulated binary's path (exec_path). */
            void *p2;
            p = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                if (temp == NULL) {
                    /* Bug fix: on realpath() failure 'real' has
                     * unspecified contents, so it must not be copied to
                     * the guest; just propagate the host errno. */
                    ret = get_errno(-1);
                } else {
                    /* Match readlink(2) semantics (and the readlink arm
                     * above): copy at most arg4 bytes, no NUL
                     * terminator, return the byte count. */
                    ret = MIN(strlen(real), arg4);
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        break;
#endif
7229 #ifdef TARGET_NR_uselib
7230 case TARGET_NR_uselib:
7231 goto unimplemented;
7232 #endif
7233 #ifdef TARGET_NR_swapon
7234 case TARGET_NR_swapon:
7235 if (!(p = lock_user_string(arg1)))
7236 goto efault;
7237 ret = get_errno(swapon(p, arg2));
7238 unlock_user(p, arg1, 0);
7239 break;
7240 #endif
7241 case TARGET_NR_reboot:
7242 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
7243 /* arg4 must be ignored in all other cases */
7244 p = lock_user_string(arg4);
7245 if (!p) {
7246 goto efault;
7248 ret = get_errno(reboot(arg1, arg2, arg3, p));
7249 unlock_user(p, arg4, 0);
7250 } else {
7251 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
7253 break;
7254 #ifdef TARGET_NR_readdir
7255 case TARGET_NR_readdir:
7256 goto unimplemented;
7257 #endif
7258 #ifdef TARGET_NR_mmap
7259 case TARGET_NR_mmap:
7260 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7261 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
7262 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
7263 || defined(TARGET_S390X)
7265 abi_ulong *v;
7266 abi_ulong v1, v2, v3, v4, v5, v6;
7267 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
7268 goto efault;
7269 v1 = tswapal(v[0]);
7270 v2 = tswapal(v[1]);
7271 v3 = tswapal(v[2]);
7272 v4 = tswapal(v[3]);
7273 v5 = tswapal(v[4]);
7274 v6 = tswapal(v[5]);
7275 unlock_user(v, arg1, 0);
7276 ret = get_errno(target_mmap(v1, v2, v3,
7277 target_to_host_bitmask(v4, mmap_flags_tbl),
7278 v5, v6));
7280 #else
7281 ret = get_errno(target_mmap(arg1, arg2, arg3,
7282 target_to_host_bitmask(arg4, mmap_flags_tbl),
7283 arg5,
7284 arg6));
7285 #endif
7286 break;
7287 #endif
7288 #ifdef TARGET_NR_mmap2
7289 case TARGET_NR_mmap2:
7290 #ifndef MMAP_SHIFT
7291 #define MMAP_SHIFT 12
7292 #endif
7293 ret = get_errno(target_mmap(arg1, arg2, arg3,
7294 target_to_host_bitmask(arg4, mmap_flags_tbl),
7295 arg5,
7296 arg6 << MMAP_SHIFT));
7297 break;
7298 #endif
7299 case TARGET_NR_munmap:
7300 ret = get_errno(target_munmap(arg1, arg2));
7301 break;
7302 case TARGET_NR_mprotect:
7304 TaskState *ts = cpu->opaque;
7305 /* Special hack to detect libc making the stack executable. */
7306 if ((arg3 & PROT_GROWSDOWN)
7307 && arg1 >= ts->info->stack_limit
7308 && arg1 <= ts->info->start_stack) {
7309 arg3 &= ~PROT_GROWSDOWN;
7310 arg2 = arg2 + arg1 - ts->info->stack_limit;
7311 arg1 = ts->info->stack_limit;
7314 ret = get_errno(target_mprotect(arg1, arg2, arg3));
7315 break;
7316 #ifdef TARGET_NR_mremap
7317 case TARGET_NR_mremap:
7318 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
7319 break;
7320 #endif
7321 /* ??? msync/mlock/munlock are broken for softmmu. */
7322 #ifdef TARGET_NR_msync
7323 case TARGET_NR_msync:
7324 ret = get_errno(msync(g2h(arg1), arg2, arg3));
7325 break;
7326 #endif
7327 #ifdef TARGET_NR_mlock
7328 case TARGET_NR_mlock:
7329 ret = get_errno(mlock(g2h(arg1), arg2));
7330 break;
7331 #endif
7332 #ifdef TARGET_NR_munlock
7333 case TARGET_NR_munlock:
7334 ret = get_errno(munlock(g2h(arg1), arg2));
7335 break;
7336 #endif
7337 #ifdef TARGET_NR_mlockall
7338 case TARGET_NR_mlockall:
7339 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
7340 break;
7341 #endif
7342 #ifdef TARGET_NR_munlockall
7343 case TARGET_NR_munlockall:
7344 ret = get_errno(munlockall());
7345 break;
7346 #endif
7347 case TARGET_NR_truncate:
7348 if (!(p = lock_user_string(arg1)))
7349 goto efault;
7350 ret = get_errno(truncate(p, arg2));
7351 unlock_user(p, arg1, 0);
7352 break;
7353 case TARGET_NR_ftruncate:
7354 ret = get_errno(ftruncate(arg1, arg2));
7355 break;
7356 case TARGET_NR_fchmod:
7357 ret = get_errno(fchmod(arg1, arg2));
7358 break;
7359 #if defined(TARGET_NR_fchmodat)
7360 case TARGET_NR_fchmodat:
7361 if (!(p = lock_user_string(arg2)))
7362 goto efault;
7363 ret = get_errno(fchmodat(arg1, p, arg3, 0));
7364 unlock_user(p, arg2, 0);
7365 break;
7366 #endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings. */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            ret = -host_to_target_errno(errno);
            break;
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error by
         * clearing Alpha's syscall-error register. */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers
         * (the kernel ABI returns 20 - nice for this syscall). */
        ret = 20 - ret;
#endif
        break;
    case TARGET_NR_setpriority:
        ret = get_errno(setpriority(arg1, arg2, arg3));
        break;
#ifdef TARGET_NR_profil
    case TARGET_NR_profil:
        goto unimplemented;
#endif
7387 #ifdef TARGET_NR_profil
7388 case TARGET_NR_profil:
7389 goto unimplemented;
7390 #endif
7391 case TARGET_NR_statfs:
7392 if (!(p = lock_user_string(arg1)))
7393 goto efault;
7394 ret = get_errno(statfs(path(p), &stfs));
7395 unlock_user(p, arg1, 0);
7396 convert_statfs:
7397 if (!is_error(ret)) {
7398 struct target_statfs *target_stfs;
7400 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
7401 goto efault;
7402 __put_user(stfs.f_type, &target_stfs->f_type);
7403 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7404 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7405 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7406 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7407 __put_user(stfs.f_files, &target_stfs->f_files);
7408 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7409 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7410 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7411 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7412 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7413 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7414 unlock_user_struct(target_stfs, arg2, 1);
7416 break;
7417 case TARGET_NR_fstatfs:
7418 ret = get_errno(fstatfs(arg1, &stfs));
7419 goto convert_statfs;
7420 #ifdef TARGET_NR_statfs64
7421 case TARGET_NR_statfs64:
7422 if (!(p = lock_user_string(arg1)))
7423 goto efault;
7424 ret = get_errno(statfs(path(p), &stfs));
7425 unlock_user(p, arg1, 0);
7426 convert_statfs64:
7427 if (!is_error(ret)) {
7428 struct target_statfs64 *target_stfs;
7430 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
7431 goto efault;
7432 __put_user(stfs.f_type, &target_stfs->f_type);
7433 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7434 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7435 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7436 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7437 __put_user(stfs.f_files, &target_stfs->f_files);
7438 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7439 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7440 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7441 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7442 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7443 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7444 unlock_user_struct(target_stfs, arg3, 1);
7446 break;
7447 case TARGET_NR_fstatfs64:
7448 ret = get_errno(fstatfs(arg1, &stfs));
7449 goto convert_statfs64;
7450 #endif
#ifdef TARGET_NR_ioperm
    case TARGET_NR_ioperm:
        goto unimplemented;
#endif
    /* Socket-family syscalls: each arm is a thin dispatch to the do_*()
     * helpers defined earlier in this file, which handle guest/host
     * sockaddr and msghdr translation. */
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        /* Multiplexed socket entry point used by older ABIs. */
        ret = do_socketcall(arg1, arg2);
        break;
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* Plain accept is accept4 with no flags. */
        ret = do_accept4(arg1, arg2, arg3, 0);
        break;
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
#ifdef CONFIG_ACCEPT4
        ret = do_accept4(arg1, arg2, arg3, arg4);
#else
        goto unimplemented;
#endif
        break;
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        ret = do_bind(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        ret = do_connect(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        ret = do_getpeername(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        ret = do_getsockname(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        /* No pointer arguments: call the host directly. */
        ret = get_errno(listen(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv is recvfrom with a NULL source address. */
        ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
        break;
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        /* Final flag selects receive (0) vs send (1). */
        ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
        break;
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        /* send is sendto with a NULL destination address. */
        ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
        break;
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
        break;
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
        break;
    case TARGET_NR_recvmmsg:
        ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
        break;
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        ret = get_errno(shutdown(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            goto efault;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        /* On success 'ret' is the number of bytes to copy back to the
         * guest buffer. */
        unlock_user(p, arg1, ret);
        break;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        ret = do_socket(arg1, arg2, arg3);
        /* A fresh socket fd carries no special translation state. */
        fd_trans_unregister(ret);
        break;
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        ret = do_socketpair(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
        break;
#endif
7574 case TARGET_NR_syslog:
7575 if (!(p = lock_user_string(arg2)))
7576 goto efault;
7577 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
7578 unlock_user(p, arg2, 0);
7579 break;
7581 case TARGET_NR_setitimer:
7583 struct itimerval value, ovalue, *pvalue;
7585 if (arg2) {
7586 pvalue = &value;
7587 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
7588 || copy_from_user_timeval(&pvalue->it_value,
7589 arg2 + sizeof(struct target_timeval)))
7590 goto efault;
7591 } else {
7592 pvalue = NULL;
7594 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
7595 if (!is_error(ret) && arg3) {
7596 if (copy_to_user_timeval(arg3,
7597 &ovalue.it_interval)
7598 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
7599 &ovalue.it_value))
7600 goto efault;
7603 break;
7604 case TARGET_NR_getitimer:
7606 struct itimerval value;
7608 ret = get_errno(getitimer(arg1, &value));
7609 if (!is_error(ret) && arg2) {
7610 if (copy_to_user_timeval(arg2,
7611 &value.it_interval)
7612 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
7613 &value.it_value))
7614 goto efault;
7617 break;
7618 #ifdef TARGET_NR_stat
7619 case TARGET_NR_stat:
7620 if (!(p = lock_user_string(arg1)))
7621 goto efault;
7622 ret = get_errno(stat(path(p), &st));
7623 unlock_user(p, arg1, 0);
7624 goto do_stat;
7625 #endif
7626 #ifdef TARGET_NR_lstat
7627 case TARGET_NR_lstat:
7628 if (!(p = lock_user_string(arg1)))
7629 goto efault;
7630 ret = get_errno(lstat(path(p), &st));
7631 unlock_user(p, arg1, 0);
7632 goto do_stat;
7633 #endif
7634 case TARGET_NR_fstat:
7636 ret = get_errno(fstat(arg1, &st));
7637 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
7638 do_stat:
7639 #endif
7640 if (!is_error(ret)) {
7641 struct target_stat *target_st;
7643 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
7644 goto efault;
7645 memset(target_st, 0, sizeof(*target_st));
7646 __put_user(st.st_dev, &target_st->st_dev);
7647 __put_user(st.st_ino, &target_st->st_ino);
7648 __put_user(st.st_mode, &target_st->st_mode);
7649 __put_user(st.st_uid, &target_st->st_uid);
7650 __put_user(st.st_gid, &target_st->st_gid);
7651 __put_user(st.st_nlink, &target_st->st_nlink);
7652 __put_user(st.st_rdev, &target_st->st_rdev);
7653 __put_user(st.st_size, &target_st->st_size);
7654 __put_user(st.st_blksize, &target_st->st_blksize);
7655 __put_user(st.st_blocks, &target_st->st_blocks);
7656 __put_user(st.st_atime, &target_st->target_st_atime);
7657 __put_user(st.st_mtime, &target_st->target_st_mtime);
7658 __put_user(st.st_ctime, &target_st->target_st_ctime);
7659 unlock_user_struct(target_st, arg2, 1);
7662 break;
7663 #ifdef TARGET_NR_olduname
7664 case TARGET_NR_olduname:
7665 goto unimplemented;
7666 #endif
7667 #ifdef TARGET_NR_iopl
7668 case TARGET_NR_iopl:
7669 goto unimplemented;
7670 #endif
7671 case TARGET_NR_vhangup:
7672 ret = get_errno(vhangup());
7673 break;
7674 #ifdef TARGET_NR_idle
7675 case TARGET_NR_idle:
7676 goto unimplemented;
7677 #endif
7678 #ifdef TARGET_NR_syscall
7679 case TARGET_NR_syscall:
7680 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
7681 arg6, arg7, arg8, 0);
7682 break;
7683 #endif
7684 case TARGET_NR_wait4:
7686 int status;
7687 abi_long status_ptr = arg2;
7688 struct rusage rusage, *rusage_ptr;
7689 abi_ulong target_rusage = arg4;
7690 abi_long rusage_err;
7691 if (target_rusage)
7692 rusage_ptr = &rusage;
7693 else
7694 rusage_ptr = NULL;
7695 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
7696 if (!is_error(ret)) {
7697 if (status_ptr && ret) {
7698 status = host_to_target_waitstatus(status);
7699 if (put_user_s32(status, status_ptr))
7700 goto efault;
7702 if (target_rusage) {
7703 rusage_err = host_to_target_rusage(target_rusage, &rusage);
7704 if (rusage_err) {
7705 ret = rusage_err;
7710 break;
7711 #ifdef TARGET_NR_swapoff
7712 case TARGET_NR_swapoff:
7713 if (!(p = lock_user_string(arg1)))
7714 goto efault;
7715 ret = get_errno(swapoff(p));
7716 unlock_user(p, arg1, 0);
7717 break;
7718 #endif
7719 case TARGET_NR_sysinfo:
7721 struct target_sysinfo *target_value;
7722 struct sysinfo value;
7723 ret = get_errno(sysinfo(&value));
7724 if (!is_error(ret) && arg1)
7726 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7727 goto efault;
7728 __put_user(value.uptime, &target_value->uptime);
7729 __put_user(value.loads[0], &target_value->loads[0]);
7730 __put_user(value.loads[1], &target_value->loads[1]);
7731 __put_user(value.loads[2], &target_value->loads[2]);
7732 __put_user(value.totalram, &target_value->totalram);
7733 __put_user(value.freeram, &target_value->freeram);
7734 __put_user(value.sharedram, &target_value->sharedram);
7735 __put_user(value.bufferram, &target_value->bufferram);
7736 __put_user(value.totalswap, &target_value->totalswap);
7737 __put_user(value.freeswap, &target_value->freeswap);
7738 __put_user(value.procs, &target_value->procs);
7739 __put_user(value.totalhigh, &target_value->totalhigh);
7740 __put_user(value.freehigh, &target_value->freehigh);
7741 __put_user(value.mem_unit, &target_value->mem_unit);
7742 unlock_user_struct(target_value, arg1, 1);
7745 break;
7746 #ifdef TARGET_NR_ipc
7747 case TARGET_NR_ipc:
7748 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7749 break;
7750 #endif
7751 #ifdef TARGET_NR_semget
7752 case TARGET_NR_semget:
7753 ret = get_errno(semget(arg1, arg2, arg3));
7754 break;
7755 #endif
7756 #ifdef TARGET_NR_semop
7757 case TARGET_NR_semop:
7758 ret = do_semop(arg1, arg2, arg3);
7759 break;
7760 #endif
7761 #ifdef TARGET_NR_semctl
7762 case TARGET_NR_semctl:
7763 ret = do_semctl(arg1, arg2, arg3, arg4);
7764 break;
7765 #endif
7766 #ifdef TARGET_NR_msgctl
7767 case TARGET_NR_msgctl:
7768 ret = do_msgctl(arg1, arg2, arg3);
7769 break;
7770 #endif
7771 #ifdef TARGET_NR_msgget
7772 case TARGET_NR_msgget:
7773 ret = get_errno(msgget(arg1, arg2));
7774 break;
7775 #endif
7776 #ifdef TARGET_NR_msgrcv
7777 case TARGET_NR_msgrcv:
7778 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7779 break;
7780 #endif
7781 #ifdef TARGET_NR_msgsnd
7782 case TARGET_NR_msgsnd:
7783 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7784 break;
7785 #endif
7786 #ifdef TARGET_NR_shmget
7787 case TARGET_NR_shmget:
7788 ret = get_errno(shmget(arg1, arg2, arg3));
7789 break;
7790 #endif
7791 #ifdef TARGET_NR_shmctl
7792 case TARGET_NR_shmctl:
7793 ret = do_shmctl(arg1, arg2, arg3);
7794 break;
7795 #endif
7796 #ifdef TARGET_NR_shmat
7797 case TARGET_NR_shmat:
7798 ret = do_shmat(arg1, arg2, arg3);
7799 break;
7800 #endif
7801 #ifdef TARGET_NR_shmdt
7802 case TARGET_NR_shmdt:
7803 ret = do_shmdt(arg1);
7804 break;
7805 #endif
7806 case TARGET_NR_fsync:
7807 ret = get_errno(fsync(arg1));
7808 break;
7809 case TARGET_NR_clone:
7810 /* Linux manages to have three different orderings for its
7811 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7812 * match the kernel's CONFIG_CLONE_* settings.
7813 * Microblaze is further special in that it uses a sixth
7814 * implicit argument to clone for the TLS pointer.
7816 #if defined(TARGET_MICROBLAZE)
7817 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7818 #elif defined(TARGET_CLONE_BACKWARDS)
7819 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7820 #elif defined(TARGET_CLONE_BACKWARDS2)
7821 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7822 #else
7823 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7824 #endif
7825 break;
7826 #ifdef __NR_exit_group
7827 /* new thread calls */
7828 case TARGET_NR_exit_group:
7829 #ifdef TARGET_GPROF
7830 _mcleanup();
7831 #endif
7832 gdb_exit(cpu_env, arg1);
7833 ret = get_errno(exit_group(arg1));
7834 break;
7835 #endif
7836 case TARGET_NR_setdomainname:
7837 if (!(p = lock_user_string(arg1)))
7838 goto efault;
7839 ret = get_errno(setdomainname(p, arg2));
7840 unlock_user(p, arg1, 0);
7841 break;
7842 case TARGET_NR_uname:
7843 /* no need to transcode because we use the linux syscall */
7845 struct new_utsname * buf;
7847 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7848 goto efault;
7849 ret = get_errno(sys_uname(buf));
7850 if (!is_error(ret)) {
7851 /* Overrite the native machine name with whatever is being
7852 emulated. */
7853 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7854 /* Allow the user to override the reported release. */
7855 if (qemu_uname_release && *qemu_uname_release)
7856 strcpy (buf->release, qemu_uname_release);
7858 unlock_user_struct(buf, arg1, 1);
7860 break;
7861 #ifdef TARGET_I386
7862 case TARGET_NR_modify_ldt:
7863 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7864 break;
7865 #if !defined(TARGET_X86_64)
7866 case TARGET_NR_vm86old:
7867 goto unimplemented;
7868 case TARGET_NR_vm86:
7869 ret = do_vm86(cpu_env, arg1, arg2);
7870 break;
7871 #endif
7872 #endif
7873 case TARGET_NR_adjtimex:
7874 goto unimplemented;
7875 #ifdef TARGET_NR_create_module
7876 case TARGET_NR_create_module:
7877 #endif
7878 case TARGET_NR_init_module:
7879 case TARGET_NR_delete_module:
7880 #ifdef TARGET_NR_get_kernel_syms
7881 case TARGET_NR_get_kernel_syms:
7882 #endif
7883 goto unimplemented;
7884 case TARGET_NR_quotactl:
7885 goto unimplemented;
7886 case TARGET_NR_getpgid:
7887 ret = get_errno(getpgid(arg1));
7888 break;
7889 case TARGET_NR_fchdir:
7890 ret = get_errno(fchdir(arg1));
7891 break;
7892 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7893 case TARGET_NR_bdflush:
7894 goto unimplemented;
7895 #endif
7896 #ifdef TARGET_NR_sysfs
7897 case TARGET_NR_sysfs:
7898 goto unimplemented;
7899 #endif
7900 case TARGET_NR_personality:
7901 ret = get_errno(personality(arg1));
7902 break;
7903 #ifdef TARGET_NR_afs_syscall
7904 case TARGET_NR_afs_syscall:
7905 goto unimplemented;
7906 #endif
7907 #ifdef TARGET_NR__llseek /* Not on alpha */
7908 case TARGET_NR__llseek:
7910 int64_t res;
7911 #if !defined(__NR_llseek)
7912 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7913 if (res == -1) {
7914 ret = get_errno(res);
7915 } else {
7916 ret = 0;
7918 #else
7919 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7920 #endif
7921 if ((ret == 0) && put_user_s64(res, arg4)) {
7922 goto efault;
7925 break;
7926 #endif
7927 #ifdef TARGET_NR_getdents
7928 case TARGET_NR_getdents:
7929 #ifdef __NR_getdents
7930 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7932 struct target_dirent *target_dirp;
7933 struct linux_dirent *dirp;
7934 abi_long count = arg3;
7936 dirp = g_try_malloc(count);
7937 if (!dirp) {
7938 ret = -TARGET_ENOMEM;
7939 goto fail;
7942 ret = get_errno(sys_getdents(arg1, dirp, count));
7943 if (!is_error(ret)) {
7944 struct linux_dirent *de;
7945 struct target_dirent *tde;
7946 int len = ret;
7947 int reclen, treclen;
7948 int count1, tnamelen;
7950 count1 = 0;
7951 de = dirp;
7952 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7953 goto efault;
7954 tde = target_dirp;
7955 while (len > 0) {
7956 reclen = de->d_reclen;
7957 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7958 assert(tnamelen >= 0);
7959 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7960 assert(count1 + treclen <= count);
7961 tde->d_reclen = tswap16(treclen);
7962 tde->d_ino = tswapal(de->d_ino);
7963 tde->d_off = tswapal(de->d_off);
7964 memcpy(tde->d_name, de->d_name, tnamelen);
7965 de = (struct linux_dirent *)((char *)de + reclen);
7966 len -= reclen;
7967 tde = (struct target_dirent *)((char *)tde + treclen);
7968 count1 += treclen;
7970 ret = count1;
7971 unlock_user(target_dirp, arg2, ret);
7973 g_free(dirp);
7975 #else
7977 struct linux_dirent *dirp;
7978 abi_long count = arg3;
7980 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7981 goto efault;
7982 ret = get_errno(sys_getdents(arg1, dirp, count));
7983 if (!is_error(ret)) {
7984 struct linux_dirent *de;
7985 int len = ret;
7986 int reclen;
7987 de = dirp;
7988 while (len > 0) {
7989 reclen = de->d_reclen;
7990 if (reclen > len)
7991 break;
7992 de->d_reclen = tswap16(reclen);
7993 tswapls(&de->d_ino);
7994 tswapls(&de->d_off);
7995 de = (struct linux_dirent *)((char *)de + reclen);
7996 len -= reclen;
7999 unlock_user(dirp, arg2, ret);
8001 #endif
8002 #else
8003 /* Implement getdents in terms of getdents64 */
8005 struct linux_dirent64 *dirp;
8006 abi_long count = arg3;
8008 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8009 if (!dirp) {
8010 goto efault;
8012 ret = get_errno(sys_getdents64(arg1, dirp, count));
8013 if (!is_error(ret)) {
8014 /* Convert the dirent64 structs to target dirent. We do this
8015 * in-place, since we can guarantee that a target_dirent is no
8016 * larger than a dirent64; however this means we have to be
8017 * careful to read everything before writing in the new format.
8019 struct linux_dirent64 *de;
8020 struct target_dirent *tde;
8021 int len = ret;
8022 int tlen = 0;
8024 de = dirp;
8025 tde = (struct target_dirent *)dirp;
8026 while (len > 0) {
8027 int namelen, treclen;
8028 int reclen = de->d_reclen;
8029 uint64_t ino = de->d_ino;
8030 int64_t off = de->d_off;
8031 uint8_t type = de->d_type;
8033 namelen = strlen(de->d_name);
8034 treclen = offsetof(struct target_dirent, d_name)
8035 + namelen + 2;
8036 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8038 memmove(tde->d_name, de->d_name, namelen + 1);
8039 tde->d_ino = tswapal(ino);
8040 tde->d_off = tswapal(off);
8041 tde->d_reclen = tswap16(treclen);
8042 /* The target_dirent type is in what was formerly a padding
8043 * byte at the end of the structure:
8045 *(((char *)tde) + treclen - 1) = type;
8047 de = (struct linux_dirent64 *)((char *)de + reclen);
8048 tde = (struct target_dirent *)((char *)tde + treclen);
8049 len -= reclen;
8050 tlen += treclen;
8052 ret = tlen;
8054 unlock_user(dirp, arg2, ret);
8056 #endif
8057 break;
8058 #endif /* TARGET_NR_getdents */
8059 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8060 case TARGET_NR_getdents64:
8062 struct linux_dirent64 *dirp;
8063 abi_long count = arg3;
8064 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8065 goto efault;
8066 ret = get_errno(sys_getdents64(arg1, dirp, count));
8067 if (!is_error(ret)) {
8068 struct linux_dirent64 *de;
8069 int len = ret;
8070 int reclen;
8071 de = dirp;
8072 while (len > 0) {
8073 reclen = de->d_reclen;
8074 if (reclen > len)
8075 break;
8076 de->d_reclen = tswap16(reclen);
8077 tswap64s((uint64_t *)&de->d_ino);
8078 tswap64s((uint64_t *)&de->d_off);
8079 de = (struct linux_dirent64 *)((char *)de + reclen);
8080 len -= reclen;
8083 unlock_user(dirp, arg2, ret);
8085 break;
8086 #endif /* TARGET_NR_getdents64 */
8087 #if defined(TARGET_NR__newselect)
8088 case TARGET_NR__newselect:
8089 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8090 break;
8091 #endif
8092 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8093 # ifdef TARGET_NR_poll
8094 case TARGET_NR_poll:
8095 # endif
8096 # ifdef TARGET_NR_ppoll
8097 case TARGET_NR_ppoll:
8098 # endif
8100 struct target_pollfd *target_pfd;
8101 unsigned int nfds = arg2;
8102 int timeout = arg3;
8103 struct pollfd *pfd;
8104 unsigned int i;
8106 pfd = NULL;
8107 target_pfd = NULL;
8108 if (nfds) {
8109 target_pfd = lock_user(VERIFY_WRITE, arg1,
8110 sizeof(struct target_pollfd) * nfds, 1);
8111 if (!target_pfd) {
8112 goto efault;
8115 pfd = alloca(sizeof(struct pollfd) * nfds);
8116 for (i = 0; i < nfds; i++) {
8117 pfd[i].fd = tswap32(target_pfd[i].fd);
8118 pfd[i].events = tswap16(target_pfd[i].events);
8122 # ifdef TARGET_NR_ppoll
8123 if (num == TARGET_NR_ppoll) {
8124 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
8125 target_sigset_t *target_set;
8126 sigset_t _set, *set = &_set;
8128 if (arg3) {
8129 if (target_to_host_timespec(timeout_ts, arg3)) {
8130 unlock_user(target_pfd, arg1, 0);
8131 goto efault;
8133 } else {
8134 timeout_ts = NULL;
8137 if (arg4) {
8138 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
8139 if (!target_set) {
8140 unlock_user(target_pfd, arg1, 0);
8141 goto efault;
8143 target_to_host_sigset(set, target_set);
8144 } else {
8145 set = NULL;
8148 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
8150 if (!is_error(ret) && arg3) {
8151 host_to_target_timespec(arg3, timeout_ts);
8153 if (arg4) {
8154 unlock_user(target_set, arg4, 0);
8156 } else
8157 # endif
8158 ret = get_errno(poll(pfd, nfds, timeout));
8160 if (!is_error(ret)) {
8161 for(i = 0; i < nfds; i++) {
8162 target_pfd[i].revents = tswap16(pfd[i].revents);
8165 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
8167 break;
8168 #endif
8169 case TARGET_NR_flock:
8170 /* NOTE: the flock constant seems to be the same for every
8171 Linux platform */
8172 ret = get_errno(flock(arg1, arg2));
8173 break;
8174 case TARGET_NR_readv:
8176 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
8177 if (vec != NULL) {
8178 ret = get_errno(readv(arg1, vec, arg3));
8179 unlock_iovec(vec, arg2, arg3, 1);
8180 } else {
8181 ret = -host_to_target_errno(errno);
8184 break;
8185 case TARGET_NR_writev:
8187 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8188 if (vec != NULL) {
8189 ret = get_errno(writev(arg1, vec, arg3));
8190 unlock_iovec(vec, arg2, arg3, 0);
8191 } else {
8192 ret = -host_to_target_errno(errno);
8195 break;
8196 case TARGET_NR_getsid:
8197 ret = get_errno(getsid(arg1));
8198 break;
8199 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
8200 case TARGET_NR_fdatasync:
8201 ret = get_errno(fdatasync(arg1));
8202 break;
8203 #endif
8204 #ifdef TARGET_NR__sysctl
8205 case TARGET_NR__sysctl:
8206 /* We don't implement this, but ENOTDIR is always a safe
8207 return value. */
8208 ret = -TARGET_ENOTDIR;
8209 break;
8210 #endif
8211 case TARGET_NR_sched_getaffinity:
8213 unsigned int mask_size;
8214 unsigned long *mask;
8217 * sched_getaffinity needs multiples of ulong, so need to take
8218 * care of mismatches between target ulong and host ulong sizes.
8220 if (arg2 & (sizeof(abi_ulong) - 1)) {
8221 ret = -TARGET_EINVAL;
8222 break;
8224 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8226 mask = alloca(mask_size);
8227 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
8229 if (!is_error(ret)) {
8230 if (ret > arg2) {
8231 /* More data returned than the caller's buffer will fit.
8232 * This only happens if sizeof(abi_long) < sizeof(long)
8233 * and the caller passed us a buffer holding an odd number
8234 * of abi_longs. If the host kernel is actually using the
8235 * extra 4 bytes then fail EINVAL; otherwise we can just
8236 * ignore them and only copy the interesting part.
8238 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
8239 if (numcpus > arg2 * 8) {
8240 ret = -TARGET_EINVAL;
8241 break;
8243 ret = arg2;
8246 if (copy_to_user(arg3, mask, ret)) {
8247 goto efault;
8251 break;
8252 case TARGET_NR_sched_setaffinity:
8254 unsigned int mask_size;
8255 unsigned long *mask;
8258 * sched_setaffinity needs multiples of ulong, so need to take
8259 * care of mismatches between target ulong and host ulong sizes.
8261 if (arg2 & (sizeof(abi_ulong) - 1)) {
8262 ret = -TARGET_EINVAL;
8263 break;
8265 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8267 mask = alloca(mask_size);
8268 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
8269 goto efault;
8271 memcpy(mask, p, arg2);
8272 unlock_user_struct(p, arg2, 0);
8274 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
8276 break;
8277 case TARGET_NR_sched_setparam:
8279 struct sched_param *target_schp;
8280 struct sched_param schp;
8282 if (arg2 == 0) {
8283 return -TARGET_EINVAL;
8285 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
8286 goto efault;
8287 schp.sched_priority = tswap32(target_schp->sched_priority);
8288 unlock_user_struct(target_schp, arg2, 0);
8289 ret = get_errno(sched_setparam(arg1, &schp));
8291 break;
8292 case TARGET_NR_sched_getparam:
8294 struct sched_param *target_schp;
8295 struct sched_param schp;
8297 if (arg2 == 0) {
8298 return -TARGET_EINVAL;
8300 ret = get_errno(sched_getparam(arg1, &schp));
8301 if (!is_error(ret)) {
8302 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
8303 goto efault;
8304 target_schp->sched_priority = tswap32(schp.sched_priority);
8305 unlock_user_struct(target_schp, arg2, 1);
8308 break;
8309 case TARGET_NR_sched_setscheduler:
8311 struct sched_param *target_schp;
8312 struct sched_param schp;
8313 if (arg3 == 0) {
8314 return -TARGET_EINVAL;
8316 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
8317 goto efault;
8318 schp.sched_priority = tswap32(target_schp->sched_priority);
8319 unlock_user_struct(target_schp, arg3, 0);
8320 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
8322 break;
8323 case TARGET_NR_sched_getscheduler:
8324 ret = get_errno(sched_getscheduler(arg1));
8325 break;
8326 case TARGET_NR_sched_yield:
8327 ret = get_errno(sched_yield());
8328 break;
8329 case TARGET_NR_sched_get_priority_max:
8330 ret = get_errno(sched_get_priority_max(arg1));
8331 break;
8332 case TARGET_NR_sched_get_priority_min:
8333 ret = get_errno(sched_get_priority_min(arg1));
8334 break;
8335 case TARGET_NR_sched_rr_get_interval:
8337 struct timespec ts;
8338 ret = get_errno(sched_rr_get_interval(arg1, &ts));
8339 if (!is_error(ret)) {
8340 ret = host_to_target_timespec(arg2, &ts);
8343 break;
8344 case TARGET_NR_nanosleep:
8346 struct timespec req, rem;
8347 target_to_host_timespec(&req, arg1);
8348 ret = get_errno(nanosleep(&req, &rem));
8349 if (is_error(ret) && arg2) {
8350 host_to_target_timespec(arg2, &rem);
8353 break;
8354 #ifdef TARGET_NR_query_module
8355 case TARGET_NR_query_module:
8356 goto unimplemented;
8357 #endif
8358 #ifdef TARGET_NR_nfsservctl
8359 case TARGET_NR_nfsservctl:
8360 goto unimplemented;
8361 #endif
8362 case TARGET_NR_prctl:
8363 switch (arg1) {
8364 case PR_GET_PDEATHSIG:
8366 int deathsig;
8367 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
8368 if (!is_error(ret) && arg2
8369 && put_user_ual(deathsig, arg2)) {
8370 goto efault;
8372 break;
8374 #ifdef PR_GET_NAME
8375 case PR_GET_NAME:
8377 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
8378 if (!name) {
8379 goto efault;
8381 ret = get_errno(prctl(arg1, (unsigned long)name,
8382 arg3, arg4, arg5));
8383 unlock_user(name, arg2, 16);
8384 break;
8386 case PR_SET_NAME:
8388 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
8389 if (!name) {
8390 goto efault;
8392 ret = get_errno(prctl(arg1, (unsigned long)name,
8393 arg3, arg4, arg5));
8394 unlock_user(name, arg2, 0);
8395 break;
8397 #endif
8398 default:
8399 /* Most prctl options have no pointer arguments */
8400 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
8401 break;
8403 break;
8404 #ifdef TARGET_NR_arch_prctl
8405 case TARGET_NR_arch_prctl:
8406 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
8407 ret = do_arch_prctl(cpu_env, arg1, arg2);
8408 break;
8409 #else
8410 goto unimplemented;
8411 #endif
8412 #endif
8413 #ifdef TARGET_NR_pread64
8414 case TARGET_NR_pread64:
8415 if (regpairs_aligned(cpu_env)) {
8416 arg4 = arg5;
8417 arg5 = arg6;
8419 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8420 goto efault;
8421 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
8422 unlock_user(p, arg2, ret);
8423 break;
8424 case TARGET_NR_pwrite64:
8425 if (regpairs_aligned(cpu_env)) {
8426 arg4 = arg5;
8427 arg5 = arg6;
8429 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8430 goto efault;
8431 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
8432 unlock_user(p, arg2, 0);
8433 break;
8434 #endif
8435 case TARGET_NR_getcwd:
8436 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
8437 goto efault;
8438 ret = get_errno(sys_getcwd1(p, arg2));
8439 unlock_user(p, arg1, ret);
8440 break;
8441 case TARGET_NR_capget:
8442 case TARGET_NR_capset:
8444 struct target_user_cap_header *target_header;
8445 struct target_user_cap_data *target_data = NULL;
8446 struct __user_cap_header_struct header;
8447 struct __user_cap_data_struct data[2];
8448 struct __user_cap_data_struct *dataptr = NULL;
8449 int i, target_datalen;
8450 int data_items = 1;
8452 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
8453 goto efault;
8455 header.version = tswap32(target_header->version);
8456 header.pid = tswap32(target_header->pid);
8458 if (header.version != _LINUX_CAPABILITY_VERSION) {
8459 /* Version 2 and up takes pointer to two user_data structs */
8460 data_items = 2;
8463 target_datalen = sizeof(*target_data) * data_items;
8465 if (arg2) {
8466 if (num == TARGET_NR_capget) {
8467 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
8468 } else {
8469 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
8471 if (!target_data) {
8472 unlock_user_struct(target_header, arg1, 0);
8473 goto efault;
8476 if (num == TARGET_NR_capset) {
8477 for (i = 0; i < data_items; i++) {
8478 data[i].effective = tswap32(target_data[i].effective);
8479 data[i].permitted = tswap32(target_data[i].permitted);
8480 data[i].inheritable = tswap32(target_data[i].inheritable);
8484 dataptr = data;
8487 if (num == TARGET_NR_capget) {
8488 ret = get_errno(capget(&header, dataptr));
8489 } else {
8490 ret = get_errno(capset(&header, dataptr));
8493 /* The kernel always updates version for both capget and capset */
8494 target_header->version = tswap32(header.version);
8495 unlock_user_struct(target_header, arg1, 1);
8497 if (arg2) {
8498 if (num == TARGET_NR_capget) {
8499 for (i = 0; i < data_items; i++) {
8500 target_data[i].effective = tswap32(data[i].effective);
8501 target_data[i].permitted = tswap32(data[i].permitted);
8502 target_data[i].inheritable = tswap32(data[i].inheritable);
8504 unlock_user(target_data, arg2, target_datalen);
8505 } else {
8506 unlock_user(target_data, arg2, 0);
8509 break;
8511 case TARGET_NR_sigaltstack:
8512 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
8513 break;
8515 #ifdef CONFIG_SENDFILE
8516 case TARGET_NR_sendfile:
8518 off_t *offp = NULL;
8519 off_t off;
8520 if (arg3) {
8521 ret = get_user_sal(off, arg3);
8522 if (is_error(ret)) {
8523 break;
8525 offp = &off;
8527 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8528 if (!is_error(ret) && arg3) {
8529 abi_long ret2 = put_user_sal(off, arg3);
8530 if (is_error(ret2)) {
8531 ret = ret2;
8534 break;
8536 #ifdef TARGET_NR_sendfile64
8537 case TARGET_NR_sendfile64:
8539 off_t *offp = NULL;
8540 off_t off;
8541 if (arg3) {
8542 ret = get_user_s64(off, arg3);
8543 if (is_error(ret)) {
8544 break;
8546 offp = &off;
8548 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8549 if (!is_error(ret) && arg3) {
8550 abi_long ret2 = put_user_s64(off, arg3);
8551 if (is_error(ret2)) {
8552 ret = ret2;
8555 break;
8557 #endif
8558 #else
8559 case TARGET_NR_sendfile:
8560 #ifdef TARGET_NR_sendfile64
8561 case TARGET_NR_sendfile64:
8562 #endif
8563 goto unimplemented;
8564 #endif
8566 #ifdef TARGET_NR_getpmsg
8567 case TARGET_NR_getpmsg:
8568 goto unimplemented;
8569 #endif
8570 #ifdef TARGET_NR_putpmsg
8571 case TARGET_NR_putpmsg:
8572 goto unimplemented;
8573 #endif
8574 #ifdef TARGET_NR_vfork
8575 case TARGET_NR_vfork:
8576 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
8577 0, 0, 0, 0));
8578 break;
8579 #endif
8580 #ifdef TARGET_NR_ugetrlimit
8581 case TARGET_NR_ugetrlimit:
8583 struct rlimit rlim;
8584 int resource = target_to_host_resource(arg1);
8585 ret = get_errno(getrlimit(resource, &rlim));
8586 if (!is_error(ret)) {
8587 struct target_rlimit *target_rlim;
8588 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8589 goto efault;
8590 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8591 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8592 unlock_user_struct(target_rlim, arg2, 1);
8594 break;
8596 #endif
8597 #ifdef TARGET_NR_truncate64
8598 case TARGET_NR_truncate64:
8599 if (!(p = lock_user_string(arg1)))
8600 goto efault;
8601 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
8602 unlock_user(p, arg1, 0);
8603 break;
8604 #endif
8605 #ifdef TARGET_NR_ftruncate64
8606 case TARGET_NR_ftruncate64:
8607 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
8608 break;
8609 #endif
8610 #ifdef TARGET_NR_stat64
8611 case TARGET_NR_stat64:
8612 if (!(p = lock_user_string(arg1)))
8613 goto efault;
8614 ret = get_errno(stat(path(p), &st));
8615 unlock_user(p, arg1, 0);
8616 if (!is_error(ret))
8617 ret = host_to_target_stat64(cpu_env, arg2, &st);
8618 break;
8619 #endif
8620 #ifdef TARGET_NR_lstat64
8621 case TARGET_NR_lstat64:
8622 if (!(p = lock_user_string(arg1)))
8623 goto efault;
8624 ret = get_errno(lstat(path(p), &st));
8625 unlock_user(p, arg1, 0);
8626 if (!is_error(ret))
8627 ret = host_to_target_stat64(cpu_env, arg2, &st);
8628 break;
8629 #endif
8630 #ifdef TARGET_NR_fstat64
8631 case TARGET_NR_fstat64:
8632 ret = get_errno(fstat(arg1, &st));
8633 if (!is_error(ret))
8634 ret = host_to_target_stat64(cpu_env, arg2, &st);
8635 break;
8636 #endif
8637 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8638 #ifdef TARGET_NR_fstatat64
8639 case TARGET_NR_fstatat64:
8640 #endif
8641 #ifdef TARGET_NR_newfstatat
8642 case TARGET_NR_newfstatat:
8643 #endif
8644 if (!(p = lock_user_string(arg2)))
8645 goto efault;
8646 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
8647 if (!is_error(ret))
8648 ret = host_to_target_stat64(cpu_env, arg3, &st);
8649 break;
8650 #endif
8651 #ifdef TARGET_NR_lchown
8652 case TARGET_NR_lchown:
8653 if (!(p = lock_user_string(arg1)))
8654 goto efault;
8655 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
8656 unlock_user(p, arg1, 0);
8657 break;
8658 #endif
8659 #ifdef TARGET_NR_getuid
8660 case TARGET_NR_getuid:
8661 ret = get_errno(high2lowuid(getuid()));
8662 break;
8663 #endif
8664 #ifdef TARGET_NR_getgid
8665 case TARGET_NR_getgid:
8666 ret = get_errno(high2lowgid(getgid()));
8667 break;
8668 #endif
8669 #ifdef TARGET_NR_geteuid
8670 case TARGET_NR_geteuid:
8671 ret = get_errno(high2lowuid(geteuid()));
8672 break;
8673 #endif
8674 #ifdef TARGET_NR_getegid
8675 case TARGET_NR_getegid:
8676 ret = get_errno(high2lowgid(getegid()));
8677 break;
8678 #endif
8679 case TARGET_NR_setreuid:
8680 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
8681 break;
8682 case TARGET_NR_setregid:
8683 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
8684 break;
8685 case TARGET_NR_getgroups:
8687 int gidsetsize = arg1;
8688 target_id *target_grouplist;
8689 gid_t *grouplist;
8690 int i;
8692 grouplist = alloca(gidsetsize * sizeof(gid_t));
8693 ret = get_errno(getgroups(gidsetsize, grouplist));
8694 if (gidsetsize == 0)
8695 break;
8696 if (!is_error(ret)) {
8697 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
8698 if (!target_grouplist)
8699 goto efault;
8700 for(i = 0;i < ret; i++)
8701 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
8702 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
8705 break;
8706 case TARGET_NR_setgroups:
8708 int gidsetsize = arg1;
8709 target_id *target_grouplist;
8710 gid_t *grouplist = NULL;
8711 int i;
8712 if (gidsetsize) {
8713 grouplist = alloca(gidsetsize * sizeof(gid_t));
8714 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
8715 if (!target_grouplist) {
8716 ret = -TARGET_EFAULT;
8717 goto fail;
8719 for (i = 0; i < gidsetsize; i++) {
8720 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
8722 unlock_user(target_grouplist, arg2, 0);
8724 ret = get_errno(setgroups(gidsetsize, grouplist));
8726 break;
8727 case TARGET_NR_fchown:
8728 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
8729 break;
8730 #if defined(TARGET_NR_fchownat)
8731 case TARGET_NR_fchownat:
8732 if (!(p = lock_user_string(arg2)))
8733 goto efault;
8734 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
8735 low2highgid(arg4), arg5));
8736 unlock_user(p, arg2, 0);
8737 break;
8738 #endif
8739 #ifdef TARGET_NR_setresuid
8740 case TARGET_NR_setresuid:
8741 ret = get_errno(setresuid(low2highuid(arg1),
8742 low2highuid(arg2),
8743 low2highuid(arg3)));
8744 break;
8745 #endif
8746 #ifdef TARGET_NR_getresuid
8747 case TARGET_NR_getresuid:
8749 uid_t ruid, euid, suid;
8750 ret = get_errno(getresuid(&ruid, &euid, &suid));
8751 if (!is_error(ret)) {
8752 if (put_user_id(high2lowuid(ruid), arg1)
8753 || put_user_id(high2lowuid(euid), arg2)
8754 || put_user_id(high2lowuid(suid), arg3))
8755 goto efault;
8758 break;
8759 #endif
8760 #ifdef TARGET_NR_getresgid
8761 case TARGET_NR_setresgid:
8762 ret = get_errno(setresgid(low2highgid(arg1),
8763 low2highgid(arg2),
8764 low2highgid(arg3)));
8765 break;
8766 #endif
8767 #ifdef TARGET_NR_getresgid
8768 case TARGET_NR_getresgid:
8770 gid_t rgid, egid, sgid;
8771 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8772 if (!is_error(ret)) {
8773 if (put_user_id(high2lowgid(rgid), arg1)
8774 || put_user_id(high2lowgid(egid), arg2)
8775 || put_user_id(high2lowgid(sgid), arg3))
8776 goto efault;
8779 break;
8780 #endif
8781 #ifdef TARGET_NR_chown
8782 case TARGET_NR_chown:
8783 if (!(p = lock_user_string(arg1)))
8784 goto efault;
8785 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8786 unlock_user(p, arg1, 0);
8787 break;
8788 #endif
8789 case TARGET_NR_setuid:
8790 ret = get_errno(setuid(low2highuid(arg1)));
8791 break;
8792 case TARGET_NR_setgid:
8793 ret = get_errno(setgid(low2highgid(arg1)));
8794 break;
8795 case TARGET_NR_setfsuid:
8796 ret = get_errno(setfsuid(arg1));
8797 break;
8798 case TARGET_NR_setfsgid:
8799 ret = get_errno(setfsgid(arg1));
8800 break;
8802 #ifdef TARGET_NR_lchown32
8803 case TARGET_NR_lchown32:
8804 if (!(p = lock_user_string(arg1)))
8805 goto efault;
8806 ret = get_errno(lchown(p, arg2, arg3));
8807 unlock_user(p, arg1, 0);
8808 break;
8809 #endif
8810 #ifdef TARGET_NR_getuid32
8811 case TARGET_NR_getuid32:
8812 ret = get_errno(getuid());
8813 break;
8814 #endif
8816 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8817 /* Alpha specific */
8818 case TARGET_NR_getxuid:
8820 uid_t euid;
8821 euid=geteuid();
8822 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
8824 ret = get_errno(getuid());
8825 break;
8826 #endif
8827 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8828 /* Alpha specific */
8829 case TARGET_NR_getxgid:
8831 uid_t egid;
8832 egid=getegid();
8833 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
8835 ret = get_errno(getgid());
8836 break;
8837 #endif
8838 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8839 /* Alpha specific */
8840 case TARGET_NR_osf_getsysinfo:
8841 ret = -TARGET_EOPNOTSUPP;
8842 switch (arg1) {
8843 case TARGET_GSI_IEEE_FP_CONTROL:
8845 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8847 /* Copied from linux ieee_fpcr_to_swcr. */
8848 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8849 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8850 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8851 | SWCR_TRAP_ENABLE_DZE
8852 | SWCR_TRAP_ENABLE_OVF);
8853 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8854 | SWCR_TRAP_ENABLE_INE);
8855 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8856 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8858 if (put_user_u64 (swcr, arg2))
8859 goto efault;
8860 ret = 0;
8862 break;
8864 /* case GSI_IEEE_STATE_AT_SIGNAL:
8865 -- Not implemented in linux kernel.
8866 case GSI_UACPROC:
8867 -- Retrieves current unaligned access state; not much used.
8868 case GSI_PROC_TYPE:
8869 -- Retrieves implver information; surely not used.
8870 case GSI_GET_HWRPB:
8871 -- Grabs a copy of the HWRPB; surely not used.
8874 break;
8875 #endif
8876 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8877 /* Alpha specific */
8878 case TARGET_NR_osf_setsysinfo:
8879 ret = -TARGET_EOPNOTSUPP;
8880 switch (arg1) {
8881 case TARGET_SSI_IEEE_FP_CONTROL:
8883 uint64_t swcr, fpcr, orig_fpcr;
8885 if (get_user_u64 (swcr, arg2)) {
8886 goto efault;
8888 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8889 fpcr = orig_fpcr & FPCR_DYN_MASK;
8891 /* Copied from linux ieee_swcr_to_fpcr. */
8892 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8893 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8894 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8895 | SWCR_TRAP_ENABLE_DZE
8896 | SWCR_TRAP_ENABLE_OVF)) << 48;
8897 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8898 | SWCR_TRAP_ENABLE_INE)) << 57;
8899 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8900 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8902 cpu_alpha_store_fpcr(cpu_env, fpcr);
8903 ret = 0;
8905 break;
8907 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8909 uint64_t exc, fpcr, orig_fpcr;
8910 int si_code;
8912 if (get_user_u64(exc, arg2)) {
8913 goto efault;
8916 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8918 /* We only add to the exception status here. */
8919 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8921 cpu_alpha_store_fpcr(cpu_env, fpcr);
8922 ret = 0;
8924 /* Old exceptions are not signaled. */
8925 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
8927 /* If any exceptions set by this call,
8928 and are unmasked, send a signal. */
8929 si_code = 0;
8930 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8931 si_code = TARGET_FPE_FLTRES;
8933 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8934 si_code = TARGET_FPE_FLTUND;
8936 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8937 si_code = TARGET_FPE_FLTOVF;
8939 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8940 si_code = TARGET_FPE_FLTDIV;
8942 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8943 si_code = TARGET_FPE_FLTINV;
8945 if (si_code != 0) {
8946 target_siginfo_t info;
8947 info.si_signo = SIGFPE;
8948 info.si_errno = 0;
8949 info.si_code = si_code;
8950 info._sifields._sigfault._addr
8951 = ((CPUArchState *)cpu_env)->pc;
8952 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8955 break;
8957 /* case SSI_NVPAIRS:
8958 -- Used with SSIN_UACPROC to enable unaligned accesses.
8959 case SSI_IEEE_STATE_AT_SIGNAL:
8960 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8961 -- Not implemented in linux kernel
8964 break;
8965 #endif
8966 #ifdef TARGET_NR_osf_sigprocmask
8967 /* Alpha specific. */
8968 case TARGET_NR_osf_sigprocmask:
8970 abi_ulong mask;
8971 int how;
8972 sigset_t set, oldset;
8974 switch(arg1) {
8975 case TARGET_SIG_BLOCK:
8976 how = SIG_BLOCK;
8977 break;
8978 case TARGET_SIG_UNBLOCK:
8979 how = SIG_UNBLOCK;
8980 break;
8981 case TARGET_SIG_SETMASK:
8982 how = SIG_SETMASK;
8983 break;
8984 default:
8985 ret = -TARGET_EINVAL;
8986 goto fail;
8988 mask = arg2;
8989 target_to_host_old_sigset(&set, &mask);
8990 do_sigprocmask(how, &set, &oldset);
8991 host_to_target_old_sigset(&mask, &oldset);
8992 ret = mask;
8994 break;
8995 #endif
8997 #ifdef TARGET_NR_getgid32
8998 case TARGET_NR_getgid32:
8999 ret = get_errno(getgid());
9000 break;
9001 #endif
9002 #ifdef TARGET_NR_geteuid32
9003 case TARGET_NR_geteuid32:
9004 ret = get_errno(geteuid());
9005 break;
9006 #endif
9007 #ifdef TARGET_NR_getegid32
9008 case TARGET_NR_getegid32:
9009 ret = get_errno(getegid());
9010 break;
9011 #endif
9012 #ifdef TARGET_NR_setreuid32
9013 case TARGET_NR_setreuid32:
9014 ret = get_errno(setreuid(arg1, arg2));
9015 break;
9016 #endif
9017 #ifdef TARGET_NR_setregid32
9018 case TARGET_NR_setregid32:
9019 ret = get_errno(setregid(arg1, arg2));
9020 break;
9021 #endif
9022 #ifdef TARGET_NR_getgroups32
9023 case TARGET_NR_getgroups32:
9025 int gidsetsize = arg1;
9026 uint32_t *target_grouplist;
9027 gid_t *grouplist;
9028 int i;
9030 grouplist = alloca(gidsetsize * sizeof(gid_t));
9031 ret = get_errno(getgroups(gidsetsize, grouplist));
9032 if (gidsetsize == 0)
9033 break;
9034 if (!is_error(ret)) {
9035 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
9036 if (!target_grouplist) {
9037 ret = -TARGET_EFAULT;
9038 goto fail;
9040 for(i = 0;i < ret; i++)
9041 target_grouplist[i] = tswap32(grouplist[i]);
9042 unlock_user(target_grouplist, arg2, gidsetsize * 4);
9045 break;
9046 #endif
9047 #ifdef TARGET_NR_setgroups32
9048 case TARGET_NR_setgroups32:
9050 int gidsetsize = arg1;
9051 uint32_t *target_grouplist;
9052 gid_t *grouplist;
9053 int i;
9055 grouplist = alloca(gidsetsize * sizeof(gid_t));
9056 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
9057 if (!target_grouplist) {
9058 ret = -TARGET_EFAULT;
9059 goto fail;
9061 for(i = 0;i < gidsetsize; i++)
9062 grouplist[i] = tswap32(target_grouplist[i]);
9063 unlock_user(target_grouplist, arg2, 0);
9064 ret = get_errno(setgroups(gidsetsize, grouplist));
9066 break;
9067 #endif
9068 #ifdef TARGET_NR_fchown32
9069 case TARGET_NR_fchown32:
9070 ret = get_errno(fchown(arg1, arg2, arg3));
9071 break;
9072 #endif
9073 #ifdef TARGET_NR_setresuid32
9074 case TARGET_NR_setresuid32:
9075 ret = get_errno(setresuid(arg1, arg2, arg3));
9076 break;
9077 #endif
9078 #ifdef TARGET_NR_getresuid32
9079 case TARGET_NR_getresuid32:
9081 uid_t ruid, euid, suid;
9082 ret = get_errno(getresuid(&ruid, &euid, &suid));
9083 if (!is_error(ret)) {
9084 if (put_user_u32(ruid, arg1)
9085 || put_user_u32(euid, arg2)
9086 || put_user_u32(suid, arg3))
9087 goto efault;
9090 break;
9091 #endif
9092 #ifdef TARGET_NR_setresgid32
9093 case TARGET_NR_setresgid32:
9094 ret = get_errno(setresgid(arg1, arg2, arg3));
9095 break;
9096 #endif
9097 #ifdef TARGET_NR_getresgid32
9098 case TARGET_NR_getresgid32:
9100 gid_t rgid, egid, sgid;
9101 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9102 if (!is_error(ret)) {
9103 if (put_user_u32(rgid, arg1)
9104 || put_user_u32(egid, arg2)
9105 || put_user_u32(sgid, arg3))
9106 goto efault;
9109 break;
9110 #endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        /* Path must be locked/copied from guest memory before use. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        ret = get_errno(setuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(setgid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        /* Filesystem UID for permission checks on file accesses. */
        ret = get_errno(setfsuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        /* Filesystem GID for permission checks on file accesses. */
        ret = get_errno(setfsgid(arg1));
        break;
#endif
9140 case TARGET_NR_pivot_root:
9141 goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a;
            /* mincore()'s third argument is an OUTPUT byte vector with one
             * byte per page of the queried range — it is not a string.  The
             * previous code used lock_user_string() (read/string semantics)
             * and unlocked with length `ret` (0 on success), so the result
             * was never copied back to the guest.  Lock it for writing with
             * its real length instead. */
            abi_ulong vec_len = DIV_ROUND_UP(arg2, TARGET_PAGE_SIZE);

            ret = -TARGET_EFAULT;
            if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
                goto efault;
            p = lock_user(VERIFY_WRITE, arg3, vec_len, 0);
            if (!p)
                goto mincore_fail;
            ret = get_errno(mincore(a, arg2, p));
            unlock_user(p, arg3, vec_len);
            mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
9158 #ifdef TARGET_NR_arm_fadvise64_64
9159 case TARGET_NR_arm_fadvise64_64:
9162 * arm_fadvise64_64 looks like fadvise64_64 but
9163 * with different argument order
9165 abi_long temp;
9166 temp = arg3;
9167 arg3 = arg4;
9168 arg4 = temp;
9170 #endif
9171 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
9172 #ifdef TARGET_NR_fadvise64_64
9173 case TARGET_NR_fadvise64_64:
9174 #endif
9175 #ifdef TARGET_NR_fadvise64
9176 case TARGET_NR_fadvise64:
9177 #endif
9178 #ifdef TARGET_S390X
9179 switch (arg4) {
9180 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
9181 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
9182 case 6: arg4 = POSIX_FADV_DONTNEED; break;
9183 case 7: arg4 = POSIX_FADV_NOREUSE; break;
9184 default: break;
9186 #endif
9187 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
9188 break;
9189 #endif
9190 #ifdef TARGET_NR_madvise
9191 case TARGET_NR_madvise:
9192 /* A straight passthrough may not be safe because qemu sometimes
9193 turns private file-backed mappings into anonymous mappings.
9194 This will break MADV_DONTNEED.
9195 This is a hint, so ignoring and returning success is ok. */
9196 ret = get_errno(0);
9197 break;
9198 #endif
9199 #if TARGET_ABI_BITS == 32
9200 case TARGET_NR_fcntl64:
9202 int cmd;
9203 struct flock64 fl;
9204 struct target_flock64 *target_fl;
9205 #ifdef TARGET_ARM
9206 struct target_eabi_flock64 *target_efl;
9207 #endif
9209 cmd = target_to_host_fcntl_cmd(arg2);
9210 if (cmd == -TARGET_EINVAL) {
9211 ret = cmd;
9212 break;
9215 switch(arg2) {
9216 case TARGET_F_GETLK64:
9217 #ifdef TARGET_ARM
9218 if (((CPUARMState *)cpu_env)->eabi) {
9219 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9220 goto efault;
9221 fl.l_type = tswap16(target_efl->l_type);
9222 fl.l_whence = tswap16(target_efl->l_whence);
9223 fl.l_start = tswap64(target_efl->l_start);
9224 fl.l_len = tswap64(target_efl->l_len);
9225 fl.l_pid = tswap32(target_efl->l_pid);
9226 unlock_user_struct(target_efl, arg3, 0);
9227 } else
9228 #endif
9230 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9231 goto efault;
9232 fl.l_type = tswap16(target_fl->l_type);
9233 fl.l_whence = tswap16(target_fl->l_whence);
9234 fl.l_start = tswap64(target_fl->l_start);
9235 fl.l_len = tswap64(target_fl->l_len);
9236 fl.l_pid = tswap32(target_fl->l_pid);
9237 unlock_user_struct(target_fl, arg3, 0);
9239 ret = get_errno(fcntl(arg1, cmd, &fl));
9240 if (ret == 0) {
9241 #ifdef TARGET_ARM
9242 if (((CPUARMState *)cpu_env)->eabi) {
9243 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
9244 goto efault;
9245 target_efl->l_type = tswap16(fl.l_type);
9246 target_efl->l_whence = tswap16(fl.l_whence);
9247 target_efl->l_start = tswap64(fl.l_start);
9248 target_efl->l_len = tswap64(fl.l_len);
9249 target_efl->l_pid = tswap32(fl.l_pid);
9250 unlock_user_struct(target_efl, arg3, 1);
9251 } else
9252 #endif
9254 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
9255 goto efault;
9256 target_fl->l_type = tswap16(fl.l_type);
9257 target_fl->l_whence = tswap16(fl.l_whence);
9258 target_fl->l_start = tswap64(fl.l_start);
9259 target_fl->l_len = tswap64(fl.l_len);
9260 target_fl->l_pid = tswap32(fl.l_pid);
9261 unlock_user_struct(target_fl, arg3, 1);
9264 break;
9266 case TARGET_F_SETLK64:
9267 case TARGET_F_SETLKW64:
9268 #ifdef TARGET_ARM
9269 if (((CPUARMState *)cpu_env)->eabi) {
9270 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9271 goto efault;
9272 fl.l_type = tswap16(target_efl->l_type);
9273 fl.l_whence = tswap16(target_efl->l_whence);
9274 fl.l_start = tswap64(target_efl->l_start);
9275 fl.l_len = tswap64(target_efl->l_len);
9276 fl.l_pid = tswap32(target_efl->l_pid);
9277 unlock_user_struct(target_efl, arg3, 0);
9278 } else
9279 #endif
9281 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9282 goto efault;
9283 fl.l_type = tswap16(target_fl->l_type);
9284 fl.l_whence = tswap16(target_fl->l_whence);
9285 fl.l_start = tswap64(target_fl->l_start);
9286 fl.l_len = tswap64(target_fl->l_len);
9287 fl.l_pid = tswap32(target_fl->l_pid);
9288 unlock_user_struct(target_fl, arg3, 0);
9290 ret = get_errno(fcntl(arg1, cmd, &fl));
9291 break;
9292 default:
9293 ret = do_fcntl(arg1, arg2, arg3);
9294 break;
9296 break;
9298 #endif
9299 #ifdef TARGET_NR_cacheflush
9300 case TARGET_NR_cacheflush:
9301 /* self-modifying code is handled automatically, so nothing needed */
9302 ret = 0;
9303 break;
9304 #endif
9305 #ifdef TARGET_NR_security
9306 case TARGET_NR_security:
9307 goto unimplemented;
9308 #endif
9309 #ifdef TARGET_NR_getpagesize
9310 case TARGET_NR_getpagesize:
9311 ret = TARGET_PAGE_SIZE;
9312 break;
9313 #endif
9314 case TARGET_NR_gettid:
9315 ret = get_errno(gettid());
9316 break;
9317 #ifdef TARGET_NR_readahead
9318 case TARGET_NR_readahead:
9319 #if TARGET_ABI_BITS == 32
9320 if (regpairs_aligned(cpu_env)) {
9321 arg2 = arg3;
9322 arg3 = arg4;
9323 arg4 = arg5;
9325 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
9326 #else
9327 ret = get_errno(readahead(arg1, arg2, arg3));
9328 #endif
9329 break;
9330 #endif
9331 #ifdef CONFIG_ATTR
9332 #ifdef TARGET_NR_setxattr
9333 case TARGET_NR_listxattr:
9334 case TARGET_NR_llistxattr:
9336 void *p, *b = 0;
9337 if (arg2) {
9338 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9339 if (!b) {
9340 ret = -TARGET_EFAULT;
9341 break;
9344 p = lock_user_string(arg1);
9345 if (p) {
9346 if (num == TARGET_NR_listxattr) {
9347 ret = get_errno(listxattr(p, b, arg3));
9348 } else {
9349 ret = get_errno(llistxattr(p, b, arg3));
9351 } else {
9352 ret = -TARGET_EFAULT;
9354 unlock_user(p, arg1, 0);
9355 unlock_user(b, arg2, arg3);
9356 break;
9358 case TARGET_NR_flistxattr:
9360 void *b = 0;
9361 if (arg2) {
9362 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9363 if (!b) {
9364 ret = -TARGET_EFAULT;
9365 break;
9368 ret = get_errno(flistxattr(arg1, b, arg3));
9369 unlock_user(b, arg2, arg3);
9370 break;
9372 case TARGET_NR_setxattr:
9373 case TARGET_NR_lsetxattr:
9375 void *p, *n, *v = 0;
9376 if (arg3) {
9377 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9378 if (!v) {
9379 ret = -TARGET_EFAULT;
9380 break;
9383 p = lock_user_string(arg1);
9384 n = lock_user_string(arg2);
9385 if (p && n) {
9386 if (num == TARGET_NR_setxattr) {
9387 ret = get_errno(setxattr(p, n, v, arg4, arg5));
9388 } else {
9389 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
9391 } else {
9392 ret = -TARGET_EFAULT;
9394 unlock_user(p, arg1, 0);
9395 unlock_user(n, arg2, 0);
9396 unlock_user(v, arg3, 0);
9398 break;
9399 case TARGET_NR_fsetxattr:
9401 void *n, *v = 0;
9402 if (arg3) {
9403 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9404 if (!v) {
9405 ret = -TARGET_EFAULT;
9406 break;
9409 n = lock_user_string(arg2);
9410 if (n) {
9411 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
9412 } else {
9413 ret = -TARGET_EFAULT;
9415 unlock_user(n, arg2, 0);
9416 unlock_user(v, arg3, 0);
9418 break;
9419 case TARGET_NR_getxattr:
9420 case TARGET_NR_lgetxattr:
9422 void *p, *n, *v = 0;
9423 if (arg3) {
9424 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9425 if (!v) {
9426 ret = -TARGET_EFAULT;
9427 break;
9430 p = lock_user_string(arg1);
9431 n = lock_user_string(arg2);
9432 if (p && n) {
9433 if (num == TARGET_NR_getxattr) {
9434 ret = get_errno(getxattr(p, n, v, arg4));
9435 } else {
9436 ret = get_errno(lgetxattr(p, n, v, arg4));
9438 } else {
9439 ret = -TARGET_EFAULT;
9441 unlock_user(p, arg1, 0);
9442 unlock_user(n, arg2, 0);
9443 unlock_user(v, arg3, arg4);
9445 break;
9446 case TARGET_NR_fgetxattr:
9448 void *n, *v = 0;
9449 if (arg3) {
9450 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9451 if (!v) {
9452 ret = -TARGET_EFAULT;
9453 break;
9456 n = lock_user_string(arg2);
9457 if (n) {
9458 ret = get_errno(fgetxattr(arg1, n, v, arg4));
9459 } else {
9460 ret = -TARGET_EFAULT;
9462 unlock_user(n, arg2, 0);
9463 unlock_user(v, arg3, arg4);
9465 break;
9466 case TARGET_NR_removexattr:
9467 case TARGET_NR_lremovexattr:
9469 void *p, *n;
9470 p = lock_user_string(arg1);
9471 n = lock_user_string(arg2);
9472 if (p && n) {
9473 if (num == TARGET_NR_removexattr) {
9474 ret = get_errno(removexattr(p, n));
9475 } else {
9476 ret = get_errno(lremovexattr(p, n));
9478 } else {
9479 ret = -TARGET_EFAULT;
9481 unlock_user(p, arg1, 0);
9482 unlock_user(n, arg2, 0);
9484 break;
9485 case TARGET_NR_fremovexattr:
9487 void *n;
9488 n = lock_user_string(arg2);
9489 if (n) {
9490 ret = get_errno(fremovexattr(arg1, n));
9491 } else {
9492 ret = -TARGET_EFAULT;
9494 unlock_user(n, arg2, 0);
9496 break;
9497 #endif
9498 #endif /* CONFIG_ATTR */
9499 #ifdef TARGET_NR_set_thread_area
9500 case TARGET_NR_set_thread_area:
9501 #if defined(TARGET_MIPS)
9502 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
9503 ret = 0;
9504 break;
9505 #elif defined(TARGET_CRIS)
9506 if (arg1 & 0xff)
9507 ret = -TARGET_EINVAL;
9508 else {
9509 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
9510 ret = 0;
9512 break;
9513 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9514 ret = do_set_thread_area(cpu_env, arg1);
9515 break;
9516 #elif defined(TARGET_M68K)
9518 TaskState *ts = cpu->opaque;
9519 ts->tp_value = arg1;
9520 ret = 0;
9521 break;
9523 #else
9524 goto unimplemented_nowarn;
9525 #endif
9526 #endif
9527 #ifdef TARGET_NR_get_thread_area
9528 case TARGET_NR_get_thread_area:
9529 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9530 ret = do_get_thread_area(cpu_env, arg1);
9531 break;
9532 #elif defined(TARGET_M68K)
9534 TaskState *ts = cpu->opaque;
9535 ret = ts->tp_value;
9536 break;
9538 #else
9539 goto unimplemented_nowarn;
9540 #endif
9541 #endif
9542 #ifdef TARGET_NR_getdomainname
9543 case TARGET_NR_getdomainname:
9544 goto unimplemented_nowarn;
9545 #endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
        {
            /* Read the host clock, then convert the result into the guest's
             * struct timespec layout at arg2. */
            struct timespec ts;
            ret = get_errno(clock_gettime(arg1, &ts));
            if (!is_error(ret)) {
                host_to_target_timespec(arg2, &ts);
            }
        }
        break;
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
        {
            /* Clock resolution query; same host->guest conversion as above. */
            struct timespec ts;
            ret = get_errno(clock_getres(arg1, &ts));
            if (!is_error(ret)) {
                host_to_target_timespec(arg2, &ts);
            }
        }
        break;
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
        {
            struct timespec ts;
            target_to_host_timespec(&ts, arg3);
            /* If the guest supplied a remainder pointer (arg4), reuse ts as
             * the remain buffer and copy it back after the sleep. */
            ret = get_errno(clock_nanosleep(arg1, arg2, &ts,
                                            arg4 ? &ts : NULL));
            if (arg4)
                host_to_target_timespec(arg4, &ts);

#if defined(TARGET_PPC)
            /* clock_nanosleep is odd in that it returns positive errno values.
             * On PPC, CR0 bit 3 should be set in such a situation. */
            if (ret) {
                ((CPUPPCState *)cpu_env)->crf[0] |= 1;
            }
#endif
        }
        break;
#endif
9589 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9590 case TARGET_NR_set_tid_address:
9591 ret = get_errno(set_tid_address((int *)g2h(arg1)));
9592 break;
9593 #endif
9595 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9596 case TARGET_NR_tkill:
9597 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
9598 break;
9599 #endif
9601 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9602 case TARGET_NR_tgkill:
9603 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
9604 target_to_host_signal(arg3)));
9605 break;
9606 #endif
9608 #ifdef TARGET_NR_set_robust_list
9609 case TARGET_NR_set_robust_list:
9610 case TARGET_NR_get_robust_list:
9611 /* The ABI for supporting robust futexes has userspace pass
9612 * the kernel a pointer to a linked list which is updated by
9613 * userspace after the syscall; the list is walked by the kernel
9614 * when the thread exits. Since the linked list in QEMU guest
9615 * memory isn't a valid linked list for the host and we have
9616 * no way to reliably intercept the thread-death event, we can't
9617 * support these. Silently return ENOSYS so that guest userspace
9618 * falls back to a non-robust futex implementation (which should
9619 * be OK except in the corner case of the guest crashing while
9620 * holding a mutex that is shared with another process via
9621 * shared memory).
9623 goto unimplemented_nowarn;
9624 #endif
9626 #if defined(TARGET_NR_utimensat)
9627 case TARGET_NR_utimensat:
9629 struct timespec *tsp, ts[2];
9630 if (!arg3) {
9631 tsp = NULL;
9632 } else {
9633 target_to_host_timespec(ts, arg3);
9634 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
9635 tsp = ts;
9637 if (!arg2)
9638 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
9639 else {
9640 if (!(p = lock_user_string(arg2))) {
9641 ret = -TARGET_EFAULT;
9642 goto fail;
9644 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
9645 unlock_user(p, arg2, 0);
9648 break;
9649 #endif
9650 case TARGET_NR_futex:
9651 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
9652 break;
9653 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9654 case TARGET_NR_inotify_init:
9655 ret = get_errno(sys_inotify_init());
9656 break;
9657 #endif
9658 #ifdef CONFIG_INOTIFY1
9659 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9660 case TARGET_NR_inotify_init1:
9661 ret = get_errno(sys_inotify_init1(arg1));
9662 break;
9663 #endif
9664 #endif
9665 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9666 case TARGET_NR_inotify_add_watch:
9667 p = lock_user_string(arg2);
9668 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
9669 unlock_user(p, arg2, 0);
9670 break;
9671 #endif
9672 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9673 case TARGET_NR_inotify_rm_watch:
9674 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
9675 break;
9676 #endif
9678 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9679 case TARGET_NR_mq_open:
9681 struct mq_attr posix_mq_attr, *attrp;
9683 p = lock_user_string(arg1 - 1);
9684 if (arg4 != 0) {
9685 copy_from_user_mq_attr (&posix_mq_attr, arg4);
9686 attrp = &posix_mq_attr;
9687 } else {
9688 attrp = 0;
9690 ret = get_errno(mq_open(p, arg2, arg3, attrp));
9691 unlock_user (p, arg1, 0);
9693 break;
9695 case TARGET_NR_mq_unlink:
9696 p = lock_user_string(arg1 - 1);
9697 ret = get_errno(mq_unlink(p));
9698 unlock_user (p, arg1, 0);
9699 break;
9701 case TARGET_NR_mq_timedsend:
9703 struct timespec ts;
9705 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9706 if (arg5 != 0) {
9707 target_to_host_timespec(&ts, arg5);
9708 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
9709 host_to_target_timespec(arg5, &ts);
9711 else
9712 ret = get_errno(mq_send(arg1, p, arg3, arg4));
9713 unlock_user (p, arg2, arg3);
9715 break;
9717 case TARGET_NR_mq_timedreceive:
9719 struct timespec ts;
9720 unsigned int prio;
9722 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9723 if (arg5 != 0) {
9724 target_to_host_timespec(&ts, arg5);
9725 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
9726 host_to_target_timespec(arg5, &ts);
9728 else
9729 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
9730 unlock_user (p, arg2, arg3);
9731 if (arg4 != 0)
9732 put_user_u32(prio, arg4);
9734 break;
9736 /* Not implemented for now... */
9737 /* case TARGET_NR_mq_notify: */
9738 /* break; */
9740 case TARGET_NR_mq_getsetattr:
9742 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
9743 ret = 0;
9744 if (arg3 != 0) {
9745 ret = mq_getattr(arg1, &posix_mq_attr_out);
9746 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
9748 if (arg2 != 0) {
9749 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
9750 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
9754 break;
9755 #endif
9757 #ifdef CONFIG_SPLICE
9758 #ifdef TARGET_NR_tee
9759 case TARGET_NR_tee:
9761 ret = get_errno(tee(arg1,arg2,arg3,arg4));
9763 break;
9764 #endif
9765 #ifdef TARGET_NR_splice
9766 case TARGET_NR_splice:
9768 loff_t loff_in, loff_out;
9769 loff_t *ploff_in = NULL, *ploff_out = NULL;
9770 if (arg2) {
9771 if (get_user_u64(loff_in, arg2)) {
9772 goto efault;
9774 ploff_in = &loff_in;
9776 if (arg4) {
9777 if (get_user_u64(loff_out, arg4)) {
9778 goto efault;
9780 ploff_out = &loff_out;
9782 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9783 if (arg2) {
9784 if (put_user_u64(loff_in, arg2)) {
9785 goto efault;
9788 if (arg4) {
9789 if (put_user_u64(loff_out, arg4)) {
9790 goto efault;
9794 break;
9795 #endif
9796 #ifdef TARGET_NR_vmsplice
9797 case TARGET_NR_vmsplice:
9799 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9800 if (vec != NULL) {
9801 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9802 unlock_iovec(vec, arg2, arg3, 0);
9803 } else {
9804 ret = -host_to_target_errno(errno);
9807 break;
9808 #endif
9809 #endif /* CONFIG_SPLICE */
9810 #ifdef CONFIG_EVENTFD
9811 #if defined(TARGET_NR_eventfd)
9812 case TARGET_NR_eventfd:
9813 ret = get_errno(eventfd(arg1, 0));
9814 fd_trans_unregister(ret);
9815 break;
9816 #endif
9817 #if defined(TARGET_NR_eventfd2)
9818 case TARGET_NR_eventfd2:
9820 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
9821 if (arg2 & TARGET_O_NONBLOCK) {
9822 host_flags |= O_NONBLOCK;
9824 if (arg2 & TARGET_O_CLOEXEC) {
9825 host_flags |= O_CLOEXEC;
9827 ret = get_errno(eventfd(arg1, host_flags));
9828 fd_trans_unregister(ret);
9829 break;
9831 #endif
9832 #endif /* CONFIG_EVENTFD */
9833 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9834 case TARGET_NR_fallocate:
9835 #if TARGET_ABI_BITS == 32
9836 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9837 target_offset64(arg5, arg6)));
9838 #else
9839 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9840 #endif
9841 break;
9842 #endif
9843 #if defined(CONFIG_SYNC_FILE_RANGE)
9844 #if defined(TARGET_NR_sync_file_range)
9845 case TARGET_NR_sync_file_range:
9846 #if TARGET_ABI_BITS == 32
9847 #if defined(TARGET_MIPS)
9848 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9849 target_offset64(arg5, arg6), arg7));
9850 #else
9851 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9852 target_offset64(arg4, arg5), arg6));
9853 #endif /* !TARGET_MIPS */
9854 #else
9855 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9856 #endif
9857 break;
9858 #endif
9859 #if defined(TARGET_NR_sync_file_range2)
9860 case TARGET_NR_sync_file_range2:
9861 /* This is like sync_file_range but the arguments are reordered */
9862 #if TARGET_ABI_BITS == 32
9863 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9864 target_offset64(arg5, arg6), arg2));
9865 #else
9866 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
9867 #endif
9868 break;
9869 #endif
9870 #endif
9871 #if defined(TARGET_NR_signalfd4)
9872 case TARGET_NR_signalfd4:
9873 ret = do_signalfd4(arg1, arg2, arg4);
9874 break;
9875 #endif
9876 #if defined(TARGET_NR_signalfd)
9877 case TARGET_NR_signalfd:
9878 ret = do_signalfd4(arg1, arg2, 0);
9879 break;
9880 #endif
9881 #if defined(CONFIG_EPOLL)
9882 #if defined(TARGET_NR_epoll_create)
9883 case TARGET_NR_epoll_create:
9884 ret = get_errno(epoll_create(arg1));
9885 break;
9886 #endif
9887 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9888 case TARGET_NR_epoll_create1:
9889 ret = get_errno(epoll_create1(arg1));
9890 break;
9891 #endif
9892 #if defined(TARGET_NR_epoll_ctl)
9893 case TARGET_NR_epoll_ctl:
9895 struct epoll_event ep;
9896 struct epoll_event *epp = 0;
9897 if (arg4) {
9898 struct target_epoll_event *target_ep;
9899 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
9900 goto efault;
9902 ep.events = tswap32(target_ep->events);
9903 /* The epoll_data_t union is just opaque data to the kernel,
9904 * so we transfer all 64 bits across and need not worry what
9905 * actual data type it is.
9907 ep.data.u64 = tswap64(target_ep->data.u64);
9908 unlock_user_struct(target_ep, arg4, 0);
9909 epp = &ep;
9911 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
9912 break;
9914 #endif
9916 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9917 #define IMPLEMENT_EPOLL_PWAIT
9918 #endif
9919 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9920 #if defined(TARGET_NR_epoll_wait)
9921 case TARGET_NR_epoll_wait:
9922 #endif
9923 #if defined(IMPLEMENT_EPOLL_PWAIT)
9924 case TARGET_NR_epoll_pwait:
9925 #endif
9927 struct target_epoll_event *target_ep;
9928 struct epoll_event *ep;
9929 int epfd = arg1;
9930 int maxevents = arg3;
9931 int timeout = arg4;
9933 target_ep = lock_user(VERIFY_WRITE, arg2,
9934 maxevents * sizeof(struct target_epoll_event), 1);
9935 if (!target_ep) {
9936 goto efault;
9939 ep = alloca(maxevents * sizeof(struct epoll_event));
9941 switch (num) {
9942 #if defined(IMPLEMENT_EPOLL_PWAIT)
9943 case TARGET_NR_epoll_pwait:
9945 target_sigset_t *target_set;
9946 sigset_t _set, *set = &_set;
9948 if (arg5) {
9949 target_set = lock_user(VERIFY_READ, arg5,
9950 sizeof(target_sigset_t), 1);
9951 if (!target_set) {
9952 unlock_user(target_ep, arg2, 0);
9953 goto efault;
9955 target_to_host_sigset(set, target_set);
9956 unlock_user(target_set, arg5, 0);
9957 } else {
9958 set = NULL;
9961 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9962 break;
9964 #endif
9965 #if defined(TARGET_NR_epoll_wait)
9966 case TARGET_NR_epoll_wait:
9967 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9968 break;
9969 #endif
9970 default:
9971 ret = -TARGET_ENOSYS;
9973 if (!is_error(ret)) {
9974 int i;
9975 for (i = 0; i < ret; i++) {
9976 target_ep[i].events = tswap32(ep[i].events);
9977 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
9980 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
9981 break;
9983 #endif
9984 #endif
9985 #ifdef TARGET_NR_prlimit64
9986 case TARGET_NR_prlimit64:
9988 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9989 struct target_rlimit64 *target_rnew, *target_rold;
9990 struct host_rlimit64 rnew, rold, *rnewp = 0;
9991 int resource = target_to_host_resource(arg2);
9992 if (arg3) {
9993 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9994 goto efault;
9996 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9997 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9998 unlock_user_struct(target_rnew, arg3, 0);
9999 rnewp = &rnew;
10002 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
10003 if (!is_error(ret) && arg4) {
10004 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
10005 goto efault;
10007 target_rold->rlim_cur = tswap64(rold.rlim_cur);
10008 target_rold->rlim_max = tswap64(rold.rlim_max);
10009 unlock_user_struct(target_rold, arg4, 1);
10011 break;
10013 #endif
10014 #ifdef TARGET_NR_gethostname
10015 case TARGET_NR_gethostname:
10017 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10018 if (name) {
10019 ret = get_errno(gethostname(name, arg2));
10020 unlock_user(name, arg1, arg2);
10021 } else {
10022 ret = -TARGET_EFAULT;
10024 break;
10026 #endif
10027 #ifdef TARGET_NR_atomic_cmpxchg_32
10028 case TARGET_NR_atomic_cmpxchg_32:
10030 /* should use start_exclusive from main.c */
10031 abi_ulong mem_value;
10032 if (get_user_u32(mem_value, arg6)) {
10033 target_siginfo_t info;
10034 info.si_signo = SIGSEGV;
10035 info.si_errno = 0;
10036 info.si_code = TARGET_SEGV_MAPERR;
10037 info._sifields._sigfault._addr = arg6;
10038 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10039 ret = 0xdeadbeef;
10042 if (mem_value == arg2)
10043 put_user_u32(arg1, arg6);
10044 ret = mem_value;
10045 break;
10047 #endif
10048 #ifdef TARGET_NR_atomic_barrier
10049 case TARGET_NR_atomic_barrier:
10051 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10052 ret = 0;
10053 break;
10055 #endif
10057 #ifdef TARGET_NR_timer_create
10058 case TARGET_NR_timer_create:
10060 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10062 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
10064 int clkid = arg1;
10065 int timer_index = next_free_host_timer();
10067 if (timer_index < 0) {
10068 ret = -TARGET_EAGAIN;
10069 } else {
10070 timer_t *phtimer = g_posix_timers + timer_index;
10072 if (arg2) {
10073 phost_sevp = &host_sevp;
10074 ret = target_to_host_sigevent(phost_sevp, arg2);
10075 if (ret != 0) {
10076 break;
10080 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
10081 if (ret) {
10082 phtimer = NULL;
10083 } else {
10084 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
10085 goto efault;
10089 break;
10091 #endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
        {
            /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
             * struct itimerspec * old_value */
            target_timer_t timerid = get_timer_id(arg1);

            if (timerid < 0) {
                ret = timerid;
            } else if (arg3 == 0) {
                ret = -TARGET_EINVAL;
            } else {
                timer_t htimer = g_posix_timers[timerid];
                struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

                target_to_host_itimerspec(&hspec_new, arg3);
                ret = get_errno(
                    timer_settime(htimer, arg2, &hspec_new, &hspec_old));
                /* Bug fix: the previous timer setting belongs at the guest's
                 * old_value pointer (arg4), not the flags word (arg2); also
                 * skip the write-back when the guest passed NULL or the
                 * syscall failed. */
                if (arg4 && !is_error(ret)) {
                    host_to_target_itimerspec(arg4, &hspec_old);
                }
            }
        }
        break;
#endif
10117 #ifdef TARGET_NR_timer_gettime
10118 case TARGET_NR_timer_gettime:
10120 /* args: timer_t timerid, struct itimerspec *curr_value */
10121 target_timer_t timerid = get_timer_id(arg1);
10123 if (timerid < 0) {
10124 ret = timerid;
10125 } else if (!arg2) {
10126 ret = -TARGET_EFAULT;
10127 } else {
10128 timer_t htimer = g_posix_timers[timerid];
10129 struct itimerspec hspec;
10130 ret = get_errno(timer_gettime(htimer, &hspec));
10132 if (host_to_target_itimerspec(arg2, &hspec)) {
10133 ret = -TARGET_EFAULT;
10136 break;
10138 #endif
10140 #ifdef TARGET_NR_timer_getoverrun
10141 case TARGET_NR_timer_getoverrun:
10143 /* args: timer_t timerid */
10144 target_timer_t timerid = get_timer_id(arg1);
10146 if (timerid < 0) {
10147 ret = timerid;
10148 } else {
10149 timer_t htimer = g_posix_timers[timerid];
10150 ret = get_errno(timer_getoverrun(htimer));
10152 fd_trans_unregister(ret);
10153 break;
10155 #endif
10157 #ifdef TARGET_NR_timer_delete
10158 case TARGET_NR_timer_delete:
10160 /* args: timer_t timerid */
10161 target_timer_t timerid = get_timer_id(arg1);
10163 if (timerid < 0) {
10164 ret = timerid;
10165 } else {
10166 timer_t htimer = g_posix_timers[timerid];
10167 ret = get_errno(timer_delete(htimer));
10168 g_posix_timers[timerid] = 0;
10170 break;
10172 #endif
10174 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
10175 case TARGET_NR_timerfd_create:
10176 ret = get_errno(timerfd_create(arg1,
10177 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
10178 break;
10179 #endif
10181 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
10182 case TARGET_NR_timerfd_gettime:
10184 struct itimerspec its_curr;
10186 ret = get_errno(timerfd_gettime(arg1, &its_curr));
10188 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
10189 goto efault;
10192 break;
10193 #endif
10195 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
10196 case TARGET_NR_timerfd_settime:
10198 struct itimerspec its_new, its_old, *p_new;
10200 if (arg3) {
10201 if (target_to_host_itimerspec(&its_new, arg3)) {
10202 goto efault;
10204 p_new = &its_new;
10205 } else {
10206 p_new = NULL;
10209 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
10211 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
10212 goto efault;
10215 break;
10216 #endif
10218 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
10219 case TARGET_NR_ioprio_get:
10220 ret = get_errno(ioprio_get(arg1, arg2));
10221 break;
10222 #endif
10224 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
10225 case TARGET_NR_ioprio_set:
10226 ret = get_errno(ioprio_set(arg1, arg2, arg3));
10227 break;
10228 #endif
10230 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
10231 case TARGET_NR_setns:
10232 ret = get_errno(setns(arg1, arg2));
10233 break;
10234 #endif
10235 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
10236 case TARGET_NR_unshare:
10237 ret = get_errno(unshare(arg1));
10238 break;
10239 #endif
10241 default:
10242 unimplemented:
10243 gemu_log("qemu: Unsupported syscall: %d\n", num);
10244 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
10245 unimplemented_nowarn:
10246 #endif
10247 ret = -TARGET_ENOSYS;
10248 break;
10250 fail:
10251 #ifdef DEBUG
10252 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
10253 #endif
10254 if(do_strace)
10255 print_syscall_ret(num, ret);
10256 return ret;
10257 efault:
10258 ret = -TARGET_EFAULT;
10259 goto fail;