linux-user: Use safe_syscall for sigsuspend syscalls
[qemu/cris-port.git] / linux-user / syscall.c
blob5bdfe2a936c49de09baeda7b4b62a90aae32d3ca
/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/mman.h>
36 #include <sys/swap.h>
37 #include <linux/capability.h>
38 #include <sched.h>
39 #ifdef __ia64__
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
42 #endif
43 #include <sys/socket.h>
44 #include <sys/un.h>
45 #include <sys/uio.h>
46 #include <sys/poll.h>
47 #include <sys/times.h>
48 #include <sys/shm.h>
49 #include <sys/sem.h>
50 #include <sys/statfs.h>
51 #include <utime.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
60 #ifdef CONFIG_TIMERFD
61 #include <sys/timerfd.h>
62 #endif
63 #ifdef TARGET_GPROF
64 #include <sys/gmon.h>
65 #endif
66 #ifdef CONFIG_EVENTFD
67 #include <sys/eventfd.h>
68 #endif
69 #ifdef CONFIG_EPOLL
70 #include <sys/epoll.h>
71 #endif
72 #ifdef CONFIG_ATTR
73 #include "qemu/xattr.h"
74 #endif
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
77 #endif
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
91 #include <linux/kd.h>
92 #include <linux/mtio.h>
93 #include <linux/fs.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
96 #endif
97 #include <linux/fb.h>
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #endif
108 #include <linux/audit.h>
109 #include "linux_loop.h"
110 #include "uname.h"
112 #include "qemu.h"
114 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
115 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
117 //#define DEBUG
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
121 //#define DEBUG_ERESTARTSYS
123 //#include <linux/msdos_fs.h>
124 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
125 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* This is the size of the host kernel's sigset_t, needed where we make
 * direct system calls that take a sigset_t pointer and a size.
 */
130 #define SIGSET_T_SIZE (_NSIG / 8)
132 #undef _syscall0
133 #undef _syscall1
134 #undef _syscall2
135 #undef _syscall3
136 #undef _syscall4
137 #undef _syscall5
138 #undef _syscall6
/* Generate a static host-syscall wrapper function for each arity (0-6
 * arguments).  The generated functions invoke the raw host syscall and
 * return its result using host conventions (-1 with errno set on error).
 */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
187 #define __NR_sys_uname __NR_uname
188 #define __NR_sys_getcwd1 __NR_getcwd
189 #define __NR_sys_getdents __NR_getdents
190 #define __NR_sys_getdents64 __NR_getdents64
191 #define __NR_sys_getpriority __NR_getpriority
192 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
193 #define __NR_sys_syslog __NR_syslog
194 #define __NR_sys_tgkill __NR_tgkill
195 #define __NR_sys_tkill __NR_tkill
196 #define __NR_sys_futex __NR_futex
197 #define __NR_sys_inotify_init __NR_inotify_init
198 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
199 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
201 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
202 defined(__s390x__)
203 #define __NR__llseek __NR_lseek
204 #endif
206 /* Newer kernel ports have llseek() instead of _llseek() */
207 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
208 #define TARGET_NR__llseek TARGET_NR_llseek
209 #endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
 * errno.  ENOSYS tells callers that per-thread IDs are unavailable here.
 */
static int gettid(void)
{
    return -ENOSYS;
}
#endif
220 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
221 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
222 #endif
223 #if !defined(__NR_getdents) || \
224 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
225 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
226 #endif
227 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
228 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
229 loff_t *, res, uint, wh);
230 #endif
231 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
232 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
233 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
234 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
235 #endif
236 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
237 _syscall2(int,sys_tkill,int,tid,int,sig)
238 #endif
239 #ifdef __NR_exit_group
240 _syscall1(int,exit_group,int,error_code)
241 #endif
242 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
243 _syscall1(int,set_tid_address,int *,tidptr)
244 #endif
245 #if defined(TARGET_NR_futex) && defined(__NR_futex)
246 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
247 const struct timespec *,timeout,int *,uaddr2,int,val3)
248 #endif
249 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
250 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
251 unsigned long *, user_mask_ptr);
252 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
253 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
254 unsigned long *, user_mask_ptr);
255 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
256 void *, arg);
257 _syscall2(int, capget, struct __user_cap_header_struct *, header,
258 struct __user_cap_data_struct *, data);
259 _syscall2(int, capset, struct __user_cap_header_struct *, header,
260 struct __user_cap_data_struct *, data);
261 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
262 _syscall2(int, ioprio_get, int, which, int, who)
263 #endif
264 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
265 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
266 #endif
267 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
268 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
269 #endif
271 static bitmask_transtbl fcntl_flags_tbl[] = {
272 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
273 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
274 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
275 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
276 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
277 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
278 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
279 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
280 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
281 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
282 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
283 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
284 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
285 #if defined(O_DIRECT)
286 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
287 #endif
288 #if defined(O_NOATIME)
289 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
290 #endif
291 #if defined(O_CLOEXEC)
292 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
293 #endif
294 #if defined(O_PATH)
295 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
296 #endif
297 /* Don't terminate the list prematurely on 64-bit host+guest. */
298 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
299 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
300 #endif
301 { 0, 0, 0, 0 }
304 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
305 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
306 typedef struct TargetFdTrans {
307 TargetFdDataFunc host_to_target_data;
308 TargetFdDataFunc target_to_host_data;
309 TargetFdAddrFunc target_to_host_addr;
310 } TargetFdTrans;
312 static TargetFdTrans **target_fd_trans;
314 static unsigned int target_fd_max;
316 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
318 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
319 return target_fd_trans[fd]->target_to_host_data;
321 return NULL;
324 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
326 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
327 return target_fd_trans[fd]->host_to_target_data;
329 return NULL;
332 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
334 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
335 return target_fd_trans[fd]->target_to_host_addr;
337 return NULL;
340 static void fd_trans_register(int fd, TargetFdTrans *trans)
342 unsigned int oldmax;
344 if (fd >= target_fd_max) {
345 oldmax = target_fd_max;
346 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
347 target_fd_trans = g_renew(TargetFdTrans *,
348 target_fd_trans, target_fd_max);
349 memset((void *)(target_fd_trans + oldmax), 0,
350 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
352 target_fd_trans[fd] = trans;
355 static void fd_trans_unregister(int fd)
357 if (fd >= 0 && fd < target_fd_max) {
358 target_fd_trans[fd] = NULL;
362 static void fd_trans_dup(int oldfd, int newfd)
364 fd_trans_unregister(newfd);
365 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
366 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() emulation matching the kernel syscall's return convention:
 * on success return the length of the path including the trailing NUL,
 * on failure return -1 with errno set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Use the libc wrappers: a NULL pathname means "operate on dirfd itself",
 * which maps to futimens().
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support: fail with ENOSYS. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers over the libc inotify API so the syscall dispatch code
 * can call sys_* names uniformly whether or not inotify is available.
 */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maxiumum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim and return the index of the first unused timer slot, marking it
 * in-use with a placeholder value; return -1 if all slots are taken.
 */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
500 #define ERRNO_TABLE_SIZE 1200
502 /* target_to_host_errno_table[] is initialized from
503 * host_to_target_errno_table[] in syscall_init(). */
504 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
508 * This list is the union of errno values overridden in asm-<arch>/errno.h
509 * minus the errnos that are not actually generic to all archs.
511 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
512 [EAGAIN] = TARGET_EAGAIN,
513 [EIDRM] = TARGET_EIDRM,
514 [ECHRNG] = TARGET_ECHRNG,
515 [EL2NSYNC] = TARGET_EL2NSYNC,
516 [EL3HLT] = TARGET_EL3HLT,
517 [EL3RST] = TARGET_EL3RST,
518 [ELNRNG] = TARGET_ELNRNG,
519 [EUNATCH] = TARGET_EUNATCH,
520 [ENOCSI] = TARGET_ENOCSI,
521 [EL2HLT] = TARGET_EL2HLT,
522 [EDEADLK] = TARGET_EDEADLK,
523 [ENOLCK] = TARGET_ENOLCK,
524 [EBADE] = TARGET_EBADE,
525 [EBADR] = TARGET_EBADR,
526 [EXFULL] = TARGET_EXFULL,
527 [ENOANO] = TARGET_ENOANO,
528 [EBADRQC] = TARGET_EBADRQC,
529 [EBADSLT] = TARGET_EBADSLT,
530 [EBFONT] = TARGET_EBFONT,
531 [ENOSTR] = TARGET_ENOSTR,
532 [ENODATA] = TARGET_ENODATA,
533 [ETIME] = TARGET_ETIME,
534 [ENOSR] = TARGET_ENOSR,
535 [ENONET] = TARGET_ENONET,
536 [ENOPKG] = TARGET_ENOPKG,
537 [EREMOTE] = TARGET_EREMOTE,
538 [ENOLINK] = TARGET_ENOLINK,
539 [EADV] = TARGET_EADV,
540 [ESRMNT] = TARGET_ESRMNT,
541 [ECOMM] = TARGET_ECOMM,
542 [EPROTO] = TARGET_EPROTO,
543 [EDOTDOT] = TARGET_EDOTDOT,
544 [EMULTIHOP] = TARGET_EMULTIHOP,
545 [EBADMSG] = TARGET_EBADMSG,
546 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
547 [EOVERFLOW] = TARGET_EOVERFLOW,
548 [ENOTUNIQ] = TARGET_ENOTUNIQ,
549 [EBADFD] = TARGET_EBADFD,
550 [EREMCHG] = TARGET_EREMCHG,
551 [ELIBACC] = TARGET_ELIBACC,
552 [ELIBBAD] = TARGET_ELIBBAD,
553 [ELIBSCN] = TARGET_ELIBSCN,
554 [ELIBMAX] = TARGET_ELIBMAX,
555 [ELIBEXEC] = TARGET_ELIBEXEC,
556 [EILSEQ] = TARGET_EILSEQ,
557 [ENOSYS] = TARGET_ENOSYS,
558 [ELOOP] = TARGET_ELOOP,
559 [ERESTART] = TARGET_ERESTART,
560 [ESTRPIPE] = TARGET_ESTRPIPE,
561 [ENOTEMPTY] = TARGET_ENOTEMPTY,
562 [EUSERS] = TARGET_EUSERS,
563 [ENOTSOCK] = TARGET_ENOTSOCK,
564 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
565 [EMSGSIZE] = TARGET_EMSGSIZE,
566 [EPROTOTYPE] = TARGET_EPROTOTYPE,
567 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
568 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
569 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
570 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
571 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
572 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
573 [EADDRINUSE] = TARGET_EADDRINUSE,
574 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
575 [ENETDOWN] = TARGET_ENETDOWN,
576 [ENETUNREACH] = TARGET_ENETUNREACH,
577 [ENETRESET] = TARGET_ENETRESET,
578 [ECONNABORTED] = TARGET_ECONNABORTED,
579 [ECONNRESET] = TARGET_ECONNRESET,
580 [ENOBUFS] = TARGET_ENOBUFS,
581 [EISCONN] = TARGET_EISCONN,
582 [ENOTCONN] = TARGET_ENOTCONN,
583 [EUCLEAN] = TARGET_EUCLEAN,
584 [ENOTNAM] = TARGET_ENOTNAM,
585 [ENAVAIL] = TARGET_ENAVAIL,
586 [EISNAM] = TARGET_EISNAM,
587 [EREMOTEIO] = TARGET_EREMOTEIO,
588 [ESHUTDOWN] = TARGET_ESHUTDOWN,
589 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
590 [ETIMEDOUT] = TARGET_ETIMEDOUT,
591 [ECONNREFUSED] = TARGET_ECONNREFUSED,
592 [EHOSTDOWN] = TARGET_EHOSTDOWN,
593 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
594 [EALREADY] = TARGET_EALREADY,
595 [EINPROGRESS] = TARGET_EINPROGRESS,
596 [ESTALE] = TARGET_ESTALE,
597 [ECANCELED] = TARGET_ECANCELED,
598 [ENOMEDIUM] = TARGET_ENOMEDIUM,
599 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
600 #ifdef ENOKEY
601 [ENOKEY] = TARGET_ENOKEY,
602 #endif
603 #ifdef EKEYEXPIRED
604 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
605 #endif
606 #ifdef EKEYREVOKED
607 [EKEYREVOKED] = TARGET_EKEYREVOKED,
608 #endif
609 #ifdef EKEYREJECTED
610 [EKEYREJECTED] = TARGET_EKEYREJECTED,
611 #endif
612 #ifdef EOWNERDEAD
613 [EOWNERDEAD] = TARGET_EOWNERDEAD,
614 #endif
615 #ifdef ENOTRECOVERABLE
616 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
617 #endif
620 static inline int host_to_target_errno(int err)
622 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
623 host_to_target_errno_table[err]) {
624 return host_to_target_errno_table[err];
626 return err;
629 static inline int target_to_host_errno(int err)
631 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
632 target_to_host_errno_table[err]) {
633 return target_to_host_errno_table[err];
635 return err;
638 static inline abi_long get_errno(abi_long ret)
640 if (ret == -1)
641 return -host_to_target_errno(errno);
642 else
643 return ret;
646 static inline int is_error(abi_long ret)
648 return (abi_ulong)ret >= (abi_ulong)(-4096);
651 char *target_strerror(int err)
653 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
654 return NULL;
656 return strerror(target_to_host_errno(err));
/* Generate a safe_NAME() wrapper for each arity.  safe_syscall() is the
 * assembly helper that makes the host syscall restartable with respect to
 * guest signal delivery (returning -TARGET_ERESTARTSYS when interrupted
 * before the syscall proper begins).
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
706 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
707 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
708 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
709 int, flags, mode_t, mode)
710 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
711 struct rusage *, rusage)
712 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
713 int, options, struct rusage *, rusage)
714 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
715 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
716 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
717 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
718 const struct timespec *,timeout,int *,uaddr2,int,val3)
719 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
721 static inline int host_to_target_sock_type(int host_type)
723 int target_type;
725 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
726 case SOCK_DGRAM:
727 target_type = TARGET_SOCK_DGRAM;
728 break;
729 case SOCK_STREAM:
730 target_type = TARGET_SOCK_STREAM;
731 break;
732 default:
733 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
734 break;
737 #if defined(SOCK_CLOEXEC)
738 if (host_type & SOCK_CLOEXEC) {
739 target_type |= TARGET_SOCK_CLOEXEC;
741 #endif
743 #if defined(SOCK_NONBLOCK)
744 if (host_type & SOCK_NONBLOCK) {
745 target_type |= TARGET_SOCK_NONBLOCK;
747 #endif
749 return target_type;
752 static abi_ulong target_brk;
753 static abi_ulong target_original_brk;
754 static abi_ulong brk_page;
756 void target_set_brk(abi_ulong new_brk)
758 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
759 brk_page = HOST_PAGE_ALIGN(target_brk);
762 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
763 #define DEBUGF_BRK(message, args...)
765 /* do_brk() must return target values and target errnos. */
766 abi_long do_brk(abi_ulong new_brk)
768 abi_long mapped_addr;
769 int new_alloc_size;
771 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
773 if (!new_brk) {
774 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
775 return target_brk;
777 if (new_brk < target_original_brk) {
778 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
779 target_brk);
780 return target_brk;
783 /* If the new brk is less than the highest page reserved to the
784 * target heap allocation, set it and we're almost done... */
785 if (new_brk <= brk_page) {
786 /* Heap contents are initialized to zero, as for anonymous
787 * mapped pages. */
788 if (new_brk > target_brk) {
789 memset(g2h(target_brk), 0, new_brk - target_brk);
791 target_brk = new_brk;
792 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
793 return target_brk;
796 /* We need to allocate more memory after the brk... Note that
797 * we don't use MAP_FIXED because that will map over the top of
798 * any existing mapping (like the one with the host libc or qemu
799 * itself); instead we treat "mapped but at wrong address" as
800 * a failure and unmap again.
802 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
803 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
804 PROT_READ|PROT_WRITE,
805 MAP_ANON|MAP_PRIVATE, 0, 0));
807 if (mapped_addr == brk_page) {
808 /* Heap contents are initialized to zero, as for anonymous
809 * mapped pages. Technically the new pages are already
810 * initialized to zero since they *are* anonymous mapped
811 * pages, however we have to take care with the contents that
812 * come from the remaining part of the previous page: it may
813 * contains garbage data due to a previous heap usage (grown
814 * then shrunken). */
815 memset(g2h(target_brk), 0, brk_page - target_brk);
817 target_brk = new_brk;
818 brk_page = HOST_PAGE_ALIGN(target_brk);
819 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
820 target_brk);
821 return target_brk;
822 } else if (mapped_addr != -1) {
823 /* Mapped but at wrong address, meaning there wasn't actually
824 * enough space for this brk.
826 target_munmap(mapped_addr, new_alloc_size);
827 mapped_addr = -1;
828 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
830 else {
831 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
834 #if defined(TARGET_ALPHA)
835 /* We (partially) emulate OSF/1 on Alpha, which requires we
836 return a proper errno, not an unchanged brk value. */
837 return -TARGET_ENOMEM;
838 #endif
839 /* For everything else, return the previous break. */
840 return target_brk;
843 static inline abi_long copy_from_user_fdset(fd_set *fds,
844 abi_ulong target_fds_addr,
845 int n)
847 int i, nw, j, k;
848 abi_ulong b, *target_fds;
850 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
851 if (!(target_fds = lock_user(VERIFY_READ,
852 target_fds_addr,
853 sizeof(abi_ulong) * nw,
854 1)))
855 return -TARGET_EFAULT;
857 FD_ZERO(fds);
858 k = 0;
859 for (i = 0; i < nw; i++) {
860 /* grab the abi_ulong */
861 __get_user(b, &target_fds[i]);
862 for (j = 0; j < TARGET_ABI_BITS; j++) {
863 /* check the bit inside the abi_ulong */
864 if ((b >> j) & 1)
865 FD_SET(k, fds);
866 k++;
870 unlock_user(target_fds, target_fds_addr, 0);
872 return 0;
875 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
876 abi_ulong target_fds_addr,
877 int n)
879 if (target_fds_addr) {
880 if (copy_from_user_fdset(fds, target_fds_addr, n))
881 return -TARGET_EFAULT;
882 *fds_ptr = fds;
883 } else {
884 *fds_ptr = NULL;
886 return 0;
889 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
890 const fd_set *fds,
891 int n)
893 int i, nw, j, k;
894 abi_long v;
895 abi_ulong *target_fds;
897 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
898 if (!(target_fds = lock_user(VERIFY_WRITE,
899 target_fds_addr,
900 sizeof(abi_ulong) * nw,
901 0)))
902 return -TARGET_EFAULT;
904 k = 0;
905 for (i = 0; i < nw; i++) {
906 v = 0;
907 for (j = 0; j < TARGET_ABI_BITS; j++) {
908 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
909 k++;
911 __put_user(v, &target_fds[i]);
914 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
916 return 0;
919 #if defined(__alpha__)
920 #define HOST_HZ 1024
921 #else
922 #define HOST_HZ 100
923 #endif
925 static inline abi_long host_to_target_clock_t(long ticks)
927 #if HOST_HZ == TARGET_HZ
928 return ticks;
929 #else
930 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
931 #endif
934 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
935 const struct rusage *rusage)
937 struct target_rusage *target_rusage;
939 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
940 return -TARGET_EFAULT;
941 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
942 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
943 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
944 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
945 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
946 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
947 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
948 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
949 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
950 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
951 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
952 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
953 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
954 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
955 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
956 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
957 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
958 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
959 unlock_user_struct(target_rusage, target_addr, 1);
961 return 0;
964 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
966 abi_ulong target_rlim_swap;
967 rlim_t result;
969 target_rlim_swap = tswapal(target_rlim);
970 if (target_rlim_swap == TARGET_RLIM_INFINITY)
971 return RLIM_INFINITY;
973 result = target_rlim_swap;
974 if (target_rlim_swap != (rlim_t)result)
975 return RLIM_INFINITY;
977 return result;
980 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
982 abi_ulong target_rlim_swap;
983 abi_ulong result;
985 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
986 target_rlim_swap = TARGET_RLIM_INFINITY;
987 else
988 target_rlim_swap = rlim;
989 result = tswapal(target_rlim_swap);
991 return result;
994 static inline int target_to_host_resource(int code)
996 switch (code) {
997 case TARGET_RLIMIT_AS:
998 return RLIMIT_AS;
999 case TARGET_RLIMIT_CORE:
1000 return RLIMIT_CORE;
1001 case TARGET_RLIMIT_CPU:
1002 return RLIMIT_CPU;
1003 case TARGET_RLIMIT_DATA:
1004 return RLIMIT_DATA;
1005 case TARGET_RLIMIT_FSIZE:
1006 return RLIMIT_FSIZE;
1007 case TARGET_RLIMIT_LOCKS:
1008 return RLIMIT_LOCKS;
1009 case TARGET_RLIMIT_MEMLOCK:
1010 return RLIMIT_MEMLOCK;
1011 case TARGET_RLIMIT_MSGQUEUE:
1012 return RLIMIT_MSGQUEUE;
1013 case TARGET_RLIMIT_NICE:
1014 return RLIMIT_NICE;
1015 case TARGET_RLIMIT_NOFILE:
1016 return RLIMIT_NOFILE;
1017 case TARGET_RLIMIT_NPROC:
1018 return RLIMIT_NPROC;
1019 case TARGET_RLIMIT_RSS:
1020 return RLIMIT_RSS;
1021 case TARGET_RLIMIT_RTPRIO:
1022 return RLIMIT_RTPRIO;
1023 case TARGET_RLIMIT_SIGPENDING:
1024 return RLIMIT_SIGPENDING;
1025 case TARGET_RLIMIT_STACK:
1026 return RLIMIT_STACK;
1027 default:
1028 return code;
1032 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1033 abi_ulong target_tv_addr)
1035 struct target_timeval *target_tv;
1037 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1038 return -TARGET_EFAULT;
1040 __get_user(tv->tv_sec, &target_tv->tv_sec);
1041 __get_user(tv->tv_usec, &target_tv->tv_usec);
1043 unlock_user_struct(target_tv, target_tv_addr, 0);
1045 return 0;
1048 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1049 const struct timeval *tv)
1051 struct target_timeval *target_tv;
1053 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1054 return -TARGET_EFAULT;
1056 __put_user(tv->tv_sec, &target_tv->tv_sec);
1057 __put_user(tv->tv_usec, &target_tv->tv_usec);
1059 unlock_user_struct(target_tv, target_tv_addr, 1);
1061 return 0;
1064 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1065 abi_ulong target_tz_addr)
1067 struct target_timezone *target_tz;
1069 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1070 return -TARGET_EFAULT;
1073 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1074 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1076 unlock_user_struct(target_tz, target_tz_addr, 0);
1078 return 0;
1081 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1082 #include <mqueue.h>
1084 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1085 abi_ulong target_mq_attr_addr)
1087 struct target_mq_attr *target_mq_attr;
1089 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1090 target_mq_attr_addr, 1))
1091 return -TARGET_EFAULT;
1093 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1094 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1095 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1096 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1098 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1100 return 0;
1103 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1104 const struct mq_attr *attr)
1106 struct target_mq_attr *target_mq_attr;
1108 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1109 target_mq_attr_addr, 0))
1110 return -TARGET_EFAULT;
1112 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1113 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1114 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1115 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1117 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1119 return 0;
1121 #endif
1123 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Emulates select(2) for the guest: copies the three fd_sets and the
 * optional timeout in from guest memory, runs the host syscall via
 * safe_pselect6() (so guest signals can interrupt it safely), and
 * copies the surviving fd_sets plus the remaining time back out.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* A zero guest address yields a NULL fd_set pointer (helper sets
     * the *_ptr outputs accordingly). */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select() takes a timeval but pselect6 wants a timespec. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Write back the time remaining, as Linux select() does. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1180 #endif
/* Create a pipe with flags via the host pipe2() syscall.
 * Returns the raw host result (-1 with errno set on failure), or
 * -ENOSYS when the build host lacks pipe2() support.
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Emulate pipe()/pipe2() for the guest.
 *
 * Creates the host pipe, then delivers the two descriptors to the guest
 * either through target-specific register conventions (classic pipe on
 * Alpha/MIPS/SH4/SPARC returns the read fd in the result register and
 * the write fd in a second register) or by writing both into the
 * guest's int[2] array at @pipedes.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Generic path: store both fds into the guest's array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1225 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1226 abi_ulong target_addr,
1227 socklen_t len)
1229 struct target_ip_mreqn *target_smreqn;
1231 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1232 if (!target_smreqn)
1233 return -TARGET_EFAULT;
1234 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1235 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1236 if (len == sizeof(struct target_ip_mreqn))
1237 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1238 unlock_user(target_smreqn, target_addr, 0);
1240 return 0;
/* Convert a guest sockaddr at @target_addr into the host sockaddr at
 * @addr, handling the byte-swapped family field and the per-family
 * quirks of AF_UNIX / AF_NETLINK / AF_PACKET.  A registered per-fd
 * address translator (fd_trans) takes precedence when present.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Last byte inside the claimed length is non-NUL but the
             * byte just past it is NUL: extend len to include it. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;        /* type uint16_t */
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1300 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1301 struct sockaddr *addr,
1302 socklen_t len)
1304 struct target_sockaddr *target_saddr;
1306 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1307 if (!target_saddr)
1308 return -TARGET_EFAULT;
1309 memcpy(target_saddr, addr, len);
1310 target_saddr->sa_family = tswap16(addr->sa_family);
1311 if (addr->sa_family == AF_NETLINK) {
1312 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1313 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1314 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1316 unlock_user(target_saddr, target_addr, len);
1318 return 0;
/* Convert the ancillary-data (cmsg) chain of a guest msghdr into the
 * host msghdr for sendmsg().  Payloads handled specially are
 * SCM_RIGHTS (array of file descriptors) and SCM_CREDENTIALS; anything
 * else is logged and copied verbatim.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload size = total target cmsg length minus its header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Passed file descriptors: byte-swap each int. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the ancillary-data (cmsg) chain of a host msghdr back into
 * the guest msghdr after recvmsg().  Handles SCM_RIGHTS, SO_TIMESTAMP
 * and SCM_CREDENTIALS; other payloads are logged and copied verbatim.
 * Truncation due to a too-small guest buffer sets MSG_CTRUNC in the
 * guest's msg_flags.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: raw copy, zero-padding if the target
             * slot is larger than the host payload. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
1547 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
1549 nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
1550 nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
1551 nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
1552 nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
1553 nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
/* Walk a host-produced buffer of netlink messages, applying the
 * per-payload converter and byte-swapping each header to target order.
 * The chain terminates at NLMSG_DONE or NLMSG_ERROR (both still get
 * their headers swapped).  Returns 0 or the converter's error.
 */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        nlmsg_len = nlh->nlmsg_len;
        /* Sanity-check the (host-order) length before trusting it. */
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
1601 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
1602 size_t len,
1603 abi_long (*target_to_host_nlmsg)
1604 (struct nlmsghdr *))
1606 int ret;
1608 while (len > sizeof(struct nlmsghdr)) {
1609 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
1610 tswap32(nlh->nlmsg_len) > len) {
1611 break;
1613 tswap_nlmsghdr(nlh);
1614 switch (nlh->nlmsg_type) {
1615 case NLMSG_DONE:
1616 return 0;
1617 case NLMSG_NOOP:
1618 break;
1619 case NLMSG_ERROR:
1621 struct nlmsgerr *e = NLMSG_DATA(nlh);
1622 e->error = tswap32(e->error);
1623 tswap_nlmsghdr(&e->msg);
1625 default:
1626 ret = target_to_host_nlmsg(nlh);
1627 if (ret < 0) {
1628 return ret;
1631 len -= NLMSG_ALIGN(nlh->nlmsg_len);
1632 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
1634 return 0;
1637 #ifdef CONFIG_RTNETLINK
/* Walk a host-produced rtattr chain, converting each payload with
 * @host_to_target_rtattr and then swapping the attribute header to
 * target order.  Malformed lengths stop the walk.  Returns 0 or the
 * converter's error.
 */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                               (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        /* Save the host-order length before the header is swapped. */
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        /* Convert the payload while the header is still host order,
         * then swap the header itself. */
        ret = host_to_target_rtattr(rtattr);
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}
/* Byte-swap the payload of one IFLA_* link attribute from host to
 * target order.  Binary/string payloads are left untouched; integer
 * and struct payloads are swapped field by field.  Unknown and nested
 * types are logged and passed through unchanged.  Always returns 0.
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;

    switch (rtattr->rta_type) {
    /* binary stream */
    case IFLA_ADDRESS:
    case IFLA_BROADCAST:
    /* string */
    case IFLA_IFNAME:
    case IFLA_QDISC:
        break;
    /* uint8_t */
    case IFLA_OPERSTATE:
    case IFLA_LINKMODE:
    case IFLA_CARRIER:
    case IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case IFLA_MTU:
    case IFLA_LINK:
    case IFLA_WEIGHT:
    case IFLA_TXQLEN:
    case IFLA_CARRIER_CHANGES:
    case IFLA_NUM_RX_QUEUES:
    case IFLA_NUM_TX_QUEUES:
    case IFLA_PROMISCUITY:
    case IFLA_EXT_MASK:
    case IFLA_LINK_NETNSID:
    case IFLA_GROUP:
    case IFLA_MASTER:
    case IFLA_NUM_VF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case IFLA_AF_SPEC:
    case IFLA_LINKINFO:
        /* FIXME: implement nested type */
        gemu_log("Unimplemented nested type %d\n", rtattr->rta_type);
        break;
    default:
        gemu_log("Unknown host IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Byte-swap the payload of one IFA_* address attribute from host to
 * target order.  Always returns 0; unknown types are just logged.
 */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        /* NOTE(review): per rtnetlink(7) IFA_BROADCAST carries an
         * address (binary, family dependent), not a u32 — swapping it
         * here looks suspicious; confirm before relying on it. */
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Byte-swap the payload of one RTA_* route attribute from host to
 * target order.  Always returns 0; unknown types are just logged.
 */
static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_GATEWAY:
    case RTA_DST:
    case RTA_PREFSRC:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_TABLE:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
1846 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
1847 uint32_t rtattr_len)
1849 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
1850 host_to_target_data_link_rtattr);
1853 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
1854 uint32_t rtattr_len)
1856 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
1857 host_to_target_data_addr_rtattr);
1860 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
1861 uint32_t rtattr_len)
1863 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
1864 host_to_target_data_route_rtattr);
/* Convert one rtnetlink message payload (link / address / route) from
 * host to target byte order, including its trailing rtattr chain.
 * Returns 0, or -TARGET_EINVAL for message types we do not handle.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    /* Header is already host order at this point. */
    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        ifi = NLMSG_DATA(nlh);
        ifi->ifi_type = tswap16(ifi->ifi_type);
        ifi->ifi_index = tswap32(ifi->ifi_index);
        ifi->ifi_flags = tswap32(ifi->ifi_flags);
        ifi->ifi_change = tswap32(ifi->ifi_change);
        host_to_target_link_rtattr(IFLA_RTA(ifi),
                                   nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        ifa = NLMSG_DATA(nlh);
        ifa->ifa_index = tswap32(ifa->ifa_index);
        host_to_target_addr_rtattr(IFA_RTA(ifa),
                                   nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        rtm = NLMSG_DATA(nlh);
        rtm->rtm_flags = tswap32(rtm->rtm_flags);
        host_to_target_route_rtattr(RTM_RTA(rtm),
                                    nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
1909 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
1910 size_t len)
1912 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
/* Walk a guest-supplied rtattr chain, swapping each attribute header
 * to host order and converting its payload with
 * @target_to_host_rtattr.  Malformed lengths stop the walk.
 * NOTE(review): this loop uses `len >=` while the host-to-target
 * walker uses `len >` — confirm whether the asymmetry is intended.
 */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                               (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        /* Length is still target-endian: validate before swapping. */
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                 RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}
1940 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
1942 switch (rtattr->rta_type) {
1943 default:
1944 gemu_log("Unknown target IFLA type: %d\n", rtattr->rta_type);
1945 break;
1947 return 0;
1950 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
1952 switch (rtattr->rta_type) {
1953 /* binary: depends on family type */
1954 case IFA_LOCAL:
1955 case IFA_ADDRESS:
1956 break;
1957 default:
1958 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
1959 break;
1961 return 0;
1964 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
1966 uint32_t *u32;
1967 switch (rtattr->rta_type) {
1968 /* binary: depends on family type */
1969 case RTA_DST:
1970 case RTA_SRC:
1971 case RTA_GATEWAY:
1972 break;
1973 /* u32 */
1974 case RTA_OIF:
1975 u32 = RTA_DATA(rtattr);
1976 *u32 = tswap32(*u32);
1977 break;
1978 default:
1979 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
1980 break;
1982 return 0;
1985 static void target_to_host_link_rtattr(struct rtattr *rtattr,
1986 uint32_t rtattr_len)
1988 target_to_host_for_each_rtattr(rtattr, rtattr_len,
1989 target_to_host_data_link_rtattr);
1992 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
1993 uint32_t rtattr_len)
1995 target_to_host_for_each_rtattr(rtattr, rtattr_len,
1996 target_to_host_data_addr_rtattr);
1999 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2000 uint32_t rtattr_len)
2002 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2003 target_to_host_data_route_rtattr);
/* Convert one guest rtnetlink request payload (link / address / route)
 * to host byte order, including its trailing rtattr chain.  GET
 * requests carry no payload needing conversion.  Returns 0, or
 * -TARGET_EOPNOTSUPP for message types we do not handle.
 */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    /* Header has already been swapped to host order by the caller. */
    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        ifi = NLMSG_DATA(nlh);
        ifi->ifi_type = tswap16(ifi->ifi_type);
        ifi->ifi_index = tswap32(ifi->ifi_index);
        ifi->ifi_flags = tswap32(ifi->ifi_flags);
        ifi->ifi_change = tswap32(ifi->ifi_change);
        target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                   NLMSG_LENGTH(sizeof(*ifi)));
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        ifa = NLMSG_DATA(nlh);
        ifa->ifa_index = tswap32(ifa->ifa_index);
        target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                   NLMSG_LENGTH(sizeof(*ifa)));
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        rtm = NLMSG_DATA(nlh);
        rtm->rtm_flags = tswap32(rtm->rtm_flags);
        target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                    NLMSG_LENGTH(sizeof(*rtm)));
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
2048 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2050 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2052 #endif /* CONFIG_RTNETLINK */
2054 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2056 switch (nlh->nlmsg_type) {
2057 default:
2058 gemu_log("Unknown host audit message type %d\n",
2059 nlh->nlmsg_type);
2060 return -TARGET_EINVAL;
2062 return 0;
2065 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2066 size_t len)
2068 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2071 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2073 switch (nlh->nlmsg_type) {
2074 case AUDIT_USER:
2075 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2076 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2077 break;
2078 default:
2079 gemu_log("Unknown target audit message type %d\n",
2080 nlh->nlmsg_type);
2081 return -TARGET_EINVAL;
2084 return 0;
2087 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2089 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2092 /* do_setsockopt() Must return target values and target errnos. */
2093 static abi_long do_setsockopt(int sockfd, int level, int optname,
2094 abi_ulong optval_addr, socklen_t optlen)
2096 abi_long ret;
2097 int val;
2098 struct ip_mreqn *ip_mreq;
2099 struct ip_mreq_source *ip_mreq_source;
2101 switch(level) {
2102 case SOL_TCP:
2103 /* TCP options all take an 'int' value. */
2104 if (optlen < sizeof(uint32_t))
2105 return -TARGET_EINVAL;
2107 if (get_user_u32(val, optval_addr))
2108 return -TARGET_EFAULT;
2109 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2110 break;
2111 case SOL_IP:
2112 switch(optname) {
2113 case IP_TOS:
2114 case IP_TTL:
2115 case IP_HDRINCL:
2116 case IP_ROUTER_ALERT:
2117 case IP_RECVOPTS:
2118 case IP_RETOPTS:
2119 case IP_PKTINFO:
2120 case IP_MTU_DISCOVER:
2121 case IP_RECVERR:
2122 case IP_RECVTOS:
2123 #ifdef IP_FREEBIND
2124 case IP_FREEBIND:
2125 #endif
2126 case IP_MULTICAST_TTL:
2127 case IP_MULTICAST_LOOP:
2128 val = 0;
2129 if (optlen >= sizeof(uint32_t)) {
2130 if (get_user_u32(val, optval_addr))
2131 return -TARGET_EFAULT;
2132 } else if (optlen >= 1) {
2133 if (get_user_u8(val, optval_addr))
2134 return -TARGET_EFAULT;
2136 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2137 break;
2138 case IP_ADD_MEMBERSHIP:
2139 case IP_DROP_MEMBERSHIP:
2140 if (optlen < sizeof (struct target_ip_mreq) ||
2141 optlen > sizeof (struct target_ip_mreqn))
2142 return -TARGET_EINVAL;
2144 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2145 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2146 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2147 break;
2149 case IP_BLOCK_SOURCE:
2150 case IP_UNBLOCK_SOURCE:
2151 case IP_ADD_SOURCE_MEMBERSHIP:
2152 case IP_DROP_SOURCE_MEMBERSHIP:
2153 if (optlen != sizeof (struct target_ip_mreq_source))
2154 return -TARGET_EINVAL;
2156 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2157 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2158 unlock_user (ip_mreq_source, optval_addr, 0);
2159 break;
2161 default:
2162 goto unimplemented;
2164 break;
2165 case SOL_IPV6:
2166 switch (optname) {
2167 case IPV6_MTU_DISCOVER:
2168 case IPV6_MTU:
2169 case IPV6_V6ONLY:
2170 case IPV6_RECVPKTINFO:
2171 val = 0;
2172 if (optlen < sizeof(uint32_t)) {
2173 return -TARGET_EINVAL;
2175 if (get_user_u32(val, optval_addr)) {
2176 return -TARGET_EFAULT;
2178 ret = get_errno(setsockopt(sockfd, level, optname,
2179 &val, sizeof(val)));
2180 break;
2181 default:
2182 goto unimplemented;
2184 break;
2185 case SOL_RAW:
2186 switch (optname) {
2187 case ICMP_FILTER:
2188 /* struct icmp_filter takes an u32 value */
2189 if (optlen < sizeof(uint32_t)) {
2190 return -TARGET_EINVAL;
2193 if (get_user_u32(val, optval_addr)) {
2194 return -TARGET_EFAULT;
2196 ret = get_errno(setsockopt(sockfd, level, optname,
2197 &val, sizeof(val)));
2198 break;
2200 default:
2201 goto unimplemented;
2203 break;
2204 case TARGET_SOL_SOCKET:
2205 switch (optname) {
2206 case TARGET_SO_RCVTIMEO:
2208 struct timeval tv;
2210 optname = SO_RCVTIMEO;
2212 set_timeout:
2213 if (optlen != sizeof(struct target_timeval)) {
2214 return -TARGET_EINVAL;
2217 if (copy_from_user_timeval(&tv, optval_addr)) {
2218 return -TARGET_EFAULT;
2221 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2222 &tv, sizeof(tv)));
2223 return ret;
2225 case TARGET_SO_SNDTIMEO:
2226 optname = SO_SNDTIMEO;
2227 goto set_timeout;
2228 case TARGET_SO_ATTACH_FILTER:
2230 struct target_sock_fprog *tfprog;
2231 struct target_sock_filter *tfilter;
2232 struct sock_fprog fprog;
2233 struct sock_filter *filter;
2234 int i;
2236 if (optlen != sizeof(*tfprog)) {
2237 return -TARGET_EINVAL;
2239 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2240 return -TARGET_EFAULT;
2242 if (!lock_user_struct(VERIFY_READ, tfilter,
2243 tswapal(tfprog->filter), 0)) {
2244 unlock_user_struct(tfprog, optval_addr, 1);
2245 return -TARGET_EFAULT;
2248 fprog.len = tswap16(tfprog->len);
2249 filter = g_try_new(struct sock_filter, fprog.len);
2250 if (filter == NULL) {
2251 unlock_user_struct(tfilter, tfprog->filter, 1);
2252 unlock_user_struct(tfprog, optval_addr, 1);
2253 return -TARGET_ENOMEM;
2255 for (i = 0; i < fprog.len; i++) {
2256 filter[i].code = tswap16(tfilter[i].code);
2257 filter[i].jt = tfilter[i].jt;
2258 filter[i].jf = tfilter[i].jf;
2259 filter[i].k = tswap32(tfilter[i].k);
2261 fprog.filter = filter;
2263 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2264 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2265 g_free(filter);
2267 unlock_user_struct(tfilter, tfprog->filter, 1);
2268 unlock_user_struct(tfprog, optval_addr, 1);
2269 return ret;
2271 case TARGET_SO_BINDTODEVICE:
2273 char *dev_ifname, *addr_ifname;
2275 if (optlen > IFNAMSIZ - 1) {
2276 optlen = IFNAMSIZ - 1;
2278 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2279 if (!dev_ifname) {
2280 return -TARGET_EFAULT;
2282 optname = SO_BINDTODEVICE;
2283 addr_ifname = alloca(IFNAMSIZ);
2284 memcpy(addr_ifname, dev_ifname, optlen);
2285 addr_ifname[optlen] = 0;
2286 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2287 addr_ifname, optlen));
2288 unlock_user (dev_ifname, optval_addr, 0);
2289 return ret;
2291 /* Options with 'int' argument. */
2292 case TARGET_SO_DEBUG:
2293 optname = SO_DEBUG;
2294 break;
2295 case TARGET_SO_REUSEADDR:
2296 optname = SO_REUSEADDR;
2297 break;
2298 case TARGET_SO_TYPE:
2299 optname = SO_TYPE;
2300 break;
2301 case TARGET_SO_ERROR:
2302 optname = SO_ERROR;
2303 break;
2304 case TARGET_SO_DONTROUTE:
2305 optname = SO_DONTROUTE;
2306 break;
2307 case TARGET_SO_BROADCAST:
2308 optname = SO_BROADCAST;
2309 break;
2310 case TARGET_SO_SNDBUF:
2311 optname = SO_SNDBUF;
2312 break;
2313 case TARGET_SO_SNDBUFFORCE:
2314 optname = SO_SNDBUFFORCE;
2315 break;
2316 case TARGET_SO_RCVBUF:
2317 optname = SO_RCVBUF;
2318 break;
2319 case TARGET_SO_RCVBUFFORCE:
2320 optname = SO_RCVBUFFORCE;
2321 break;
2322 case TARGET_SO_KEEPALIVE:
2323 optname = SO_KEEPALIVE;
2324 break;
2325 case TARGET_SO_OOBINLINE:
2326 optname = SO_OOBINLINE;
2327 break;
2328 case TARGET_SO_NO_CHECK:
2329 optname = SO_NO_CHECK;
2330 break;
2331 case TARGET_SO_PRIORITY:
2332 optname = SO_PRIORITY;
2333 break;
2334 #ifdef SO_BSDCOMPAT
2335 case TARGET_SO_BSDCOMPAT:
2336 optname = SO_BSDCOMPAT;
2337 break;
2338 #endif
2339 case TARGET_SO_PASSCRED:
2340 optname = SO_PASSCRED;
2341 break;
2342 case TARGET_SO_PASSSEC:
2343 optname = SO_PASSSEC;
2344 break;
2345 case TARGET_SO_TIMESTAMP:
2346 optname = SO_TIMESTAMP;
2347 break;
2348 case TARGET_SO_RCVLOWAT:
2349 optname = SO_RCVLOWAT;
2350 break;
2351 break;
2352 default:
2353 goto unimplemented;
2355 if (optlen < sizeof(uint32_t))
2356 return -TARGET_EINVAL;
2358 if (get_user_u32(val, optval_addr))
2359 return -TARGET_EFAULT;
2360 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2361 break;
2362 default:
2363 unimplemented:
2364 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2365 ret = -TARGET_ENOPROTOOPT;
2367 return ret;
/* do_getsockopt() Must return target values and target errnos.
 *
 * Emulates getsockopt(2): translates the target-level option constants
 * to host ones, runs the host syscall, then converts the result back
 * into guest memory at optval_addr/optlen.  Only a subset of levels
 * and options is supported; unknown combinations land on the
 * "unimplemented" label.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            /* SO_PEERCRED returns a struct ucred which must be converted
             * field-by-field into the target's layout. */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Copy back no more than the guest asked for. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            /* Anything unrecognized is passed through as an int option. */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): sizeof(lv) looks intended to be sizeof(val);
         * both are 4 bytes on supported hosts so behavior matches. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* Write back either a full 32-bit value or a single byte,
         * depending on the buffer size the guest supplied. */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Small buffer and small value: return it as one byte,
             * mirroring the kernel's IP-level getsockopt behavior. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Convert a guest iovec array at target_addr into a host struct iovec
 * array, locking each referenced guest buffer into host memory.
 *
 * type/copy are passed through to lock_user() (VERIFY_READ/VERIFY_WRITE
 * and whether to copy guest data in).  On success returns a g_malloc'ed
 * vector that must be released with unlock_iovec(); on failure returns
 * NULL with errno set (host errno values, converted by the caller).
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        /* Zero-length iovec: valid, nothing to lock. */
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp the running total so it never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every buffer locked before the failure. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
2646 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2647 int count, int copy)
2649 struct target_iovec *target_vec;
2650 int i;
2652 target_vec = lock_user(VERIFY_READ, target_addr,
2653 count * sizeof(struct target_iovec), 1);
2654 if (target_vec) {
2655 for (i = 0; i < count; i++) {
2656 abi_ulong base = tswapal(target_vec[i].iov_base);
2657 abi_long len = tswapal(target_vec[i].iov_len);
2658 if (len < 0) {
2659 break;
2661 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2663 unlock_user(target_vec, target_addr, 0);
2666 g_free(vec);
/* Translate a target socket type value (base type plus TARGET_SOCK_CLOEXEC
 * / TARGET_SOCK_NONBLOCK flags) into the host's encoding, in place.
 * Returns 0 on success or -TARGET_EINVAL when the host cannot express a
 * requested flag at socket-creation time.
 */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        /* Other base types are assumed to share the host's numbering. */
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        /* No way to honor close-on-exec at socket() time on this host. */
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        /* Neither SOCK_NONBLOCK nor an fcntl fallback available. */
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
2703 /* Try to emulate socket type flags after socket creation. */
2704 static int sock_flags_fixup(int fd, int target_type)
2706 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2707 if (target_type & TARGET_SOCK_NONBLOCK) {
2708 int flags = fcntl(fd, F_GETFL);
2709 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2710 close(fd);
2711 return -TARGET_EINVAL;
2714 #endif
2715 return fd;
/* Convert a guest AF_PACKET-style sockaddr at target_addr into host form.
 * Copies the raw bytes, then byte-swaps only sa_family; the trailing
 * protocol field is left as-is (it is big-endian on the wire).
 * Returns 0 on success or -TARGET_EFAULT if the guest address is bad.
 */
static abi_long packet_target_to_host_sockaddr(void *host_addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    struct sockaddr *addr = host_addr;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr) {
        return -TARGET_EFAULT;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = tswap16(target_saddr->sa_family);
    /* spkt_protocol is big-endian */

    unlock_user(target_saddr, target_addr, 0);
    return 0;
}
/* fd translator for obsolete SOCK_PACKET sockets: only the sockaddr
 * needs conversion. */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};

#ifdef CONFIG_RTNETLINK
/* Thin wrappers giving the NETLINK_ROUTE message converters the
 * TargetFdTrans callback signature. */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    return target_to_host_nlmsg_route(buf, len);
}

static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    return host_to_target_nlmsg_route(buf, len);
}

static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */

/* Same wrappers for NETLINK_AUDIT message payloads. */
static abi_long netlink_audit_target_to_host(void *buf, size_t len)
{
    return target_to_host_nlmsg_audit(buf, len);
}

static abi_long netlink_audit_host_to_target(void *buf, size_t len)
{
    return host_to_target_nlmsg_audit(buf, len);
}

static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};
/* do_socket() Must return target values and target errnos.
 *
 * Emulates socket(2): converts the target type/flag encoding, rejects
 * netlink protocols we cannot translate, creates the host socket, and
 * registers any fd translator needed for later data/address conversion.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only netlink protocols with payload converters below are allowed. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -EPFNOSUPPORT;
    }

    /* AF_PACKET protocol numbers travel in network byte order. */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Unreachable: filtered by the protocol check above. */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
2828 /* do_bind() Must return target values and target errnos. */
2829 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2830 socklen_t addrlen)
2832 void *addr;
2833 abi_long ret;
2835 if ((int)addrlen < 0) {
2836 return -TARGET_EINVAL;
2839 addr = alloca(addrlen+1);
2841 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2842 if (ret)
2843 return ret;
2845 return get_errno(bind(sockfd, addr, addrlen));
2848 /* do_connect() Must return target values and target errnos. */
2849 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2850 socklen_t addrlen)
2852 void *addr;
2853 abi_long ret;
2855 if ((int)addrlen < 0) {
2856 return -TARGET_EINVAL;
2859 addr = alloca(addrlen+1);
2861 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2862 if (ret)
2863 return ret;
2865 return get_errno(connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Shared core of sendmsg/recvmsg emulation, operating on an already
 * locked target_msghdr.  Converts the name, control, and iovec parts of
 * the message between guest and host representations around the actual
 * host sendmsg/recvmsg call.  'send' selects direction.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Host cmsg encoding can be larger than the target's; double the
     * control buffer to leave headroom for the conversion. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        /* fd-specific payload translator (e.g. netlink) takes priority
         * over generic cmsg conversion. */
        if (fd_trans_target_to_host_data(fd)) {
            ret = fd_trans_target_to_host_data(fd)(msg.msg_iov->iov_base,
                                                   msg.msg_iov->iov_len);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
        }
        if (ret == 0) {
            ret = get_errno(sendmsg(fd, &msg, flags));
        }
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       msg.msg_iov->iov_len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name,
                                                  msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                /* On success return the byte count from recvmsg, not the
                 * conversion helpers' status. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
2947 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2948 int flags, int send)
2950 abi_long ret;
2951 struct target_msghdr *msgp;
2953 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2954 msgp,
2955 target_msg,
2956 send ? 1 : 0)) {
2957 return -TARGET_EFAULT;
2959 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2960 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2961 return ret;
2964 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2965 * so it might not have this *mmsg-specific flag either.
2967 #ifndef MSG_WAITFORONE
2968 #define MSG_WAITFORONE 0x10000
2969 #endif
/* Emulate sendmmsg/recvmmsg by iterating do_sendrecvmsg_locked over the
 * guest mmsghdr array.  Returns the number of messages processed if any
 * succeeded, otherwise the first error.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Kernel silently caps the vector length at UIO_MAXIOV. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Only the entries actually processed need copying back. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    /* Guarded by the callers, per the comment above. */
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif
/* do_accept4() Must return target values and target errnos.
 *
 * Emulates accept4(2) (and accept(2) with flags == 0): converts the
 * flag bits, accepts on the host socket, and writes the peer address
 * and its length back into guest memory when requested.
 */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    /* Caller doesn't want the peer address. */
    if (target_addr == 0) {
        return get_errno(accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
3061 /* do_getpeername() Must return target values and target errnos. */
3062 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3063 abi_ulong target_addrlen_addr)
3065 socklen_t addrlen;
3066 void *addr;
3067 abi_long ret;
3069 if (get_user_u32(addrlen, target_addrlen_addr))
3070 return -TARGET_EFAULT;
3072 if ((int)addrlen < 0) {
3073 return -TARGET_EINVAL;
3076 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3077 return -TARGET_EFAULT;
3079 addr = alloca(addrlen);
3081 ret = get_errno(getpeername(fd, addr, &addrlen));
3082 if (!is_error(ret)) {
3083 host_to_target_sockaddr(target_addr, addr, addrlen);
3084 if (put_user_u32(addrlen, target_addrlen_addr))
3085 ret = -TARGET_EFAULT;
3087 return ret;
3090 /* do_getsockname() Must return target values and target errnos. */
3091 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3092 abi_ulong target_addrlen_addr)
3094 socklen_t addrlen;
3095 void *addr;
3096 abi_long ret;
3098 if (get_user_u32(addrlen, target_addrlen_addr))
3099 return -TARGET_EFAULT;
3101 if ((int)addrlen < 0) {
3102 return -TARGET_EINVAL;
3105 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3106 return -TARGET_EFAULT;
3108 addr = alloca(addrlen);
3110 ret = get_errno(getsockname(fd, addr, &addrlen));
3111 if (!is_error(ret)) {
3112 host_to_target_sockaddr(target_addr, addr, addrlen);
3113 if (put_user_u32(addrlen, target_addrlen_addr))
3114 ret = -TARGET_EFAULT;
3116 return ret;
3119 /* do_socketpair() Must return target values and target errnos. */
3120 static abi_long do_socketpair(int domain, int type, int protocol,
3121 abi_ulong target_tab_addr)
3123 int tab[2];
3124 abi_long ret;
3126 target_to_host_sock_type(&type);
3128 ret = get_errno(socketpair(domain, type, protocol, tab));
3129 if (!is_error(ret)) {
3130 if (put_user_s32(tab[0], target_tab_addr)
3131 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3132 ret = -TARGET_EFAULT;
3134 return ret;
/* do_sendto() Must return target values and target errnos.
 *
 * Emulates both sendto(2) (target_addr != 0) and send(2)
 * (target_addr == 0).  The payload is locked in place; an fd-specific
 * translator, if registered, converts it before transmission.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    /* e.g. netlink messages need target-to-host payload conversion. */
    if (fd_trans_target_to_host_data(fd)) {
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
    }
    if (target_addr) {
        /* +1 leaves room for address-family specific fixups. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(send(fd, host_msg, len, flags));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos.
 *
 * Emulates both recvfrom(2) (target_addr != 0) and recv(2)
 * (target_addr == 0).  On success the received bytes are copied back
 * into the guest buffer and, if requested, the source address and its
 * length are written out as well.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(qemu_recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: copy the received data back to the guest. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Failure: release the buffer without copying anything back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 *
 * Demultiplexes the legacy socketcall(2) interface: reads the per-call
 * argument vector from guest memory at vptr, then dispatches to the
 * matching do_* helper or host syscall.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
#define N_SHM_REGIONS 32

/* Bookkeeping for guest shmat() mappings so shmdt() can find their
 * extent; a fixed-size table of at most N_SHM_REGIONS entries. */
static struct shm_region {
    abi_ulong start;   /* guest start address of the attached segment */
    abi_ulong size;    /* length of the mapping */
    bool in_use;       /* slot occupancy flag */
} shm_regions[N_SHM_REGIONS];
/* Guest-layout semid_ds, used when converting semctl() buffers.
 * Non-PPC64 targets carry padding words after the time fields. */
struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if !defined(TARGET_PPC64)
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;
#if !defined(TARGET_PPC64)
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
/* Copy the ipc_perm embedded in a guest semid_ds at target_addr into
 * *host_ip, byte-swapping each field.  mode/__seq widths differ per
 * target ABI, hence the #ifdefs.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Inverse of target_to_host_ipc_perm(): write *host_ip into the
 * ipc_perm embedded in the guest semid_ds at target_addr, swapping
 * each field to guest byte order.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3386 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3387 abi_ulong target_addr)
3389 struct target_semid_ds *target_sd;
3391 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3392 return -TARGET_EFAULT;
3393 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3394 return -TARGET_EFAULT;
3395 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3396 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3397 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3398 unlock_user_struct(target_sd, target_addr, 0);
3399 return 0;
3402 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3403 struct semid_ds *host_sd)
3405 struct target_semid_ds *target_sd;
3407 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3408 return -TARGET_EFAULT;
3409 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3410 return -TARGET_EFAULT;
3411 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3412 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3413 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3414 unlock_user_struct(target_sd, target_addr, 1);
3415 return 0;
/* Guest-ABI seminfo, produced for semctl IPC_INFO/SEM_INFO; all fields
 * are plain ints on every target, matching the kernel's struct seminfo. */
3418 struct target_seminfo {
3419 int semmap;
3420 int semmni;
3421 int semmns;
3422 int semmnu;
3423 int semmsl;
3424 int semopm;
3425 int semume;
3426 int semusz;
3427 int semvmx;
3428 int semaem;
3431 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3432 struct seminfo *host_seminfo)
3434 struct target_seminfo *target_seminfo;
3435 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3436 return -TARGET_EFAULT;
3437 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3438 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3439 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3440 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3441 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3442 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3443 __put_user(host_seminfo->semume, &target_seminfo->semume);
3444 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3445 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3446 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3447 unlock_user_struct(target_seminfo, target_addr, 1);
3448 return 0;
/* Host-side semctl(2) argument union; defined here because glibc does
 * not export one by default. */
3451 union semun {
3452 int val;
3453 struct semid_ds *buf;
3454 unsigned short *array;
3455 struct seminfo *__buf;
/* Guest-side semun: the pointer members are guest addresses, so they are
 * abi_ulong rather than host pointers. */
3458 union target_semun {
3459 int val;
3460 abi_ulong buf;
3461 abi_ulong array;
3462 abi_ulong __buf;
3465 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3466 abi_ulong target_addr)
3468 int nsems;
3469 unsigned short *array;
3470 union semun semun;
3471 struct semid_ds semid_ds;
3472 int i, ret;
3474 semun.buf = &semid_ds;
3476 ret = semctl(semid, 0, IPC_STAT, semun);
3477 if (ret == -1)
3478 return get_errno(ret);
3480 nsems = semid_ds.sem_nsems;
3482 *host_array = g_try_new(unsigned short, nsems);
3483 if (!*host_array) {
3484 return -TARGET_ENOMEM;
3486 array = lock_user(VERIFY_READ, target_addr,
3487 nsems*sizeof(unsigned short), 1);
3488 if (!array) {
3489 g_free(*host_array);
3490 return -TARGET_EFAULT;
3493 for(i=0; i<nsems; i++) {
3494 __get_user((*host_array)[i], &array[i]);
3496 unlock_user(array, target_addr, 0);
3498 return 0;
3501 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3502 unsigned short **host_array)
3504 int nsems;
3505 unsigned short *array;
3506 union semun semun;
3507 struct semid_ds semid_ds;
3508 int i, ret;
3510 semun.buf = &semid_ds;
3512 ret = semctl(semid, 0, IPC_STAT, semun);
3513 if (ret == -1)
3514 return get_errno(ret);
3516 nsems = semid_ds.sem_nsems;
3518 array = lock_user(VERIFY_WRITE, target_addr,
3519 nsems*sizeof(unsigned short), 0);
3520 if (!array)
3521 return -TARGET_EFAULT;
3523 for(i=0; i<nsems; i++) {
3524 __put_user((*host_array)[i], &array[i]);
3526 g_free(*host_array);
3527 unlock_user(array, target_addr, 1);
3529 return 0;
3532 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3533 abi_ulong target_arg)
3535 union target_semun target_su = { .buf = target_arg };
3536 union semun arg;
3537 struct semid_ds dsarg;
3538 unsigned short *array = NULL;
3539 struct seminfo seminfo;
3540 abi_long ret = -TARGET_EINVAL;
3541 abi_long err;
3542 cmd &= 0xff;
3544 switch( cmd ) {
3545 case GETVAL:
3546 case SETVAL:
3547 /* In 64 bit cross-endian situations, we will erroneously pick up
3548 * the wrong half of the union for the "val" element. To rectify
3549 * this, the entire 8-byte structure is byteswapped, followed by
3550 * a swap of the 4 byte val field. In other cases, the data is
3551 * already in proper host byte order. */
3552 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3553 target_su.buf = tswapal(target_su.buf);
3554 arg.val = tswap32(target_su.val);
3555 } else {
3556 arg.val = target_su.val;
3558 ret = get_errno(semctl(semid, semnum, cmd, arg));
3559 break;
3560 case GETALL:
3561 case SETALL:
3562 err = target_to_host_semarray(semid, &array, target_su.array);
3563 if (err)
3564 return err;
3565 arg.array = array;
3566 ret = get_errno(semctl(semid, semnum, cmd, arg));
3567 err = host_to_target_semarray(semid, target_su.array, &array);
3568 if (err)
3569 return err;
3570 break;
3571 case IPC_STAT:
3572 case IPC_SET:
3573 case SEM_STAT:
3574 err = target_to_host_semid_ds(&dsarg, target_su.buf);
3575 if (err)
3576 return err;
3577 arg.buf = &dsarg;
3578 ret = get_errno(semctl(semid, semnum, cmd, arg));
3579 err = host_to_target_semid_ds(target_su.buf, &dsarg);
3580 if (err)
3581 return err;
3582 break;
3583 case IPC_INFO:
3584 case SEM_INFO:
3585 arg.__buf = &seminfo;
3586 ret = get_errno(semctl(semid, semnum, cmd, arg));
3587 err = host_to_target_seminfo(target_su.__buf, &seminfo);
3588 if (err)
3589 return err;
3590 break;
3591 case IPC_RMID:
3592 case GETPID:
3593 case GETNCNT:
3594 case GETZCNT:
3595 ret = get_errno(semctl(semid, semnum, cmd, NULL));
3596 break;
3599 return ret;
/* Guest-ABI sembuf element for semop(2); layout matches the kernel's. */
3602 struct target_sembuf {
3603 unsigned short sem_num;
3604 short sem_op;
3605 short sem_flg;
3608 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3609 abi_ulong target_addr,
3610 unsigned nsops)
3612 struct target_sembuf *target_sembuf;
3613 int i;
3615 target_sembuf = lock_user(VERIFY_READ, target_addr,
3616 nsops*sizeof(struct target_sembuf), 1);
3617 if (!target_sembuf)
3618 return -TARGET_EFAULT;
3620 for(i=0; i<nsops; i++) {
3621 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3622 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3623 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3626 unlock_user(target_sembuf, target_addr, 0);
3628 return 0;
3631 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3633 struct sembuf sops[nsops];
3635 if (target_to_host_sembuf(sops, ptr, nsops))
3636 return -TARGET_EFAULT;
3638 return get_errno(semop(semid, sops, nsops));
/* Guest-ABI view of struct msqid_ds for msgctl IPC_STAT/IPC_SET.
 * 32-bit ABIs carry a pad word after each time field, matching the
 * kernel's msqid64_ds layout. */
3641 struct target_msqid_ds
3643 struct target_ipc_perm msg_perm;
3644 abi_ulong msg_stime;
3645 #if TARGET_ABI_BITS == 32
3646 abi_ulong __unused1;
3647 #endif
3648 abi_ulong msg_rtime;
3649 #if TARGET_ABI_BITS == 32
3650 abi_ulong __unused2;
3651 #endif
3652 abi_ulong msg_ctime;
3653 #if TARGET_ABI_BITS == 32
3654 abi_ulong __unused3;
3655 #endif
3656 abi_ulong __msg_cbytes;
3657 abi_ulong msg_qnum;
3658 abi_ulong msg_qbytes;
3659 abi_ulong msg_lspid;
3660 abi_ulong msg_lrpid;
3661 abi_ulong __unused4;
3662 abi_ulong __unused5;
3665 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3666 abi_ulong target_addr)
3668 struct target_msqid_ds *target_md;
3670 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3671 return -TARGET_EFAULT;
3672 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3673 return -TARGET_EFAULT;
3674 host_md->msg_stime = tswapal(target_md->msg_stime);
3675 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3676 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3677 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3678 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3679 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3680 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3681 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3682 unlock_user_struct(target_md, target_addr, 0);
3683 return 0;
3686 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3687 struct msqid_ds *host_md)
3689 struct target_msqid_ds *target_md;
3691 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3692 return -TARGET_EFAULT;
3693 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3694 return -TARGET_EFAULT;
3695 target_md->msg_stime = tswapal(host_md->msg_stime);
3696 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3697 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3698 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3699 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3700 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3701 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3702 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3703 unlock_user_struct(target_md, target_addr, 1);
3704 return 0;
/* Guest-ABI msginfo, produced for msgctl IPC_INFO/MSG_INFO; layout
 * matches the kernel's struct msginfo. */
3707 struct target_msginfo {
3708 int msgpool;
3709 int msgmap;
3710 int msgmax;
3711 int msgmnb;
3712 int msgmni;
3713 int msgssz;
3714 int msgtql;
3715 unsigned short int msgseg;
3718 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3719 struct msginfo *host_msginfo)
3721 struct target_msginfo *target_msginfo;
3722 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3723 return -TARGET_EFAULT;
3724 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3725 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3726 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3727 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3728 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3729 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3730 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3731 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3732 unlock_user_struct(target_msginfo, target_addr, 1);
3733 return 0;
3736 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3738 struct msqid_ds dsarg;
3739 struct msginfo msginfo;
3740 abi_long ret = -TARGET_EINVAL;
3742 cmd &= 0xff;
3744 switch (cmd) {
3745 case IPC_STAT:
3746 case IPC_SET:
3747 case MSG_STAT:
3748 if (target_to_host_msqid_ds(&dsarg,ptr))
3749 return -TARGET_EFAULT;
3750 ret = get_errno(msgctl(msgid, cmd, &dsarg));
3751 if (host_to_target_msqid_ds(ptr,&dsarg))
3752 return -TARGET_EFAULT;
3753 break;
3754 case IPC_RMID:
3755 ret = get_errno(msgctl(msgid, cmd, NULL));
3756 break;
3757 case IPC_INFO:
3758 case MSG_INFO:
3759 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3760 if (host_to_target_msginfo(ptr, &msginfo))
3761 return -TARGET_EFAULT;
3762 break;
3765 return ret;
/* Guest-ABI msgbuf header; mtext is really variable length, [1] is the
 * traditional pre-C99 flexible-array idiom. */
3768 struct target_msgbuf {
3769 abi_long mtype;
3770 char mtext[1];
3773 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3774 ssize_t msgsz, int msgflg)
3776 struct target_msgbuf *target_mb;
3777 struct msgbuf *host_mb;
3778 abi_long ret = 0;
3780 if (msgsz < 0) {
3781 return -TARGET_EINVAL;
3784 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3785 return -TARGET_EFAULT;
3786 host_mb = g_try_malloc(msgsz + sizeof(long));
3787 if (!host_mb) {
3788 unlock_user_struct(target_mb, msgp, 0);
3789 return -TARGET_ENOMEM;
3791 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3792 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3793 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
3794 g_free(host_mb);
3795 unlock_user_struct(target_mb, msgp, 0);
3797 return ret;
3800 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3801 ssize_t msgsz, abi_long msgtyp,
3802 int msgflg)
3804 struct target_msgbuf *target_mb;
3805 char *target_mtext;
3806 struct msgbuf *host_mb;
3807 abi_long ret = 0;
3809 if (msgsz < 0) {
3810 return -TARGET_EINVAL;
3813 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3814 return -TARGET_EFAULT;
3816 host_mb = g_try_malloc(msgsz + sizeof(long));
3817 if (!host_mb) {
3818 ret = -TARGET_ENOMEM;
3819 goto end;
3821 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3823 if (ret > 0) {
3824 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3825 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3826 if (!target_mtext) {
3827 ret = -TARGET_EFAULT;
3828 goto end;
3830 memcpy(target_mb->mtext, host_mb->mtext, ret);
3831 unlock_user(target_mtext, target_mtext_addr, ret);
3834 target_mb->mtype = tswapal(host_mb->mtype);
3836 end:
3837 if (target_mb)
3838 unlock_user_struct(target_mb, msgp, 1);
3839 g_free(host_mb);
3840 return ret;
3843 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3844 abi_ulong target_addr)
3846 struct target_shmid_ds *target_sd;
3848 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3849 return -TARGET_EFAULT;
3850 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3851 return -TARGET_EFAULT;
3852 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3853 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3854 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3855 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3856 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3857 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3858 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3859 unlock_user_struct(target_sd, target_addr, 0);
3860 return 0;
3863 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3864 struct shmid_ds *host_sd)
3866 struct target_shmid_ds *target_sd;
3868 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3869 return -TARGET_EFAULT;
3870 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3871 return -TARGET_EFAULT;
3872 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3873 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3874 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3875 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3876 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3877 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3878 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3879 unlock_user_struct(target_sd, target_addr, 1);
3880 return 0;
/* Guest-ABI shminfo, produced for shmctl IPC_INFO. */
3883 struct target_shminfo {
3884 abi_ulong shmmax;
3885 abi_ulong shmmin;
3886 abi_ulong shmmni;
3887 abi_ulong shmseg;
3888 abi_ulong shmall;
3891 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3892 struct shminfo *host_shminfo)
3894 struct target_shminfo *target_shminfo;
3895 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3896 return -TARGET_EFAULT;
3897 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3898 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3899 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3900 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3901 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3902 unlock_user_struct(target_shminfo, target_addr, 1);
3903 return 0;
/* Guest-ABI shm_info, produced for shmctl SHM_INFO. */
3906 struct target_shm_info {
3907 int used_ids;
3908 abi_ulong shm_tot;
3909 abi_ulong shm_rss;
3910 abi_ulong shm_swp;
3911 abi_ulong swap_attempts;
3912 abi_ulong swap_successes;
3915 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3916 struct shm_info *host_shm_info)
3918 struct target_shm_info *target_shm_info;
3919 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3920 return -TARGET_EFAULT;
3921 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3922 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3923 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3924 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3925 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3926 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3927 unlock_user_struct(target_shm_info, target_addr, 1);
3928 return 0;
3931 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3933 struct shmid_ds dsarg;
3934 struct shminfo shminfo;
3935 struct shm_info shm_info;
3936 abi_long ret = -TARGET_EINVAL;
3938 cmd &= 0xff;
3940 switch(cmd) {
3941 case IPC_STAT:
3942 case IPC_SET:
3943 case SHM_STAT:
3944 if (target_to_host_shmid_ds(&dsarg, buf))
3945 return -TARGET_EFAULT;
3946 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3947 if (host_to_target_shmid_ds(buf, &dsarg))
3948 return -TARGET_EFAULT;
3949 break;
3950 case IPC_INFO:
3951 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3952 if (host_to_target_shminfo(buf, &shminfo))
3953 return -TARGET_EFAULT;
3954 break;
3955 case SHM_INFO:
3956 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3957 if (host_to_target_shm_info(buf, &shm_info))
3958 return -TARGET_EFAULT;
3959 break;
3960 case IPC_RMID:
3961 case SHM_LOCK:
3962 case SHM_UNLOCK:
3963 ret = get_errno(shmctl(shmid, cmd, NULL));
3964 break;
3967 return ret;
/* Attach a SysV shared memory segment into the guest address space and
 * return the guest address (or a target errno).  The mmap lock is held
 * across the VMA search and the host shmat() so another guest mmap
 * cannot steal the chosen region.  NOTE(review): if every shm_regions[]
 * slot is in use the attach still succeeds but is not tracked, so a
 * later shmdt() will not clear the page flags — confirm whether that is
 * acceptable. */
3970 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3972 abi_long raddr;
3973 void *host_raddr;
3974 struct shmid_ds shm_info;
3975 int i,ret;
3977 /* find out the length of the shared memory segment */
3978 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3979 if (is_error(ret)) {
3980 /* can't get length, bail out */
3981 return ret;
3984 mmap_lock();
3986 if (shmaddr)
3987 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3988 else {
3989 abi_ulong mmap_start;
3991 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3993 if (mmap_start == -1) {
3994 errno = ENOMEM;
3995 host_raddr = (void *)-1;
3996 } else
/* SHM_REMAP: we picked the range ourselves, so map over the reservation */
3997 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4000 if (host_raddr == (void *)-1) {
4001 mmap_unlock();
4002 return get_errno((long)host_raddr);
4004 raddr=h2g((unsigned long)host_raddr);
/* mark the guest pages valid; writable unless attached read-only */
4006 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4007 PAGE_VALID | PAGE_READ |
4008 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* remember the region so do_shmdt() can undo the page flags */
4010 for (i = 0; i < N_SHM_REGIONS; i++) {
4011 if (!shm_regions[i].in_use) {
4012 shm_regions[i].in_use = true;
4013 shm_regions[i].start = raddr;
4014 shm_regions[i].size = shm_info.shm_segsz;
4015 break;
4019 mmap_unlock();
4020 return raddr;
4024 static inline abi_long do_shmdt(abi_ulong shmaddr)
4026 int i;
4028 for (i = 0; i < N_SHM_REGIONS; ++i) {
4029 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4030 shm_regions[i].in_use = false;
4031 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4032 break;
4036 return get_errno(shmdt(g2h(shmaddr)));
4039 #ifdef TARGET_NR_ipc
4040 /* ??? This only works with linear mappings. */
4041 /* do_ipc() must return target values and target errnos. */
/* Demultiplexer for the legacy ipc(2) syscall: `call` selects the SysV
 * IPC operation (low 16 bits) and an ABI version (high 16 bits), and the
 * remaining arguments are forwarded to the matching do_* helper. */
4042 static abi_long do_ipc(unsigned int call, abi_long first,
4043 abi_long second, abi_long third,
4044 abi_long ptr, abi_long fifth)
4046 int version;
4047 abi_long ret = 0;
4049 version = call >> 16;
4050 call &= 0xffff;
4052 switch (call) {
4053 case IPCOP_semop:
4054 ret = do_semop(first, ptr, second);
4055 break;
4057 case IPCOP_semget:
4058 ret = get_errno(semget(first, second, third));
4059 break;
4061 case IPCOP_semctl: {
4062 /* The semun argument to semctl is passed by value, so dereference the
4063 * ptr argument. */
4064 abi_ulong atptr;
/* NOTE(review): get_user_ual's result is not checked here — on a bad
 * guest pointer atptr is used anyway; confirm against upstream. */
4065 get_user_ual(atptr, ptr);
4066 ret = do_semctl(first, second, third, atptr);
4067 break;
4070 case IPCOP_msgget:
4071 ret = get_errno(msgget(first, second));
4072 break;
4074 case IPCOP_msgsnd:
4075 ret = do_msgsnd(first, ptr, second, third);
4076 break;
4078 case IPCOP_msgctl:
4079 ret = do_msgctl(first, second, ptr);
4080 break;
4082 case IPCOP_msgrcv:
4083 switch (version) {
4084 case 0:
/* version 0: msgp/msgtyp are packed in a kludge struct at ptr */
4086 struct target_ipc_kludge {
4087 abi_long msgp;
4088 abi_long msgtyp;
4089 } *tmp;
4091 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4092 ret = -TARGET_EFAULT;
4093 break;
4096 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4098 unlock_user_struct(tmp, ptr, 0);
4099 break;
4101 default:
4102 ret = do_msgrcv(first, ptr, second, fifth, third);
4104 break;
4106 case IPCOP_shmat:
4107 switch (version) {
4108 default:
4110 abi_ulong raddr;
4111 raddr = do_shmat(first, ptr, second);
4112 if (is_error(raddr))
4113 return get_errno(raddr);
/* the attach address is returned through *third */
4114 if (put_user_ual(raddr, third))
4115 return -TARGET_EFAULT;
4116 break;
4118 case 1:
4119 ret = -TARGET_EINVAL;
4120 break;
4122 break;
4123 case IPCOP_shmdt:
4124 ret = do_shmdt(ptr);
4125 break;
4127 case IPCOP_shmget:
4128 /* IPC_* flag values are the same on all linux platforms */
4129 ret = get_errno(shmget(first, second, third));
4130 break;
4132 /* IPC_* and SHM_* command values are the same on all linux platforms */
4133 case IPCOP_shmctl:
4134 ret = do_shmctl(first, second, ptr);
4135 break;
4136 default:
4137 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4138 ret = -TARGET_ENOSYS;
4139 break;
4141 return ret;
4143 #endif
4145 /* kernel structure types definitions */
/* X-macro expansion of syscall_types.h: the first include builds an enum
 * of STRUCT_<name> ids, the second builds one argtype descriptor table
 * per structure for the thunk conversion code. */
4147 #define STRUCT(name, ...) STRUCT_ ## name,
4148 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4149 enum {
4150 #include "syscall_types.h"
4151 STRUCT_MAX
4153 #undef STRUCT
4154 #undef STRUCT_SPECIAL
/* Second pass: emit the per-struct field descriptor arrays.  SPECIAL
 * structs are converted by hand-written code, so no table is generated. */
4156 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4157 #define STRUCT_SPECIAL(name)
4158 #include "syscall_types.h"
4159 #undef STRUCT
4160 #undef STRUCT_SPECIAL
/* Table entry describing one emulated ioctl: the guest/host command
 * numbers, an optional custom conversion hook, and the argument type
 * descriptors used for generic thunk-based conversion. */
4162 typedef struct IOCTLEntry IOCTLEntry;
4164 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4165 int fd, int cmd, abi_long arg);
4167 struct IOCTLEntry {
4168 int target_cmd;
4169 unsigned int host_cmd;
4170 const char *name;
4171 int access;
4172 do_ioctl_fn *do_ioctl;
4173 const argtype arg_type[5];
/* access direction flags for IOCTLEntry.access */
4176 #define IOC_R 0x0001
4177 #define IOC_W 0x0002
4178 #define IOC_RW (IOC_R | IOC_W)
/* size of the fixed on-stack conversion buffer for ioctl payloads */
4180 #define MAX_STRUCT_SIZE 4096
4182 #ifdef CONFIG_FIEMAP
4183 /* So fiemap access checks don't overflow on 32 bit systems.
4184 * This is very slightly smaller than the limit imposed by
4185 * the underlying kernel.
4187 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4188 / sizeof(struct fiemap_extent))
/* Custom converter for FS_IOC_FIEMAP: a struct fiemap header followed by
 * fm_extent_count guest-sized fiemap_extent entries.  Falls back to a
 * heap buffer when the result does not fit in buf_temp. */
4190 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4191 int fd, int cmd, abi_long arg)
4193 /* The parameter for this ioctl is a struct fiemap followed
4194 * by an array of struct fiemap_extent whose size is set
4195 * in fiemap->fm_extent_count. The array is filled in by the
4196 * ioctl.
4198 int target_size_in, target_size_out;
4199 struct fiemap *fm;
4200 const argtype *arg_type = ie->arg_type;
4201 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4202 void *argptr, *p;
4203 abi_long ret;
4204 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4205 uint32_t outbufsz;
4206 int free_fm = 0;
4208 assert(arg_type[0] == TYPE_PTR);
4209 assert(ie->access == IOC_RW);
4210 arg_type++;
/* convert the guest's input header into buf_temp */
4211 target_size_in = thunk_type_size(arg_type, 0);
4212 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4213 if (!argptr) {
4214 return -TARGET_EFAULT;
4216 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4217 unlock_user(argptr, arg, 0);
4218 fm = (struct fiemap *)buf_temp;
/* bound the extent count so outbufsz below cannot overflow */
4219 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4220 return -TARGET_EINVAL;
4223 outbufsz = sizeof (*fm) +
4224 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4226 if (outbufsz > MAX_STRUCT_SIZE) {
4227 /* We can't fit all the extents into the fixed size buffer.
4228 * Allocate one that is large enough and use it instead.
4230 fm = g_try_malloc(outbufsz);
4231 if (!fm) {
4232 return -TARGET_ENOMEM;
4234 memcpy(fm, buf_temp, sizeof(struct fiemap));
4235 free_fm = 1;
4237 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
4238 if (!is_error(ret)) {
4239 target_size_out = target_size_in;
4240 /* An extent_count of 0 means we were only counting the extents
4241 * so there are no structs to copy
4243 if (fm->fm_extent_count != 0) {
4244 target_size_out += fm->fm_mapped_extents * extent_size;
4246 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4247 if (!argptr) {
4248 ret = -TARGET_EFAULT;
4249 } else {
4250 /* Convert the struct fiemap */
4251 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4252 if (fm->fm_extent_count != 0) {
4253 p = argptr + target_size_in;
4254 /* ...and then all the struct fiemap_extents */
4255 for (i = 0; i < fm->fm_mapped_extents; i++) {
4256 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4257 THUNK_TARGET);
4258 p += extent_size;
4261 unlock_user(argptr, arg, target_size_out);
4264 if (free_fm) {
4265 g_free(fm);
4267 return ret;
4269 #endif
4271 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4272 int fd, int cmd, abi_long arg)
4274 const argtype *arg_type = ie->arg_type;
4275 int target_size;
4276 void *argptr;
4277 int ret;
4278 struct ifconf *host_ifconf;
4279 uint32_t outbufsz;
4280 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4281 int target_ifreq_size;
4282 int nb_ifreq;
4283 int free_buf = 0;
4284 int i;
4285 int target_ifc_len;
4286 abi_long target_ifc_buf;
4287 int host_ifc_len;
4288 char *host_ifc_buf;
4290 assert(arg_type[0] == TYPE_PTR);
4291 assert(ie->access == IOC_RW);
4293 arg_type++;
4294 target_size = thunk_type_size(arg_type, 0);
4296 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4297 if (!argptr)
4298 return -TARGET_EFAULT;
4299 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4300 unlock_user(argptr, arg, 0);
4302 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4303 target_ifc_len = host_ifconf->ifc_len;
4304 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4306 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4307 nb_ifreq = target_ifc_len / target_ifreq_size;
4308 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4310 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4311 if (outbufsz > MAX_STRUCT_SIZE) {
4312 /* We can't fit all the extents into the fixed size buffer.
4313 * Allocate one that is large enough and use it instead.
4315 host_ifconf = malloc(outbufsz);
4316 if (!host_ifconf) {
4317 return -TARGET_ENOMEM;
4319 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4320 free_buf = 1;
4322 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4324 host_ifconf->ifc_len = host_ifc_len;
4325 host_ifconf->ifc_buf = host_ifc_buf;
4327 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
4328 if (!is_error(ret)) {
4329 /* convert host ifc_len to target ifc_len */
4331 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4332 target_ifc_len = nb_ifreq * target_ifreq_size;
4333 host_ifconf->ifc_len = target_ifc_len;
4335 /* restore target ifc_buf */
4337 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4339 /* copy struct ifconf to target user */
4341 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4342 if (!argptr)
4343 return -TARGET_EFAULT;
4344 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4345 unlock_user(argptr, arg, target_size);
4347 /* copy ifreq[] to target user */
4349 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4350 for (i = 0; i < nb_ifreq ; i++) {
4351 thunk_convert(argptr + i * target_ifreq_size,
4352 host_ifc_buf + i * sizeof(struct ifreq),
4353 ifreq_arg_type, THUNK_TARGET);
4355 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4358 if (free_buf) {
4359 free(host_ifconf);
4362 return ret;
4365 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4366 int cmd, abi_long arg)
4368 void *argptr;
4369 struct dm_ioctl *host_dm;
4370 abi_long guest_data;
4371 uint32_t guest_data_size;
4372 int target_size;
4373 const argtype *arg_type = ie->arg_type;
4374 abi_long ret;
4375 void *big_buf = NULL;
4376 char *host_data;
4378 arg_type++;
4379 target_size = thunk_type_size(arg_type, 0);
4380 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4381 if (!argptr) {
4382 ret = -TARGET_EFAULT;
4383 goto out;
4385 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4386 unlock_user(argptr, arg, 0);
4388 /* buf_temp is too small, so fetch things into a bigger buffer */
4389 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4390 memcpy(big_buf, buf_temp, target_size);
4391 buf_temp = big_buf;
4392 host_dm = big_buf;
4394 guest_data = arg + host_dm->data_start;
4395 if ((guest_data - arg) < 0) {
4396 ret = -EINVAL;
4397 goto out;
4399 guest_data_size = host_dm->data_size - host_dm->data_start;
4400 host_data = (char*)host_dm + host_dm->data_start;
4402 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4403 switch (ie->host_cmd) {
4404 case DM_REMOVE_ALL:
4405 case DM_LIST_DEVICES:
4406 case DM_DEV_CREATE:
4407 case DM_DEV_REMOVE:
4408 case DM_DEV_SUSPEND:
4409 case DM_DEV_STATUS:
4410 case DM_DEV_WAIT:
4411 case DM_TABLE_STATUS:
4412 case DM_TABLE_CLEAR:
4413 case DM_TABLE_DEPS:
4414 case DM_LIST_VERSIONS:
4415 /* no input data */
4416 break;
4417 case DM_DEV_RENAME:
4418 case DM_DEV_SET_GEOMETRY:
4419 /* data contains only strings */
4420 memcpy(host_data, argptr, guest_data_size);
4421 break;
4422 case DM_TARGET_MSG:
4423 memcpy(host_data, argptr, guest_data_size);
4424 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4425 break;
4426 case DM_TABLE_LOAD:
4428 void *gspec = argptr;
4429 void *cur_data = host_data;
4430 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4431 int spec_size = thunk_type_size(arg_type, 0);
4432 int i;
4434 for (i = 0; i < host_dm->target_count; i++) {
4435 struct dm_target_spec *spec = cur_data;
4436 uint32_t next;
4437 int slen;
4439 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4440 slen = strlen((char*)gspec + spec_size) + 1;
4441 next = spec->next;
4442 spec->next = sizeof(*spec) + slen;
4443 strcpy((char*)&spec[1], gspec + spec_size);
4444 gspec += next;
4445 cur_data += spec->next;
4447 break;
4449 default:
4450 ret = -TARGET_EINVAL;
4451 unlock_user(argptr, guest_data, 0);
4452 goto out;
4454 unlock_user(argptr, guest_data, 0);
4456 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4457 if (!is_error(ret)) {
4458 guest_data = arg + host_dm->data_start;
4459 guest_data_size = host_dm->data_size - host_dm->data_start;
4460 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4461 switch (ie->host_cmd) {
4462 case DM_REMOVE_ALL:
4463 case DM_DEV_CREATE:
4464 case DM_DEV_REMOVE:
4465 case DM_DEV_RENAME:
4466 case DM_DEV_SUSPEND:
4467 case DM_DEV_STATUS:
4468 case DM_TABLE_LOAD:
4469 case DM_TABLE_CLEAR:
4470 case DM_TARGET_MSG:
4471 case DM_DEV_SET_GEOMETRY:
4472 /* no return data */
4473 break;
4474 case DM_LIST_DEVICES:
4476 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4477 uint32_t remaining_data = guest_data_size;
4478 void *cur_data = argptr;
4479 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4480 int nl_size = 12; /* can't use thunk_size due to alignment */
4482 while (1) {
4483 uint32_t next = nl->next;
4484 if (next) {
4485 nl->next = nl_size + (strlen(nl->name) + 1);
4487 if (remaining_data < nl->next) {
4488 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4489 break;
4491 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4492 strcpy(cur_data + nl_size, nl->name);
4493 cur_data += nl->next;
4494 remaining_data -= nl->next;
4495 if (!next) {
4496 break;
4498 nl = (void*)nl + next;
4500 break;
4502 case DM_DEV_WAIT:
4503 case DM_TABLE_STATUS:
4505 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4506 void *cur_data = argptr;
4507 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4508 int spec_size = thunk_type_size(arg_type, 0);
4509 int i;
4511 for (i = 0; i < host_dm->target_count; i++) {
4512 uint32_t next = spec->next;
4513 int slen = strlen((char*)&spec[1]) + 1;
4514 spec->next = (cur_data - argptr) + spec_size + slen;
4515 if (guest_data_size < spec->next) {
4516 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4517 break;
4519 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4520 strcpy(cur_data + spec_size, (char*)&spec[1]);
4521 cur_data = argptr + spec->next;
4522 spec = (void*)host_dm + host_dm->data_start + next;
4524 break;
4526 case DM_TABLE_DEPS:
4528 void *hdata = (void*)host_dm + host_dm->data_start;
4529 int count = *(uint32_t*)hdata;
4530 uint64_t *hdev = hdata + 8;
4531 uint64_t *gdev = argptr + 8;
4532 int i;
4534 *(uint32_t*)argptr = tswap32(count);
4535 for (i = 0; i < count; i++) {
4536 *gdev = tswap64(*hdev);
4537 gdev++;
4538 hdev++;
4540 break;
4542 case DM_LIST_VERSIONS:
4544 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4545 uint32_t remaining_data = guest_data_size;
4546 void *cur_data = argptr;
4547 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4548 int vers_size = thunk_type_size(arg_type, 0);
4550 while (1) {
4551 uint32_t next = vers->next;
4552 if (next) {
4553 vers->next = vers_size + (strlen(vers->name) + 1);
4555 if (remaining_data < vers->next) {
4556 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4557 break;
4559 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4560 strcpy(cur_data + vers_size, vers->name);
4561 cur_data += vers->next;
4562 remaining_data -= vers->next;
4563 if (!next) {
4564 break;
4566 vers = (void*)vers + next;
4568 break;
4570 default:
4571 unlock_user(argptr, guest_data, 0);
4572 ret = -TARGET_EINVAL;
4573 goto out;
4575 unlock_user(argptr, guest_data, guest_data_size);
4577 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4578 if (!argptr) {
4579 ret = -TARGET_EFAULT;
4580 goto out;
4582 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4583 unlock_user(argptr, arg, target_size);
4585 out:
4586 g_free(big_buf);
4587 return ret;
4590 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4591 int cmd, abi_long arg)
4593 void *argptr;
4594 int target_size;
4595 const argtype *arg_type = ie->arg_type;
4596 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4597 abi_long ret;
4599 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4600 struct blkpg_partition host_part;
4602 /* Read and convert blkpg */
4603 arg_type++;
4604 target_size = thunk_type_size(arg_type, 0);
4605 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4606 if (!argptr) {
4607 ret = -TARGET_EFAULT;
4608 goto out;
4610 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4611 unlock_user(argptr, arg, 0);
4613 switch (host_blkpg->op) {
4614 case BLKPG_ADD_PARTITION:
4615 case BLKPG_DEL_PARTITION:
4616 /* payload is struct blkpg_partition */
4617 break;
4618 default:
4619 /* Unknown opcode */
4620 ret = -TARGET_EINVAL;
4621 goto out;
4624 /* Read and convert blkpg->data */
4625 arg = (abi_long)(uintptr_t)host_blkpg->data;
4626 target_size = thunk_type_size(part_arg_type, 0);
4627 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4628 if (!argptr) {
4629 ret = -TARGET_EFAULT;
4630 goto out;
4632 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4633 unlock_user(argptr, arg, 0);
4635 /* Swizzle the data pointer to our local copy and call! */
4636 host_blkpg->data = &host_part;
4637 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
4639 out:
4640 return ret;
4643 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4644 int fd, int cmd, abi_long arg)
4646 const argtype *arg_type = ie->arg_type;
4647 const StructEntry *se;
4648 const argtype *field_types;
4649 const int *dst_offsets, *src_offsets;
4650 int target_size;
4651 void *argptr;
4652 abi_ulong *target_rt_dev_ptr;
4653 unsigned long *host_rt_dev_ptr;
4654 abi_long ret;
4655 int i;
4657 assert(ie->access == IOC_W);
4658 assert(*arg_type == TYPE_PTR);
4659 arg_type++;
4660 assert(*arg_type == TYPE_STRUCT);
4661 target_size = thunk_type_size(arg_type, 0);
4662 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4663 if (!argptr) {
4664 return -TARGET_EFAULT;
4666 arg_type++;
4667 assert(*arg_type == (int)STRUCT_rtentry);
4668 se = struct_entries + *arg_type++;
4669 assert(se->convert[0] == NULL);
4670 /* convert struct here to be able to catch rt_dev string */
4671 field_types = se->field_types;
4672 dst_offsets = se->field_offsets[THUNK_HOST];
4673 src_offsets = se->field_offsets[THUNK_TARGET];
4674 for (i = 0; i < se->nb_fields; i++) {
4675 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4676 assert(*field_types == TYPE_PTRVOID);
4677 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4678 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4679 if (*target_rt_dev_ptr != 0) {
4680 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4681 tswapal(*target_rt_dev_ptr));
4682 if (!*host_rt_dev_ptr) {
4683 unlock_user(argptr, arg, 0);
4684 return -TARGET_EFAULT;
4686 } else {
4687 *host_rt_dev_ptr = 0;
4689 field_types++;
4690 continue;
4692 field_types = thunk_convert(buf_temp + dst_offsets[i],
4693 argptr + src_offsets[i],
4694 field_types, THUNK_HOST);
4696 unlock_user(argptr, arg, 0);
4698 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4699 if (*host_rt_dev_ptr != 0) {
4700 unlock_user((void *)*host_rt_dev_ptr,
4701 *target_rt_dev_ptr, 0);
4703 return ret;
4706 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4707 int fd, int cmd, abi_long arg)
4709 int sig = target_to_host_signal(arg);
4710 return get_errno(ioctl(fd, ie->host_cmd, sig));
4713 static IOCTLEntry ioctl_entries[] = {
4714 #define IOCTL(cmd, access, ...) \
4715 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4716 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4717 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4718 #include "ioctls.h"
4719 { 0, 0, },
4722 /* ??? Implement proper locking for ioctls. */
4723 /* do_ioctl() Must return target values and target errnos. */
4724 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4726 const IOCTLEntry *ie;
4727 const argtype *arg_type;
4728 abi_long ret;
4729 uint8_t buf_temp[MAX_STRUCT_SIZE];
4730 int target_size;
4731 void *argptr;
4733 ie = ioctl_entries;
4734 for(;;) {
4735 if (ie->target_cmd == 0) {
4736 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4737 return -TARGET_ENOSYS;
4739 if (ie->target_cmd == cmd)
4740 break;
4741 ie++;
4743 arg_type = ie->arg_type;
4744 #if defined(DEBUG)
4745 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
4746 #endif
4747 if (ie->do_ioctl) {
4748 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4751 switch(arg_type[0]) {
4752 case TYPE_NULL:
4753 /* no argument */
4754 ret = get_errno(ioctl(fd, ie->host_cmd));
4755 break;
4756 case TYPE_PTRVOID:
4757 case TYPE_INT:
4758 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
4759 break;
4760 case TYPE_PTR:
4761 arg_type++;
4762 target_size = thunk_type_size(arg_type, 0);
4763 switch(ie->access) {
4764 case IOC_R:
4765 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4766 if (!is_error(ret)) {
4767 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4768 if (!argptr)
4769 return -TARGET_EFAULT;
4770 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4771 unlock_user(argptr, arg, target_size);
4773 break;
4774 case IOC_W:
4775 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4776 if (!argptr)
4777 return -TARGET_EFAULT;
4778 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4779 unlock_user(argptr, arg, 0);
4780 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4781 break;
4782 default:
4783 case IOC_RW:
4784 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4785 if (!argptr)
4786 return -TARGET_EFAULT;
4787 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4788 unlock_user(argptr, arg, 0);
4789 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4790 if (!is_error(ret)) {
4791 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4792 if (!argptr)
4793 return -TARGET_EFAULT;
4794 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4795 unlock_user(argptr, arg, target_size);
4797 break;
4799 break;
4800 default:
4801 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4802 (long)cmd, arg_type[0]);
4803 ret = -TARGET_ENOSYS;
4804 break;
4806 return ret;
4809 static const bitmask_transtbl iflag_tbl[] = {
4810 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4811 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4812 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4813 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4814 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4815 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4816 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4817 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4818 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4819 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4820 { TARGET_IXON, TARGET_IXON, IXON, IXON },
4821 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4822 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4823 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4824 { 0, 0, 0, 0 }
4827 static const bitmask_transtbl oflag_tbl[] = {
4828 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4829 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4830 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4831 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4832 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4833 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4834 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4835 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4836 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4837 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4838 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4839 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4840 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4841 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4842 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4843 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4844 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4845 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4846 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4847 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4848 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4849 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4850 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4851 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4852 { 0, 0, 0, 0 }
4855 static const bitmask_transtbl cflag_tbl[] = {
4856 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4857 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4858 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4859 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4860 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4861 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4862 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4863 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4864 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4865 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4866 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4867 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4868 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4869 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4870 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4871 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4872 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4873 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4874 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4875 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4876 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4877 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4878 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4879 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4880 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4881 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4882 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4883 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4884 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4885 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4886 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4887 { 0, 0, 0, 0 }
4890 static const bitmask_transtbl lflag_tbl[] = {
4891 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4892 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4893 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4894 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4895 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4896 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4897 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4898 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4899 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4900 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4901 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4902 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4903 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4904 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4905 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4906 { 0, 0, 0, 0 }
4909 static void target_to_host_termios (void *dst, const void *src)
4911 struct host_termios *host = dst;
4912 const struct target_termios *target = src;
4914 host->c_iflag =
4915 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4916 host->c_oflag =
4917 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4918 host->c_cflag =
4919 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4920 host->c_lflag =
4921 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4922 host->c_line = target->c_line;
4924 memset(host->c_cc, 0, sizeof(host->c_cc));
4925 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4926 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4927 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4928 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4929 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4930 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4931 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4932 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4933 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4934 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4935 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4936 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4937 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4938 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4939 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4940 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4941 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
4944 static void host_to_target_termios (void *dst, const void *src)
4946 struct target_termios *target = dst;
4947 const struct host_termios *host = src;
4949 target->c_iflag =
4950 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4951 target->c_oflag =
4952 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4953 target->c_cflag =
4954 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4955 target->c_lflag =
4956 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4957 target->c_line = host->c_line;
4959 memset(target->c_cc, 0, sizeof(target->c_cc));
4960 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4961 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4962 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4963 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4964 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4965 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4966 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4967 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4968 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
4969 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
4970 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
4971 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
4972 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
4973 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
4974 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
4975 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
4976 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
4979 static const StructEntry struct_termios_def = {
4980 .convert = { host_to_target_termios, target_to_host_termios },
4981 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
4982 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
4985 static bitmask_transtbl mmap_flags_tbl[] = {
4986 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4987 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4988 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4989 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
4990 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
4991 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
4992 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
4993 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4994 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
4995 MAP_NORESERVE },
4996 { 0, 0, 0, 0 }
4999 #if defined(TARGET_I386)
5001 /* NOTE: there is really one LDT for all the threads */
5002 static uint8_t *ldt_table;
5004 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5006 int size;
5007 void *p;
5009 if (!ldt_table)
5010 return 0;
5011 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5012 if (size > bytecount)
5013 size = bytecount;
5014 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5015 if (!p)
5016 return -TARGET_EFAULT;
5017 /* ??? Should this by byteswapped? */
5018 memcpy(p, ldt_table, size);
5019 unlock_user(p, ptr, size);
5020 return size;
5023 /* XXX: add locking support */
5024 static abi_long write_ldt(CPUX86State *env,
5025 abi_ulong ptr, unsigned long bytecount, int oldmode)
5027 struct target_modify_ldt_ldt_s ldt_info;
5028 struct target_modify_ldt_ldt_s *target_ldt_info;
5029 int seg_32bit, contents, read_exec_only, limit_in_pages;
5030 int seg_not_present, useable, lm;
5031 uint32_t *lp, entry_1, entry_2;
5033 if (bytecount != sizeof(ldt_info))
5034 return -TARGET_EINVAL;
5035 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5036 return -TARGET_EFAULT;
5037 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5038 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5039 ldt_info.limit = tswap32(target_ldt_info->limit);
5040 ldt_info.flags = tswap32(target_ldt_info->flags);
5041 unlock_user_struct(target_ldt_info, ptr, 0);
5043 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5044 return -TARGET_EINVAL;
5045 seg_32bit = ldt_info.flags & 1;
5046 contents = (ldt_info.flags >> 1) & 3;
5047 read_exec_only = (ldt_info.flags >> 3) & 1;
5048 limit_in_pages = (ldt_info.flags >> 4) & 1;
5049 seg_not_present = (ldt_info.flags >> 5) & 1;
5050 useable = (ldt_info.flags >> 6) & 1;
5051 #ifdef TARGET_ABI32
5052 lm = 0;
5053 #else
5054 lm = (ldt_info.flags >> 7) & 1;
5055 #endif
5056 if (contents == 3) {
5057 if (oldmode)
5058 return -TARGET_EINVAL;
5059 if (seg_not_present == 0)
5060 return -TARGET_EINVAL;
5062 /* allocate the LDT */
5063 if (!ldt_table) {
5064 env->ldt.base = target_mmap(0,
5065 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5066 PROT_READ|PROT_WRITE,
5067 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5068 if (env->ldt.base == -1)
5069 return -TARGET_ENOMEM;
5070 memset(g2h(env->ldt.base), 0,
5071 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5072 env->ldt.limit = 0xffff;
5073 ldt_table = g2h(env->ldt.base);
5076 /* NOTE: same code as Linux kernel */
5077 /* Allow LDTs to be cleared by the user. */
5078 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5079 if (oldmode ||
5080 (contents == 0 &&
5081 read_exec_only == 1 &&
5082 seg_32bit == 0 &&
5083 limit_in_pages == 0 &&
5084 seg_not_present == 1 &&
5085 useable == 0 )) {
5086 entry_1 = 0;
5087 entry_2 = 0;
5088 goto install;
5092 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5093 (ldt_info.limit & 0x0ffff);
5094 entry_2 = (ldt_info.base_addr & 0xff000000) |
5095 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5096 (ldt_info.limit & 0xf0000) |
5097 ((read_exec_only ^ 1) << 9) |
5098 (contents << 10) |
5099 ((seg_not_present ^ 1) << 15) |
5100 (seg_32bit << 22) |
5101 (limit_in_pages << 23) |
5102 (lm << 21) |
5103 0x7000;
5104 if (!oldmode)
5105 entry_2 |= (useable << 20);
5107 /* Install the new entry ... */
5108 install:
5109 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5110 lp[0] = tswap32(entry_1);
5111 lp[1] = tswap32(entry_2);
5112 return 0;
5115 /* specific and weird i386 syscalls */
5116 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5117 unsigned long bytecount)
5119 abi_long ret;
5121 switch (func) {
5122 case 0:
5123 ret = read_ldt(ptr, bytecount);
5124 break;
5125 case 1:
5126 ret = write_ldt(env, ptr, bytecount, 1);
5127 break;
5128 case 0x11:
5129 ret = write_ldt(env, ptr, bytecount, 0);
5130 break;
5131 default:
5132 ret = -TARGET_ENOSYS;
5133 break;
5135 return ret;
5138 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5139 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5141 uint64_t *gdt_table = g2h(env->gdt.base);
5142 struct target_modify_ldt_ldt_s ldt_info;
5143 struct target_modify_ldt_ldt_s *target_ldt_info;
5144 int seg_32bit, contents, read_exec_only, limit_in_pages;
5145 int seg_not_present, useable, lm;
5146 uint32_t *lp, entry_1, entry_2;
5147 int i;
5149 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5150 if (!target_ldt_info)
5151 return -TARGET_EFAULT;
5152 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5153 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5154 ldt_info.limit = tswap32(target_ldt_info->limit);
5155 ldt_info.flags = tswap32(target_ldt_info->flags);
5156 if (ldt_info.entry_number == -1) {
5157 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5158 if (gdt_table[i] == 0) {
5159 ldt_info.entry_number = i;
5160 target_ldt_info->entry_number = tswap32(i);
5161 break;
5165 unlock_user_struct(target_ldt_info, ptr, 1);
5167 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5168 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5169 return -TARGET_EINVAL;
5170 seg_32bit = ldt_info.flags & 1;
5171 contents = (ldt_info.flags >> 1) & 3;
5172 read_exec_only = (ldt_info.flags >> 3) & 1;
5173 limit_in_pages = (ldt_info.flags >> 4) & 1;
5174 seg_not_present = (ldt_info.flags >> 5) & 1;
5175 useable = (ldt_info.flags >> 6) & 1;
5176 #ifdef TARGET_ABI32
5177 lm = 0;
5178 #else
5179 lm = (ldt_info.flags >> 7) & 1;
5180 #endif
5182 if (contents == 3) {
5183 if (seg_not_present == 0)
5184 return -TARGET_EINVAL;
5187 /* NOTE: same code as Linux kernel */
5188 /* Allow LDTs to be cleared by the user. */
5189 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5190 if ((contents == 0 &&
5191 read_exec_only == 1 &&
5192 seg_32bit == 0 &&
5193 limit_in_pages == 0 &&
5194 seg_not_present == 1 &&
5195 useable == 0 )) {
5196 entry_1 = 0;
5197 entry_2 = 0;
5198 goto install;
5202 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5203 (ldt_info.limit & 0x0ffff);
5204 entry_2 = (ldt_info.base_addr & 0xff000000) |
5205 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5206 (ldt_info.limit & 0xf0000) |
5207 ((read_exec_only ^ 1) << 9) |
5208 (contents << 10) |
5209 ((seg_not_present ^ 1) << 15) |
5210 (seg_32bit << 22) |
5211 (limit_in_pages << 23) |
5212 (useable << 20) |
5213 (lm << 21) |
5214 0x7000;
5216 /* Install the new entry ... */
5217 install:
5218 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5219 lp[0] = tswap32(entry_1);
5220 lp[1] = tswap32(entry_2);
5221 return 0;
5224 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5226 struct target_modify_ldt_ldt_s *target_ldt_info;
5227 uint64_t *gdt_table = g2h(env->gdt.base);
5228 uint32_t base_addr, limit, flags;
5229 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5230 int seg_not_present, useable, lm;
5231 uint32_t *lp, entry_1, entry_2;
5233 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5234 if (!target_ldt_info)
5235 return -TARGET_EFAULT;
5236 idx = tswap32(target_ldt_info->entry_number);
5237 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5238 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5239 unlock_user_struct(target_ldt_info, ptr, 1);
5240 return -TARGET_EINVAL;
5242 lp = (uint32_t *)(gdt_table + idx);
5243 entry_1 = tswap32(lp[0]);
5244 entry_2 = tswap32(lp[1]);
5246 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5247 contents = (entry_2 >> 10) & 3;
5248 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5249 seg_32bit = (entry_2 >> 22) & 1;
5250 limit_in_pages = (entry_2 >> 23) & 1;
5251 useable = (entry_2 >> 20) & 1;
5252 #ifdef TARGET_ABI32
5253 lm = 0;
5254 #else
5255 lm = (entry_2 >> 21) & 1;
5256 #endif
5257 flags = (seg_32bit << 0) | (contents << 1) |
5258 (read_exec_only << 3) | (limit_in_pages << 4) |
5259 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5260 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5261 base_addr = (entry_1 >> 16) |
5262 (entry_2 & 0xff000000) |
5263 ((entry_2 & 0xff) << 16);
5264 target_ldt_info->base_addr = tswapal(base_addr);
5265 target_ldt_info->limit = tswap32(limit);
5266 target_ldt_info->flags = tswap32(flags);
5267 unlock_user_struct(target_ldt_info, ptr, 1);
5268 return 0;
5270 #endif /* TARGET_I386 && TARGET_ABI32 */
5272 #ifndef TARGET_ABI32
5273 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5275 abi_long ret = 0;
5276 abi_ulong val;
5277 int idx;
5279 switch(code) {
5280 case TARGET_ARCH_SET_GS:
5281 case TARGET_ARCH_SET_FS:
5282 if (code == TARGET_ARCH_SET_GS)
5283 idx = R_GS;
5284 else
5285 idx = R_FS;
5286 cpu_x86_load_seg(env, idx, 0);
5287 env->segs[idx].base = addr;
5288 break;
5289 case TARGET_ARCH_GET_GS:
5290 case TARGET_ARCH_GET_FS:
5291 if (code == TARGET_ARCH_GET_GS)
5292 idx = R_GS;
5293 else
5294 idx = R_FS;
5295 val = env->segs[idx].base;
5296 if (put_user(val, addr, abi_ulong))
5297 ret = -TARGET_EFAULT;
5298 break;
5299 default:
5300 ret = -TARGET_EINVAL;
5301 break;
5303 return ret;
5305 #endif
5307 #endif /* defined(TARGET_I386) */
5309 #define NEW_STACK_SIZE 0x40000
5312 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5313 typedef struct {
5314 CPUArchState *env;
5315 pthread_mutex_t mutex;
5316 pthread_cond_t cond;
5317 pthread_t thread;
5318 uint32_t tid;
5319 abi_ulong child_tidptr;
5320 abi_ulong parent_tidptr;
5321 sigset_t sigmask;
5322 } new_thread_info;
5324 static void *clone_func(void *arg)
5326 new_thread_info *info = arg;
5327 CPUArchState *env;
5328 CPUState *cpu;
5329 TaskState *ts;
5331 rcu_register_thread();
5332 env = info->env;
5333 cpu = ENV_GET_CPU(env);
5334 thread_cpu = cpu;
5335 ts = (TaskState *)cpu->opaque;
5336 info->tid = gettid();
5337 cpu->host_tid = info->tid;
5338 task_settid(ts);
5339 if (info->child_tidptr)
5340 put_user_u32(info->tid, info->child_tidptr);
5341 if (info->parent_tidptr)
5342 put_user_u32(info->tid, info->parent_tidptr);
5343 /* Enable signals. */
5344 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5345 /* Signal to the parent that we're ready. */
5346 pthread_mutex_lock(&info->mutex);
5347 pthread_cond_broadcast(&info->cond);
5348 pthread_mutex_unlock(&info->mutex);
5349 /* Wait until the parent has finshed initializing the tls state. */
5350 pthread_mutex_lock(&clone_lock);
5351 pthread_mutex_unlock(&clone_lock);
5352 cpu_loop(env);
5353 /* never exits */
5354 return NULL;
5357 /* do_fork() Must return host values and target errnos (unlike most
5358 do_*() functions). */
5359 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5360 abi_ulong parent_tidptr, target_ulong newtls,
5361 abi_ulong child_tidptr)
5363 CPUState *cpu = ENV_GET_CPU(env);
5364 int ret;
5365 TaskState *ts;
5366 CPUState *new_cpu;
5367 CPUArchState *new_env;
5368 unsigned int nptl_flags;
5369 sigset_t sigmask;
5371 /* Emulate vfork() with fork() */
5372 if (flags & CLONE_VFORK)
5373 flags &= ~(CLONE_VFORK | CLONE_VM);
5375 if (flags & CLONE_VM) {
5376 TaskState *parent_ts = (TaskState *)cpu->opaque;
5377 new_thread_info info;
5378 pthread_attr_t attr;
5380 ts = g_new0(TaskState, 1);
5381 init_task_state(ts);
5382 /* we create a new CPU instance. */
5383 new_env = cpu_copy(env);
5384 /* Init regs that differ from the parent. */
5385 cpu_clone_regs(new_env, newsp);
5386 new_cpu = ENV_GET_CPU(new_env);
5387 new_cpu->opaque = ts;
5388 ts->bprm = parent_ts->bprm;
5389 ts->info = parent_ts->info;
5390 nptl_flags = flags;
5391 flags &= ~CLONE_NPTL_FLAGS2;
5393 if (nptl_flags & CLONE_CHILD_CLEARTID) {
5394 ts->child_tidptr = child_tidptr;
5397 if (nptl_flags & CLONE_SETTLS)
5398 cpu_set_tls (new_env, newtls);
5400 /* Grab a mutex so that thread setup appears atomic. */
5401 pthread_mutex_lock(&clone_lock);
5403 memset(&info, 0, sizeof(info));
5404 pthread_mutex_init(&info.mutex, NULL);
5405 pthread_mutex_lock(&info.mutex);
5406 pthread_cond_init(&info.cond, NULL);
5407 info.env = new_env;
5408 if (nptl_flags & CLONE_CHILD_SETTID)
5409 info.child_tidptr = child_tidptr;
5410 if (nptl_flags & CLONE_PARENT_SETTID)
5411 info.parent_tidptr = parent_tidptr;
5413 ret = pthread_attr_init(&attr);
5414 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5415 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5416 /* It is not safe to deliver signals until the child has finished
5417 initializing, so temporarily block all signals. */
5418 sigfillset(&sigmask);
5419 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5421 ret = pthread_create(&info.thread, &attr, clone_func, &info);
5422 /* TODO: Free new CPU state if thread creation failed. */
5424 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5425 pthread_attr_destroy(&attr);
5426 if (ret == 0) {
5427 /* Wait for the child to initialize. */
5428 pthread_cond_wait(&info.cond, &info.mutex);
5429 ret = info.tid;
5430 if (flags & CLONE_PARENT_SETTID)
5431 put_user_u32(ret, parent_tidptr);
5432 } else {
5433 ret = -1;
5435 pthread_mutex_unlock(&info.mutex);
5436 pthread_cond_destroy(&info.cond);
5437 pthread_mutex_destroy(&info.mutex);
5438 pthread_mutex_unlock(&clone_lock);
5439 } else {
5440 /* if no CLONE_VM, we consider it is a fork */
5441 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
5442 return -TARGET_EINVAL;
5444 fork_start();
5445 ret = fork();
5446 if (ret == 0) {
5447 /* Child Process. */
5448 rcu_after_fork();
5449 cpu_clone_regs(env, newsp);
5450 fork_end(1);
5451 /* There is a race condition here. The parent process could
5452 theoretically read the TID in the child process before the child
5453 tid is set. This would require using either ptrace
5454 (not implemented) or having *_tidptr to point at a shared memory
5455 mapping. We can't repeat the spinlock hack used above because
5456 the child process gets its own copy of the lock. */
5457 if (flags & CLONE_CHILD_SETTID)
5458 put_user_u32(gettid(), child_tidptr);
5459 if (flags & CLONE_PARENT_SETTID)
5460 put_user_u32(gettid(), parent_tidptr);
5461 ts = (TaskState *)cpu->opaque;
5462 if (flags & CLONE_SETTLS)
5463 cpu_set_tls (env, newtls);
5464 if (flags & CLONE_CHILD_CLEARTID)
5465 ts->child_tidptr = child_tidptr;
5466 } else {
5467 fork_end(0);
5470 return ret;
/* warning : doesn't handle linux specific flags... */
/* Translate a guest fcntl() command number into the host's numbering.
 * Commands whose encoding is identical on all Linux targets are passed
 * through unchanged; unknown commands yield -TARGET_EINVAL so the
 * caller can reject them before touching the host fcntl().
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        /* Same value on host and every target. */
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK;
    case TARGET_F_SETLK:
        return F_SETLK;
    case TARGET_F_SETLKW:
        return F_SETLKW;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    /* The *LK64 commands only exist for 32-bit ABIs; 64-bit ABIs use
     * the plain F_GETLK/F_SETLK/F_SETLKW encodings directly. */
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        return F_SETOWN_EX;
#endif
    default:
        return -TARGET_EINVAL;
    }
    /* Not reachable; kept for defensiveness. */
    return -TARGET_EINVAL;
}
/* Translation table for guest<->host flock lock types (F_RDLCK etc).
 * The mask fields are -1 (all bits) because these are enumerated
 * values, not single-bit flags.  NOTE(review): pushing enum values
 * through the bitmask machinery is historical — confirm semantics
 * before adding new lock types here. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
/* Emulate the guest fcntl(2).
 *
 * fd:  host file descriptor.
 * cmd: guest TARGET_F_* command.
 * arg: raw third argument — a guest pointer for the struct-based
 *      commands (flock/flock64/f_owner_ex), a plain integer otherwise.
 *
 * Converts the guest argument structures to their host layout, issues
 * the host fcntl() and converts results back.  Returns the host result
 * or -TARGET_E* on failure.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
                  target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            /* F_GETLK writes the conflicting lock (if any) back into
             * the struct, so copy the host result out to the guest. */
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type =
                          host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        /* Input-only: convert the guest flock and hand it to the host. */
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
                  target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        /* 64-bit offset variant used by 32-bit ABIs.
         * NOTE(review): the '>> 1' on l_type below has no counterpart
         * in the 32-bit flock path above — confirm it is really wanted
         * before touching the conversion. */
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
           target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type =
                   host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
           target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* Flag words need O_* bit translation in both directions. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        /* Integer argument, no conversion needed. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown-but-mapped command: pass the raw guest cmd through. */
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

/* Targets with legacy 16-bit uid_t/gid_t: clamp wide host IDs into the
 * 16-bit range.  65534 ("nobody") is the kernel's overflow value. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widen a 16-bit guest ID, preserving the special -1 ("don't change")
 * value used by setres[ug]id() and friends. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

/* IDs are 16-bit on these targets, so swap as 16-bit quantities. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* Targets with 32-bit IDs: all conversions are the identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw syscall wrappers — deliberately bypass glibc's per-process
 * broadcasting of set*id() to all threads. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time initialisation of the syscall emulation layer: registers
 * the thunk struct descriptions, builds the target->host errno table,
 * and patches ioctl numbers whose payload size can only be computed
 * once the thunk type sizes are known. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

/* Registers every struct listed in syscall_types.h with the thunk layer. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Combine the two 32-bit register halves of a 64-bit file offset,
 * honouring the target's word order. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the offset in a single register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: the 64-bit length arrives split across two argument
 * registers; some ABIs insert a pad register so the pair is aligned,
 * in which case the real pair is (arg3, arg4). */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair handling as target_truncate64()
 * above, but operating on an already-open descriptor. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
/* Copy a struct timespec in from guest memory.
 * Returns 0 on success, -TARGET_EFAULT if target_addr is unmapped. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* Read-only access: no copy-back to the guest. */
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
/* Copy a struct timespec out to guest memory.
 * Returns 0 on success, -TARGET_EFAULT if target_addr is unmapped. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
5904 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5905 abi_ulong target_addr)
5907 struct target_itimerspec *target_itspec;
5909 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5910 return -TARGET_EFAULT;
5913 host_itspec->it_interval.tv_sec =
5914 tswapal(target_itspec->it_interval.tv_sec);
5915 host_itspec->it_interval.tv_nsec =
5916 tswapal(target_itspec->it_interval.tv_nsec);
5917 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5918 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5920 unlock_user_struct(target_itspec, target_addr, 1);
5921 return 0;
/* Copy a struct itimerspec out to guest memory, byte-swapping each
 * field.  Returns 0 on success, -TARGET_EFAULT if target_addr is
 * unmapped. */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
    target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);

    target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
    target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);

    /* NOTE(review): a VERIFY_WRITE lock is normally released with
     * copy-back flag 1; this passes 0 — on hosts where lock_user maps
     * guest memory directly it still works, confirm for the bounce
     * buffer case. */
    unlock_user_struct(target_itspec, target_addr, 0);
    return 0;
}
5943 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5944 abi_ulong target_addr)
5946 struct target_sigevent *target_sevp;
5948 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
5949 return -TARGET_EFAULT;
5952 /* This union is awkward on 64 bit systems because it has a 32 bit
5953 * integer and a pointer in it; we follow the conversion approach
5954 * used for handling sigval types in signal.c so the guest should get
5955 * the correct value back even if we did a 64 bit byteswap and it's
5956 * using the 32 bit integer.
5958 host_sevp->sigev_value.sival_ptr =
5959 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
5960 host_sevp->sigev_signo =
5961 target_to_host_signal(tswap32(target_sevp->sigev_signo));
5962 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
5963 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
5965 unlock_user_struct(target_sevp, target_addr, 1);
5966 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall() flag bits into the host's MCL_*
 * encoding; unknown bits are silently dropped. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_flags = 0;

    host_flags |= (arg & TARGET_MLOCKALL_MCL_CURRENT) ? MCL_CURRENT : 0;
    host_flags |= (arg & TARGET_MLOCKALL_MCL_FUTURE) ? MCL_FUTURE : 0;

    return host_flags;
}
#endif
/* Write a host struct stat out to the guest as its stat64 layout.
 * Three layouts are handled: the ARM EABI one (runtime-selected), the
 * target's stat64 if it has one, and plain target_stat otherwise.
 * Returns 0 on success, -TARGET_EFAULT if target_addr is unmapped. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first: the guest layout has padding/fields we never set. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
6047 /* ??? Using host futex calls even when target atomic operations
6048 are not really atomic probably breaks things. However implementing
6049 futexes locally would make futexes shared between multiple processes
6050 tricky. However they're probably useless because guest atomic
6051 operations won't work either. */
6052 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6053 target_ulong uaddr2, int val3)
6055 struct timespec ts, *pts;
6056 int base_op;
6058 /* ??? We assume FUTEX_* constants are the same on both host
6059 and target. */
6060 #ifdef FUTEX_CMD_MASK
6061 base_op = op & FUTEX_CMD_MASK;
6062 #else
6063 base_op = op;
6064 #endif
6065 switch (base_op) {
6066 case FUTEX_WAIT:
6067 case FUTEX_WAIT_BITSET:
6068 if (timeout) {
6069 pts = &ts;
6070 target_to_host_timespec(pts, timeout);
6071 } else {
6072 pts = NULL;
6074 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6075 pts, NULL, val3));
6076 case FUTEX_WAKE:
6077 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6078 case FUTEX_FD:
6079 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6080 case FUTEX_REQUEUE:
6081 case FUTEX_CMP_REQUEUE:
6082 case FUTEX_WAKE_OP:
6083 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6084 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6085 But the prototype takes a `struct timespec *'; insert casts
6086 to satisfy the compiler. We do not need to tswap TIMEOUT
6087 since it's not compared to guest memory. */
6088 pts = (struct timespec *)(uintptr_t) timeout;
6089 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6090 g2h(uaddr2),
6091 (base_op == FUTEX_CMP_REQUEUE
6092 ? tswap32(val3)
6093 : val3)));
6094 default:
6095 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): the guest supplies the handle buffer
 * (with handle_bytes pre-set), we run the host call on a bounce buffer
 * and copy the opaque handle plus the mount id back to the guest.
 * Returns the host result or -TARGET_EFAULT on bad guest pointers. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): duplicate the guest's opaque handle
 * into a host buffer (fixing up the byte order of the header fields)
 * and perform the host call with translated open flags. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
6186 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6188 /* signalfd siginfo conversion */
/* Convert a host signalfd_siginfo record to target byte order and
 * target signal numbering.  NOTE: the only caller passes the same
 * buffer as both tinfo and info (in-place conversion), so every field
 * must be read from info before the aliased tinfo field is written. */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* All fields are still in host order here, so the (aliased)
     * tinfo values compare correctly against the host constants. */
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Read from the source struct: the old code swapped
     * tinfo->ssi_errno here, which only worked because of the in-place
     * aliasing noted above. */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6228 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
6230 int i;
6232 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
6233 host_to_target_signalfd_siginfo(buf + i, buf + i);
6236 return len;
/* fd_trans descriptor registered for signalfd descriptors so that
 * data read from them is converted to target-order siginfo records. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
/* Emulate signalfd4(2) (and signalfd(2), via flags == 0): convert the
 * guest sigset and flag bits, create the host signalfd and register
 * the read-side translator above.  Returns the new fd or -TARGET_E*. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only NONBLOCK and CLOEXEC are valid signalfd4 flags. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminated by signal: replace the low 7 bits (the signal). */
        int tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stopped: the stop signal lives in bits 8-15. */
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }
    /* Normal exit (or unrecognised encoding): pass through unchanged. */
    return status;
}
/* Back /proc/self/cmdline for the guest: stream the host file into fd,
 * dropping the first NUL-terminated word (the path of the qemu binary)
 * so the guest's own argv[0] comes first.  Returns 0 on success, or
 * -1 with errno set on read/write failure. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            /* Only search the bytes actually read: scanning the whole
             * buffer (as the old code did with sizeof(buf)) could match
             * uninitialized stack bytes on a short read, producing a
             * bogus skip offset and a negative write length. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
6336 static int open_self_maps(void *cpu_env, int fd)
6338 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6339 TaskState *ts = cpu->opaque;
6340 FILE *fp;
6341 char *line = NULL;
6342 size_t len = 0;
6343 ssize_t read;
6345 fp = fopen("/proc/self/maps", "r");
6346 if (fp == NULL) {
6347 return -1;
6350 while ((read = getline(&line, &len, fp)) != -1) {
6351 int fields, dev_maj, dev_min, inode;
6352 uint64_t min, max, offset;
6353 char flag_r, flag_w, flag_x, flag_p;
6354 char path[512] = "";
6355 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6356 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6357 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6359 if ((fields < 10) || (fields > 11)) {
6360 continue;
6362 if (h2g_valid(min)) {
6363 int flags = page_get_flags(h2g(min));
6364 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
6365 if (page_check_range(h2g(min), max - min, flags) == -1) {
6366 continue;
6368 if (h2g(min) == ts->info->stack_limit) {
6369 pstrcpy(path, sizeof(path), " [stack]");
6371 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
6372 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6373 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6374 flag_x, flag_p, offset, dev_maj, dev_min, inode,
6375 path[0] ? " " : "", path);
6379 free(line);
6380 fclose(fp);
6382 return 0;
/* Synthesize /proc/self/stat for the guest.  Only the fields a guest
 * typically consumes are real — pid (0), comm (1) and startstack (27);
 * the remaining fields are emitted as literal 0. */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    /* /proc/pid/stat has 44 space-separated fields, newline after last. */
    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
/* Expose the guest's ELF auxiliary vector as /proc/self/auxv by
 * copying it from the guest stack into fd, then rewinding the fd.
 * NOTE(review): lock_user() failure is silently ignored and 0 is
 * returned with nothing written — confirm this best-effort behaviour
 * is intended before tightening it. */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        /* Leave the fake file positioned at the start for the guest. */
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/* Return 1 if filename names the given entry of the emulated process's
 * own /proc directory — i.e. "/proc/self/<entry>" or "/proc/<pid>/<entry>"
 * where <pid> is our pid — and 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* Numeric form: only our own pid counts as "myself". */
        char myself[80];
        size_t plen;

        snprintf(myself, sizeof(myself), "%d/", getpid());
        plen = strlen(myself);
        if (strncmp(rest, myself, plen) != 0) {
            return 0;
        }
        rest += plen;
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
6476 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path match used for faked /proc entries that are not
 * per-process (e.g. /proc/net/route). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
6482 static int open_net_route(void *cpu_env, int fd)
6484 FILE *fp;
6485 char *line = NULL;
6486 size_t len = 0;
6487 ssize_t read;
6489 fp = fopen("/proc/net/route", "r");
6490 if (fp == NULL) {
6491 return -1;
6494 /* read header */
6496 read = getline(&line, &len, fp);
6497 dprintf(fd, "%s", line);
6499 /* read routes */
6501 while ((read = getline(&line, &len, fp)) != -1) {
6502 char iface[16];
6503 uint32_t dest, gw, mask;
6504 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6505 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6506 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6507 &mask, &mtu, &window, &irtt);
6508 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6509 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6510 metric, tswap32(mask), mtu, window, irtt);
6513 free(line);
6514 fclose(fp);
6516 return 0;
6518 #endif
/* Emulate openat(2) with special handling for the /proc entries that
 * must reflect the *guest* process rather than the qemu host process.
 * Matching fake entries are materialised into an unlinked temp file;
 * everything else goes to the host via safe_openat(). */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;               /* entry (or full path) to match */
        int (*fill)(void *cpu_env, int fd); /* writes the fake contents */
        int (*cmp)(const char *s1, const char *s2); /* matcher to use */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* Opening our own executable: reuse the fd the loader was
         * handed via AT_EXECFD if available. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive, the name
         * never becomes visible to the guest. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
/* Guest-visible timer IDs carry a magic value in the top 16 bits so
 * stale or forged IDs can be rejected; the low 16 bits index
 * g_posix_timers[]. */
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* Reject IDs we never handed out (wrong magic in the top half). */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    /* Index must fall inside the static timer table. */
    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
6601 /* do_syscall() should always have a single exit point at the end so
6602 that actions, such as logging of syscall results, can be performed.
6603 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
6604 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
6605 abi_long arg2, abi_long arg3, abi_long arg4,
6606 abi_long arg5, abi_long arg6, abi_long arg7,
6607 abi_long arg8)
6609 CPUState *cpu = ENV_GET_CPU(cpu_env);
6610 abi_long ret;
6611 struct stat st;
6612 struct statfs stfs;
6613 void *p;
6615 #if defined(DEBUG_ERESTARTSYS)
6616 /* Debug-only code for exercising the syscall-restart code paths
6617 * in the per-architecture cpu main loops: restart every syscall
6618 * the guest makes once before letting it through.
6621 static int flag;
6623 flag = !flag;
6624 if (flag) {
6625 return -TARGET_ERESTARTSYS;
6628 #endif
6630 #ifdef DEBUG
6631 gemu_log("syscall %d", num);
6632 #endif
6633 if(do_strace)
6634 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
6636 switch(num) {
6637 case TARGET_NR_exit:
6638 /* In old applications this may be used to implement _exit(2).
However in threaded applications it is used for thread termination,
6640 and _exit_group is used for application termination.
Do thread termination if we have more than one thread. */
6642 /* FIXME: This probably breaks if a signal arrives. We should probably
6643 be disabling signals. */
6644 if (CPU_NEXT(first_cpu)) {
6645 TaskState *ts;
6647 cpu_list_lock();
6648 /* Remove the CPU from the list. */
6649 QTAILQ_REMOVE(&cpus, cpu, node);
6650 cpu_list_unlock();
6651 ts = cpu->opaque;
6652 if (ts->child_tidptr) {
6653 put_user_u32(0, ts->child_tidptr);
6654 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6655 NULL, NULL, 0);
6657 thread_cpu = NULL;
6658 object_unref(OBJECT(cpu));
6659 g_free(ts);
6660 rcu_unregister_thread();
6661 pthread_exit(NULL);
6663 #ifdef TARGET_GPROF
6664 _mcleanup();
6665 #endif
6666 gdb_exit(cpu_env, arg1);
6667 _exit(arg1);
6668 ret = 0; /* avoid warning */
6669 break;
6670 case TARGET_NR_read:
6671 if (arg3 == 0)
6672 ret = 0;
6673 else {
6674 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6675 goto efault;
6676 ret = get_errno(safe_read(arg1, p, arg3));
6677 if (ret >= 0 &&
6678 fd_trans_host_to_target_data(arg1)) {
6679 ret = fd_trans_host_to_target_data(arg1)(p, ret);
6681 unlock_user(p, arg2, ret);
6683 break;
6684 case TARGET_NR_write:
6685 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6686 goto efault;
6687 ret = get_errno(safe_write(arg1, p, arg3));
6688 unlock_user(p, arg2, 0);
6689 break;
6690 #ifdef TARGET_NR_open
6691 case TARGET_NR_open:
6692 if (!(p = lock_user_string(arg1)))
6693 goto efault;
6694 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6695 target_to_host_bitmask(arg2, fcntl_flags_tbl),
6696 arg3));
6697 fd_trans_unregister(ret);
6698 unlock_user(p, arg1, 0);
6699 break;
6700 #endif
6701 case TARGET_NR_openat:
6702 if (!(p = lock_user_string(arg2)))
6703 goto efault;
6704 ret = get_errno(do_openat(cpu_env, arg1, p,
6705 target_to_host_bitmask(arg3, fcntl_flags_tbl),
6706 arg4));
6707 fd_trans_unregister(ret);
6708 unlock_user(p, arg2, 0);
6709 break;
6710 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6711 case TARGET_NR_name_to_handle_at:
6712 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
6713 break;
6714 #endif
6715 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6716 case TARGET_NR_open_by_handle_at:
6717 ret = do_open_by_handle_at(arg1, arg2, arg3);
6718 fd_trans_unregister(ret);
6719 break;
6720 #endif
6721 case TARGET_NR_close:
6722 fd_trans_unregister(arg1);
6723 ret = get_errno(close(arg1));
6724 break;
6725 case TARGET_NR_brk:
6726 ret = do_brk(arg1);
6727 break;
6728 #ifdef TARGET_NR_fork
6729 case TARGET_NR_fork:
6730 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
6731 break;
6732 #endif
6733 #ifdef TARGET_NR_waitpid
6734 case TARGET_NR_waitpid:
6736 int status;
6737 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
6738 if (!is_error(ret) && arg2 && ret
6739 && put_user_s32(host_to_target_waitstatus(status), arg2))
6740 goto efault;
6742 break;
6743 #endif
6744 #ifdef TARGET_NR_waitid
6745 case TARGET_NR_waitid:
6747 siginfo_t info;
6748 info.si_pid = 0;
6749 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
6750 if (!is_error(ret) && arg3 && info.si_pid != 0) {
6751 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
6752 goto efault;
6753 host_to_target_siginfo(p, &info);
6754 unlock_user(p, arg3, sizeof(target_siginfo_t));
6757 break;
6758 #endif
6759 #ifdef TARGET_NR_creat /* not on alpha */
6760 case TARGET_NR_creat:
6761 if (!(p = lock_user_string(arg1)))
6762 goto efault;
6763 ret = get_errno(creat(p, arg2));
6764 fd_trans_unregister(ret);
6765 unlock_user(p, arg1, 0);
6766 break;
6767 #endif
6768 #ifdef TARGET_NR_link
6769 case TARGET_NR_link:
6771 void * p2;
6772 p = lock_user_string(arg1);
6773 p2 = lock_user_string(arg2);
6774 if (!p || !p2)
6775 ret = -TARGET_EFAULT;
6776 else
6777 ret = get_errno(link(p, p2));
6778 unlock_user(p2, arg2, 0);
6779 unlock_user(p, arg1, 0);
6781 break;
6782 #endif
6783 #if defined(TARGET_NR_linkat)
6784 case TARGET_NR_linkat:
6786 void * p2 = NULL;
6787 if (!arg2 || !arg4)
6788 goto efault;
6789 p = lock_user_string(arg2);
6790 p2 = lock_user_string(arg4);
6791 if (!p || !p2)
6792 ret = -TARGET_EFAULT;
6793 else
6794 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6795 unlock_user(p, arg2, 0);
6796 unlock_user(p2, arg4, 0);
6798 break;
6799 #endif
6800 #ifdef TARGET_NR_unlink
6801 case TARGET_NR_unlink:
6802 if (!(p = lock_user_string(arg1)))
6803 goto efault;
6804 ret = get_errno(unlink(p));
6805 unlock_user(p, arg1, 0);
6806 break;
6807 #endif
6808 #if defined(TARGET_NR_unlinkat)
6809 case TARGET_NR_unlinkat:
6810 if (!(p = lock_user_string(arg2)))
6811 goto efault;
6812 ret = get_errno(unlinkat(arg1, p, arg3));
6813 unlock_user(p, arg2, 0);
6814 break;
6815 #endif
6816 case TARGET_NR_execve:
6818 char **argp, **envp;
6819 int argc, envc;
6820 abi_ulong gp;
6821 abi_ulong guest_argp;
6822 abi_ulong guest_envp;
6823 abi_ulong addr;
6824 char **q;
6825 int total_size = 0;
6827 argc = 0;
6828 guest_argp = arg2;
6829 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6830 if (get_user_ual(addr, gp))
6831 goto efault;
6832 if (!addr)
6833 break;
6834 argc++;
6836 envc = 0;
6837 guest_envp = arg3;
6838 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6839 if (get_user_ual(addr, gp))
6840 goto efault;
6841 if (!addr)
6842 break;
6843 envc++;
6846 argp = alloca((argc + 1) * sizeof(void *));
6847 envp = alloca((envc + 1) * sizeof(void *));
6849 for (gp = guest_argp, q = argp; gp;
6850 gp += sizeof(abi_ulong), q++) {
6851 if (get_user_ual(addr, gp))
6852 goto execve_efault;
6853 if (!addr)
6854 break;
6855 if (!(*q = lock_user_string(addr)))
6856 goto execve_efault;
6857 total_size += strlen(*q) + 1;
6859 *q = NULL;
6861 for (gp = guest_envp, q = envp; gp;
6862 gp += sizeof(abi_ulong), q++) {
6863 if (get_user_ual(addr, gp))
6864 goto execve_efault;
6865 if (!addr)
6866 break;
6867 if (!(*q = lock_user_string(addr)))
6868 goto execve_efault;
6869 total_size += strlen(*q) + 1;
6871 *q = NULL;
6873 if (!(p = lock_user_string(arg1)))
6874 goto execve_efault;
6875 /* Although execve() is not an interruptible syscall it is
6876 * a special case where we must use the safe_syscall wrapper:
6877 * if we allow a signal to happen before we make the host
6878 * syscall then we will 'lose' it, because at the point of
6879 * execve the process leaves QEMU's control. So we use the
6880 * safe syscall wrapper to ensure that we either take the
6881 * signal as a guest signal, or else it does not happen
6882 * before the execve completes and makes it the other
6883 * program's problem.
6885 ret = get_errno(safe_execve(p, argp, envp));
6886 unlock_user(p, arg1, 0);
6888 goto execve_end;
6890 execve_efault:
6891 ret = -TARGET_EFAULT;
6893 execve_end:
6894 for (gp = guest_argp, q = argp; *q;
6895 gp += sizeof(abi_ulong), q++) {
6896 if (get_user_ual(addr, gp)
6897 || !addr)
6898 break;
6899 unlock_user(*q, addr, 0);
6901 for (gp = guest_envp, q = envp; *q;
6902 gp += sizeof(abi_ulong), q++) {
6903 if (get_user_ual(addr, gp)
6904 || !addr)
6905 break;
6906 unlock_user(*q, addr, 0);
6909 break;
6910 case TARGET_NR_chdir:
6911 if (!(p = lock_user_string(arg1)))
6912 goto efault;
6913 ret = get_errno(chdir(p));
6914 unlock_user(p, arg1, 0);
6915 break;
6916 #ifdef TARGET_NR_time
6917 case TARGET_NR_time:
6919 time_t host_time;
6920 ret = get_errno(time(&host_time));
6921 if (!is_error(ret)
6922 && arg1
6923 && put_user_sal(host_time, arg1))
6924 goto efault;
6926 break;
6927 #endif
6928 #ifdef TARGET_NR_mknod
6929 case TARGET_NR_mknod:
6930 if (!(p = lock_user_string(arg1)))
6931 goto efault;
6932 ret = get_errno(mknod(p, arg2, arg3));
6933 unlock_user(p, arg1, 0);
6934 break;
6935 #endif
6936 #if defined(TARGET_NR_mknodat)
6937 case TARGET_NR_mknodat:
6938 if (!(p = lock_user_string(arg2)))
6939 goto efault;
6940 ret = get_errno(mknodat(arg1, p, arg3, arg4));
6941 unlock_user(p, arg2, 0);
6942 break;
6943 #endif
6944 #ifdef TARGET_NR_chmod
6945 case TARGET_NR_chmod:
6946 if (!(p = lock_user_string(arg1)))
6947 goto efault;
6948 ret = get_errno(chmod(p, arg2));
6949 unlock_user(p, arg1, 0);
6950 break;
6951 #endif
6952 #ifdef TARGET_NR_break
6953 case TARGET_NR_break:
6954 goto unimplemented;
6955 #endif
6956 #ifdef TARGET_NR_oldstat
6957 case TARGET_NR_oldstat:
6958 goto unimplemented;
6959 #endif
6960 case TARGET_NR_lseek:
6961 ret = get_errno(lseek(arg1, arg2, arg3));
6962 break;
6963 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
6964 /* Alpha specific */
6965 case TARGET_NR_getxpid:
6966 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
6967 ret = get_errno(getpid());
6968 break;
6969 #endif
6970 #ifdef TARGET_NR_getpid
6971 case TARGET_NR_getpid:
6972 ret = get_errno(getpid());
6973 break;
6974 #endif
6975 case TARGET_NR_mount:
6977 /* need to look at the data field */
6978 void *p2, *p3;
6980 if (arg1) {
6981 p = lock_user_string(arg1);
6982 if (!p) {
6983 goto efault;
6985 } else {
6986 p = NULL;
6989 p2 = lock_user_string(arg2);
6990 if (!p2) {
6991 if (arg1) {
6992 unlock_user(p, arg1, 0);
6994 goto efault;
6997 if (arg3) {
6998 p3 = lock_user_string(arg3);
6999 if (!p3) {
7000 if (arg1) {
7001 unlock_user(p, arg1, 0);
7003 unlock_user(p2, arg2, 0);
7004 goto efault;
7006 } else {
7007 p3 = NULL;
7010 /* FIXME - arg5 should be locked, but it isn't clear how to
7011 * do that since it's not guaranteed to be a NULL-terminated
7012 * string.
7014 if (!arg5) {
7015 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7016 } else {
7017 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7019 ret = get_errno(ret);
7021 if (arg1) {
7022 unlock_user(p, arg1, 0);
7024 unlock_user(p2, arg2, 0);
7025 if (arg3) {
7026 unlock_user(p3, arg3, 0);
7029 break;
7030 #ifdef TARGET_NR_umount
7031 case TARGET_NR_umount:
7032 if (!(p = lock_user_string(arg1)))
7033 goto efault;
7034 ret = get_errno(umount(p));
7035 unlock_user(p, arg1, 0);
7036 break;
7037 #endif
7038 #ifdef TARGET_NR_stime /* not on alpha */
7039 case TARGET_NR_stime:
7041 time_t host_time;
7042 if (get_user_sal(host_time, arg1))
7043 goto efault;
7044 ret = get_errno(stime(&host_time));
7046 break;
7047 #endif
7048 case TARGET_NR_ptrace:
7049 goto unimplemented;
7050 #ifdef TARGET_NR_alarm /* not on alpha */
7051 case TARGET_NR_alarm:
7052 ret = alarm(arg1);
7053 break;
7054 #endif
7055 #ifdef TARGET_NR_oldfstat
7056 case TARGET_NR_oldfstat:
7057 goto unimplemented;
7058 #endif
7059 #ifdef TARGET_NR_pause /* not on alpha */
7060 case TARGET_NR_pause:
7061 ret = get_errno(pause());
7062 break;
7063 #endif
7064 #ifdef TARGET_NR_utime
7065 case TARGET_NR_utime:
7067 struct utimbuf tbuf, *host_tbuf;
7068 struct target_utimbuf *target_tbuf;
7069 if (arg2) {
7070 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7071 goto efault;
7072 tbuf.actime = tswapal(target_tbuf->actime);
7073 tbuf.modtime = tswapal(target_tbuf->modtime);
7074 unlock_user_struct(target_tbuf, arg2, 0);
7075 host_tbuf = &tbuf;
7076 } else {
7077 host_tbuf = NULL;
7079 if (!(p = lock_user_string(arg1)))
7080 goto efault;
7081 ret = get_errno(utime(p, host_tbuf));
7082 unlock_user(p, arg1, 0);
7084 break;
7085 #endif
7086 #ifdef TARGET_NR_utimes
7087 case TARGET_NR_utimes:
7089 struct timeval *tvp, tv[2];
7090 if (arg2) {
7091 if (copy_from_user_timeval(&tv[0], arg2)
7092 || copy_from_user_timeval(&tv[1],
7093 arg2 + sizeof(struct target_timeval)))
7094 goto efault;
7095 tvp = tv;
7096 } else {
7097 tvp = NULL;
7099 if (!(p = lock_user_string(arg1)))
7100 goto efault;
7101 ret = get_errno(utimes(p, tvp));
7102 unlock_user(p, arg1, 0);
7104 break;
7105 #endif
7106 #if defined(TARGET_NR_futimesat)
7107 case TARGET_NR_futimesat:
7109 struct timeval *tvp, tv[2];
7110 if (arg3) {
7111 if (copy_from_user_timeval(&tv[0], arg3)
7112 || copy_from_user_timeval(&tv[1],
7113 arg3 + sizeof(struct target_timeval)))
7114 goto efault;
7115 tvp = tv;
7116 } else {
7117 tvp = NULL;
7119 if (!(p = lock_user_string(arg2)))
7120 goto efault;
7121 ret = get_errno(futimesat(arg1, path(p), tvp));
7122 unlock_user(p, arg2, 0);
7124 break;
7125 #endif
7126 #ifdef TARGET_NR_stty
7127 case TARGET_NR_stty:
7128 goto unimplemented;
7129 #endif
7130 #ifdef TARGET_NR_gtty
7131 case TARGET_NR_gtty:
7132 goto unimplemented;
7133 #endif
7134 #ifdef TARGET_NR_access
7135 case TARGET_NR_access:
7136 if (!(p = lock_user_string(arg1)))
7137 goto efault;
7138 ret = get_errno(access(path(p), arg2));
7139 unlock_user(p, arg1, 0);
7140 break;
7141 #endif
7142 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7143 case TARGET_NR_faccessat:
7144 if (!(p = lock_user_string(arg2)))
7145 goto efault;
7146 ret = get_errno(faccessat(arg1, p, arg3, 0));
7147 unlock_user(p, arg2, 0);
7148 break;
7149 #endif
7150 #ifdef TARGET_NR_nice /* not on alpha */
7151 case TARGET_NR_nice:
7152 ret = get_errno(nice(arg1));
7153 break;
7154 #endif
7155 #ifdef TARGET_NR_ftime
7156 case TARGET_NR_ftime:
7157 goto unimplemented;
7158 #endif
7159 case TARGET_NR_sync:
7160 sync();
7161 ret = 0;
7162 break;
7163 case TARGET_NR_kill:
7164 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
7165 break;
7166 #ifdef TARGET_NR_rename
7167 case TARGET_NR_rename:
7169 void *p2;
7170 p = lock_user_string(arg1);
7171 p2 = lock_user_string(arg2);
7172 if (!p || !p2)
7173 ret = -TARGET_EFAULT;
7174 else
7175 ret = get_errno(rename(p, p2));
7176 unlock_user(p2, arg2, 0);
7177 unlock_user(p, arg1, 0);
7179 break;
7180 #endif
7181 #if defined(TARGET_NR_renameat)
7182 case TARGET_NR_renameat:
7184 void *p2;
7185 p = lock_user_string(arg2);
7186 p2 = lock_user_string(arg4);
7187 if (!p || !p2)
7188 ret = -TARGET_EFAULT;
7189 else
7190 ret = get_errno(renameat(arg1, p, arg3, p2));
7191 unlock_user(p2, arg4, 0);
7192 unlock_user(p, arg2, 0);
7194 break;
7195 #endif
7196 #ifdef TARGET_NR_mkdir
7197 case TARGET_NR_mkdir:
7198 if (!(p = lock_user_string(arg1)))
7199 goto efault;
7200 ret = get_errno(mkdir(p, arg2));
7201 unlock_user(p, arg1, 0);
7202 break;
7203 #endif
7204 #if defined(TARGET_NR_mkdirat)
7205 case TARGET_NR_mkdirat:
7206 if (!(p = lock_user_string(arg2)))
7207 goto efault;
7208 ret = get_errno(mkdirat(arg1, p, arg3));
7209 unlock_user(p, arg2, 0);
7210 break;
7211 #endif
7212 #ifdef TARGET_NR_rmdir
7213 case TARGET_NR_rmdir:
7214 if (!(p = lock_user_string(arg1)))
7215 goto efault;
7216 ret = get_errno(rmdir(p));
7217 unlock_user(p, arg1, 0);
7218 break;
7219 #endif
7220 case TARGET_NR_dup:
7221 ret = get_errno(dup(arg1));
7222 if (ret >= 0) {
7223 fd_trans_dup(arg1, ret);
7225 break;
7226 #ifdef TARGET_NR_pipe
7227 case TARGET_NR_pipe:
7228 ret = do_pipe(cpu_env, arg1, 0, 0);
7229 break;
7230 #endif
7231 #ifdef TARGET_NR_pipe2
7232 case TARGET_NR_pipe2:
7233 ret = do_pipe(cpu_env, arg1,
7234 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7235 break;
7236 #endif
7237 case TARGET_NR_times:
7239 struct target_tms *tmsp;
7240 struct tms tms;
7241 ret = get_errno(times(&tms));
7242 if (arg1) {
7243 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7244 if (!tmsp)
7245 goto efault;
7246 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7247 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7248 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7249 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7251 if (!is_error(ret))
7252 ret = host_to_target_clock_t(ret);
7254 break;
7255 #ifdef TARGET_NR_prof
7256 case TARGET_NR_prof:
7257 goto unimplemented;
7258 #endif
7259 #ifdef TARGET_NR_signal
7260 case TARGET_NR_signal:
7261 goto unimplemented;
7262 #endif
7263 case TARGET_NR_acct:
7264 if (arg1 == 0) {
7265 ret = get_errno(acct(NULL));
7266 } else {
7267 if (!(p = lock_user_string(arg1)))
7268 goto efault;
7269 ret = get_errno(acct(path(p)));
7270 unlock_user(p, arg1, 0);
7272 break;
7273 #ifdef TARGET_NR_umount2
7274 case TARGET_NR_umount2:
7275 if (!(p = lock_user_string(arg1)))
7276 goto efault;
7277 ret = get_errno(umount2(p, arg2));
7278 unlock_user(p, arg1, 0);
7279 break;
7280 #endif
7281 #ifdef TARGET_NR_lock
7282 case TARGET_NR_lock:
7283 goto unimplemented;
7284 #endif
7285 case TARGET_NR_ioctl:
7286 ret = do_ioctl(arg1, arg2, arg3);
7287 break;
7288 case TARGET_NR_fcntl:
7289 ret = do_fcntl(arg1, arg2, arg3);
7290 break;
7291 #ifdef TARGET_NR_mpx
7292 case TARGET_NR_mpx:
7293 goto unimplemented;
7294 #endif
7295 case TARGET_NR_setpgid:
7296 ret = get_errno(setpgid(arg1, arg2));
7297 break;
7298 #ifdef TARGET_NR_ulimit
7299 case TARGET_NR_ulimit:
7300 goto unimplemented;
7301 #endif
7302 #ifdef TARGET_NR_oldolduname
7303 case TARGET_NR_oldolduname:
7304 goto unimplemented;
7305 #endif
7306 case TARGET_NR_umask:
7307 ret = get_errno(umask(arg1));
7308 break;
7309 case TARGET_NR_chroot:
7310 if (!(p = lock_user_string(arg1)))
7311 goto efault;
7312 ret = get_errno(chroot(p));
7313 unlock_user(p, arg1, 0);
7314 break;
7315 #ifdef TARGET_NR_ustat
7316 case TARGET_NR_ustat:
7317 goto unimplemented;
7318 #endif
7319 #ifdef TARGET_NR_dup2
7320 case TARGET_NR_dup2:
7321 ret = get_errno(dup2(arg1, arg2));
7322 if (ret >= 0) {
7323 fd_trans_dup(arg1, arg2);
7325 break;
7326 #endif
7327 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7328 case TARGET_NR_dup3:
7329 ret = get_errno(dup3(arg1, arg2, arg3));
7330 if (ret >= 0) {
7331 fd_trans_dup(arg1, arg2);
7333 break;
7334 #endif
7335 #ifdef TARGET_NR_getppid /* not on alpha */
7336 case TARGET_NR_getppid:
7337 ret = get_errno(getppid());
7338 break;
7339 #endif
7340 #ifdef TARGET_NR_getpgrp
7341 case TARGET_NR_getpgrp:
7342 ret = get_errno(getpgrp());
7343 break;
7344 #endif
7345 case TARGET_NR_setsid:
7346 ret = get_errno(setsid());
7347 break;
7348 #ifdef TARGET_NR_sigaction
7349 case TARGET_NR_sigaction:
7351 #if defined(TARGET_ALPHA)
7352 struct target_sigaction act, oact, *pact = 0;
7353 struct target_old_sigaction *old_act;
7354 if (arg2) {
7355 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7356 goto efault;
7357 act._sa_handler = old_act->_sa_handler;
7358 target_siginitset(&act.sa_mask, old_act->sa_mask);
7359 act.sa_flags = old_act->sa_flags;
7360 act.sa_restorer = 0;
7361 unlock_user_struct(old_act, arg2, 0);
7362 pact = &act;
7364 ret = get_errno(do_sigaction(arg1, pact, &oact));
7365 if (!is_error(ret) && arg3) {
7366 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7367 goto efault;
7368 old_act->_sa_handler = oact._sa_handler;
7369 old_act->sa_mask = oact.sa_mask.sig[0];
7370 old_act->sa_flags = oact.sa_flags;
7371 unlock_user_struct(old_act, arg3, 1);
7373 #elif defined(TARGET_MIPS)
7374 struct target_sigaction act, oact, *pact, *old_act;
7376 if (arg2) {
7377 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7378 goto efault;
7379 act._sa_handler = old_act->_sa_handler;
7380 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7381 act.sa_flags = old_act->sa_flags;
7382 unlock_user_struct(old_act, arg2, 0);
7383 pact = &act;
7384 } else {
7385 pact = NULL;
7388 ret = get_errno(do_sigaction(arg1, pact, &oact));
7390 if (!is_error(ret) && arg3) {
7391 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7392 goto efault;
7393 old_act->_sa_handler = oact._sa_handler;
7394 old_act->sa_flags = oact.sa_flags;
7395 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7396 old_act->sa_mask.sig[1] = 0;
7397 old_act->sa_mask.sig[2] = 0;
7398 old_act->sa_mask.sig[3] = 0;
7399 unlock_user_struct(old_act, arg3, 1);
7401 #else
7402 struct target_old_sigaction *old_act;
7403 struct target_sigaction act, oact, *pact;
7404 if (arg2) {
7405 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7406 goto efault;
7407 act._sa_handler = old_act->_sa_handler;
7408 target_siginitset(&act.sa_mask, old_act->sa_mask);
7409 act.sa_flags = old_act->sa_flags;
7410 act.sa_restorer = old_act->sa_restorer;
7411 unlock_user_struct(old_act, arg2, 0);
7412 pact = &act;
7413 } else {
7414 pact = NULL;
7416 ret = get_errno(do_sigaction(arg1, pact, &oact));
7417 if (!is_error(ret) && arg3) {
7418 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7419 goto efault;
7420 old_act->_sa_handler = oact._sa_handler;
7421 old_act->sa_mask = oact.sa_mask.sig[0];
7422 old_act->sa_flags = oact.sa_flags;
7423 old_act->sa_restorer = oact.sa_restorer;
7424 unlock_user_struct(old_act, arg3, 1);
7426 #endif
7428 break;
7429 #endif
7430 case TARGET_NR_rt_sigaction:
7432 #if defined(TARGET_ALPHA)
7433 struct target_sigaction act, oact, *pact = 0;
7434 struct target_rt_sigaction *rt_act;
7435 /* ??? arg4 == sizeof(sigset_t). */
7436 if (arg2) {
7437 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7438 goto efault;
7439 act._sa_handler = rt_act->_sa_handler;
7440 act.sa_mask = rt_act->sa_mask;
7441 act.sa_flags = rt_act->sa_flags;
7442 act.sa_restorer = arg5;
7443 unlock_user_struct(rt_act, arg2, 0);
7444 pact = &act;
7446 ret = get_errno(do_sigaction(arg1, pact, &oact));
7447 if (!is_error(ret) && arg3) {
7448 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7449 goto efault;
7450 rt_act->_sa_handler = oact._sa_handler;
7451 rt_act->sa_mask = oact.sa_mask;
7452 rt_act->sa_flags = oact.sa_flags;
7453 unlock_user_struct(rt_act, arg3, 1);
7455 #else
7456 struct target_sigaction *act;
7457 struct target_sigaction *oact;
7459 if (arg2) {
7460 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
7461 goto efault;
7462 } else
7463 act = NULL;
7464 if (arg3) {
7465 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7466 ret = -TARGET_EFAULT;
7467 goto rt_sigaction_fail;
7469 } else
7470 oact = NULL;
7471 ret = get_errno(do_sigaction(arg1, act, oact));
7472 rt_sigaction_fail:
7473 if (act)
7474 unlock_user_struct(act, arg2, 0);
7475 if (oact)
7476 unlock_user_struct(oact, arg3, 1);
7477 #endif
7479 break;
7480 #ifdef TARGET_NR_sgetmask /* not on alpha */
7481 case TARGET_NR_sgetmask:
7483 sigset_t cur_set;
7484 abi_ulong target_set;
7485 do_sigprocmask(0, NULL, &cur_set);
7486 host_to_target_old_sigset(&target_set, &cur_set);
7487 ret = target_set;
7489 break;
7490 #endif
7491 #ifdef TARGET_NR_ssetmask /* not on alpha */
7492 case TARGET_NR_ssetmask:
7494 sigset_t set, oset, cur_set;
7495 abi_ulong target_set = arg1;
7496 do_sigprocmask(0, NULL, &cur_set);
7497 target_to_host_old_sigset(&set, &target_set);
7498 sigorset(&set, &set, &cur_set);
7499 do_sigprocmask(SIG_SETMASK, &set, &oset);
7500 host_to_target_old_sigset(&target_set, &oset);
7501 ret = target_set;
7503 break;
7504 #endif
7505 #ifdef TARGET_NR_sigprocmask
7506 case TARGET_NR_sigprocmask:
7508 #if defined(TARGET_ALPHA)
7509 sigset_t set, oldset;
7510 abi_ulong mask;
7511 int how;
7513 switch (arg1) {
7514 case TARGET_SIG_BLOCK:
7515 how = SIG_BLOCK;
7516 break;
7517 case TARGET_SIG_UNBLOCK:
7518 how = SIG_UNBLOCK;
7519 break;
7520 case TARGET_SIG_SETMASK:
7521 how = SIG_SETMASK;
7522 break;
7523 default:
7524 ret = -TARGET_EINVAL;
7525 goto fail;
7527 mask = arg2;
7528 target_to_host_old_sigset(&set, &mask);
7530 ret = get_errno(do_sigprocmask(how, &set, &oldset));
7531 if (!is_error(ret)) {
7532 host_to_target_old_sigset(&mask, &oldset);
7533 ret = mask;
7534 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7536 #else
7537 sigset_t set, oldset, *set_ptr;
7538 int how;
7540 if (arg2) {
7541 switch (arg1) {
7542 case TARGET_SIG_BLOCK:
7543 how = SIG_BLOCK;
7544 break;
7545 case TARGET_SIG_UNBLOCK:
7546 how = SIG_UNBLOCK;
7547 break;
7548 case TARGET_SIG_SETMASK:
7549 how = SIG_SETMASK;
7550 break;
7551 default:
7552 ret = -TARGET_EINVAL;
7553 goto fail;
7555 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7556 goto efault;
7557 target_to_host_old_sigset(&set, p);
7558 unlock_user(p, arg2, 0);
7559 set_ptr = &set;
7560 } else {
7561 how = 0;
7562 set_ptr = NULL;
7564 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
7565 if (!is_error(ret) && arg3) {
7566 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7567 goto efault;
7568 host_to_target_old_sigset(p, &oldset);
7569 unlock_user(p, arg3, sizeof(target_sigset_t));
7571 #endif
7573 break;
7574 #endif
7575 case TARGET_NR_rt_sigprocmask:
7577 int how = arg1;
7578 sigset_t set, oldset, *set_ptr;
7580 if (arg2) {
7581 switch(how) {
7582 case TARGET_SIG_BLOCK:
7583 how = SIG_BLOCK;
7584 break;
7585 case TARGET_SIG_UNBLOCK:
7586 how = SIG_UNBLOCK;
7587 break;
7588 case TARGET_SIG_SETMASK:
7589 how = SIG_SETMASK;
7590 break;
7591 default:
7592 ret = -TARGET_EINVAL;
7593 goto fail;
7595 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7596 goto efault;
7597 target_to_host_sigset(&set, p);
7598 unlock_user(p, arg2, 0);
7599 set_ptr = &set;
7600 } else {
7601 how = 0;
7602 set_ptr = NULL;
7604 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
7605 if (!is_error(ret) && arg3) {
7606 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7607 goto efault;
7608 host_to_target_sigset(p, &oldset);
7609 unlock_user(p, arg3, sizeof(target_sigset_t));
7612 break;
7613 #ifdef TARGET_NR_sigpending
7614 case TARGET_NR_sigpending:
7616 sigset_t set;
7617 ret = get_errno(sigpending(&set));
7618 if (!is_error(ret)) {
7619 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7620 goto efault;
7621 host_to_target_old_sigset(p, &set);
7622 unlock_user(p, arg1, sizeof(target_sigset_t));
7625 break;
7626 #endif
7627 case TARGET_NR_rt_sigpending:
7629 sigset_t set;
7630 ret = get_errno(sigpending(&set));
7631 if (!is_error(ret)) {
7632 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7633 goto efault;
7634 host_to_target_sigset(p, &set);
7635 unlock_user(p, arg1, sizeof(target_sigset_t));
7638 break;
7639 #ifdef TARGET_NR_sigsuspend
7640 case TARGET_NR_sigsuspend:
7642 sigset_t set;
7643 #if defined(TARGET_ALPHA)
7644 abi_ulong mask = arg1;
7645 target_to_host_old_sigset(&set, &mask);
7646 #else
7647 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7648 goto efault;
7649 target_to_host_old_sigset(&set, p);
7650 unlock_user(p, arg1, 0);
7651 #endif
7652 ret = get_errno(safe_rt_sigsuspend(&set, SIGSET_T_SIZE));
7654 break;
7655 #endif
7656 case TARGET_NR_rt_sigsuspend:
7658 sigset_t set;
7659 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7660 goto efault;
7661 target_to_host_sigset(&set, p);
7662 unlock_user(p, arg1, 0);
7663 ret = get_errno(safe_rt_sigsuspend(&set, SIGSET_T_SIZE));
7665 break;
7666 case TARGET_NR_rt_sigtimedwait:
7668 sigset_t set;
7669 struct timespec uts, *puts;
7670 siginfo_t uinfo;
7672 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7673 goto efault;
7674 target_to_host_sigset(&set, p);
7675 unlock_user(p, arg1, 0);
7676 if (arg3) {
7677 puts = &uts;
7678 target_to_host_timespec(puts, arg3);
7679 } else {
7680 puts = NULL;
7682 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
7683 if (!is_error(ret)) {
7684 if (arg2) {
7685 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
7687 if (!p) {
7688 goto efault;
7690 host_to_target_siginfo(p, &uinfo);
7691 unlock_user(p, arg2, sizeof(target_siginfo_t));
7693 ret = host_to_target_signal(ret);
7696 break;
7697 case TARGET_NR_rt_sigqueueinfo:
7699 siginfo_t uinfo;
7700 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
7701 goto efault;
7702 target_to_host_siginfo(&uinfo, p);
7703 unlock_user(p, arg1, 0);
7704 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
7706 break;
7707 #ifdef TARGET_NR_sigreturn
7708 case TARGET_NR_sigreturn:
7709 ret = do_sigreturn(cpu_env);
7710 break;
7711 #endif
7712 case TARGET_NR_rt_sigreturn:
7713 ret = do_rt_sigreturn(cpu_env);
7714 break;
7715 case TARGET_NR_sethostname:
7716 if (!(p = lock_user_string(arg1)))
7717 goto efault;
7718 ret = get_errno(sethostname(p, arg2));
7719 unlock_user(p, arg1, 0);
7720 break;
7721 case TARGET_NR_setrlimit:
7723 int resource = target_to_host_resource(arg1);
7724 struct target_rlimit *target_rlim;
7725 struct rlimit rlim;
7726 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
7727 goto efault;
7728 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
7729 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
7730 unlock_user_struct(target_rlim, arg2, 0);
7731 ret = get_errno(setrlimit(resource, &rlim));
7733 break;
7734 case TARGET_NR_getrlimit:
7736 int resource = target_to_host_resource(arg1);
7737 struct target_rlimit *target_rlim;
7738 struct rlimit rlim;
7740 ret = get_errno(getrlimit(resource, &rlim));
7741 if (!is_error(ret)) {
7742 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7743 goto efault;
7744 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7745 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7746 unlock_user_struct(target_rlim, arg2, 1);
7749 break;
7750 case TARGET_NR_getrusage:
7752 struct rusage rusage;
7753 ret = get_errno(getrusage(arg1, &rusage));
7754 if (!is_error(ret)) {
7755 ret = host_to_target_rusage(arg2, &rusage);
7758 break;
7759 case TARGET_NR_gettimeofday:
7761 struct timeval tv;
7762 ret = get_errno(gettimeofday(&tv, NULL));
7763 if (!is_error(ret)) {
7764 if (copy_to_user_timeval(arg1, &tv))
7765 goto efault;
7768 break;
7769 case TARGET_NR_settimeofday:
7771 struct timeval tv, *ptv = NULL;
7772 struct timezone tz, *ptz = NULL;
7774 if (arg1) {
7775 if (copy_from_user_timeval(&tv, arg1)) {
7776 goto efault;
7778 ptv = &tv;
7781 if (arg2) {
7782 if (copy_from_user_timezone(&tz, arg2)) {
7783 goto efault;
7785 ptz = &tz;
7788 ret = get_errno(settimeofday(ptv, ptz));
7790 break;
7791 #if defined(TARGET_NR_select)
7792 case TARGET_NR_select:
7793 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7794 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7795 #else
7797 struct target_sel_arg_struct *sel;
7798 abi_ulong inp, outp, exp, tvp;
7799 long nsel;
7801 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
7802 goto efault;
7803 nsel = tswapal(sel->n);
7804 inp = tswapal(sel->inp);
7805 outp = tswapal(sel->outp);
7806 exp = tswapal(sel->exp);
7807 tvp = tswapal(sel->tvp);
7808 unlock_user_struct(sel, arg1, 0);
7809 ret = do_select(nsel, inp, outp, exp, tvp);
7811 #endif
7812 break;
7813 #endif
7814 #ifdef TARGET_NR_pselect6
7815 case TARGET_NR_pselect6:
7817 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
7818 fd_set rfds, wfds, efds;
7819 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
7820 struct timespec ts, *ts_ptr;
7823 * The 6th arg is actually two args smashed together,
7824 * so we cannot use the C library.
7826 sigset_t set;
7827 struct {
7828 sigset_t *set;
7829 size_t size;
7830 } sig, *sig_ptr;
7832 abi_ulong arg_sigset, arg_sigsize, *arg7;
7833 target_sigset_t *target_sigset;
7835 n = arg1;
7836 rfd_addr = arg2;
7837 wfd_addr = arg3;
7838 efd_addr = arg4;
7839 ts_addr = arg5;
7841 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
7842 if (ret) {
7843 goto fail;
7845 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
7846 if (ret) {
7847 goto fail;
7849 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
7850 if (ret) {
7851 goto fail;
7855 * This takes a timespec, and not a timeval, so we cannot
7856 * use the do_select() helper ...
7858 if (ts_addr) {
7859 if (target_to_host_timespec(&ts, ts_addr)) {
7860 goto efault;
7862 ts_ptr = &ts;
7863 } else {
7864 ts_ptr = NULL;
7867 /* Extract the two packed args for the sigset */
7868 if (arg6) {
7869 sig_ptr = &sig;
7870 sig.size = SIGSET_T_SIZE;
7872 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
7873 if (!arg7) {
7874 goto efault;
7876 arg_sigset = tswapal(arg7[0]);
7877 arg_sigsize = tswapal(arg7[1]);
7878 unlock_user(arg7, arg6, 0);
7880 if (arg_sigset) {
7881 sig.set = &set;
7882 if (arg_sigsize != sizeof(*target_sigset)) {
7883 /* Like the kernel, we enforce correct size sigsets */
7884 ret = -TARGET_EINVAL;
7885 goto fail;
7887 target_sigset = lock_user(VERIFY_READ, arg_sigset,
7888 sizeof(*target_sigset), 1);
7889 if (!target_sigset) {
7890 goto efault;
7892 target_to_host_sigset(&set, target_sigset);
7893 unlock_user(target_sigset, arg_sigset, 0);
7894 } else {
7895 sig.set = NULL;
7897 } else {
7898 sig_ptr = NULL;
7901 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
7902 ts_ptr, sig_ptr));
7904 if (!is_error(ret)) {
7905 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
7906 goto efault;
7907 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
7908 goto efault;
7909 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
7910 goto efault;
7912 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
7913 goto efault;
7916 break;
7917 #endif
7918 #ifdef TARGET_NR_symlink
7919 case TARGET_NR_symlink:
7921 void *p2;
7922 p = lock_user_string(arg1);
7923 p2 = lock_user_string(arg2);
7924 if (!p || !p2)
7925 ret = -TARGET_EFAULT;
7926 else
7927 ret = get_errno(symlink(p, p2));
7928 unlock_user(p2, arg2, 0);
7929 unlock_user(p, arg1, 0);
7931 break;
7932 #endif
7933 #if defined(TARGET_NR_symlinkat)
7934 case TARGET_NR_symlinkat:
7936 void *p2;
7937 p = lock_user_string(arg1);
7938 p2 = lock_user_string(arg3);
7939 if (!p || !p2)
7940 ret = -TARGET_EFAULT;
7941 else
7942 ret = get_errno(symlinkat(p, arg2, p2));
7943 unlock_user(p2, arg3, 0);
7944 unlock_user(p, arg1, 0);
7946 break;
7947 #endif
7948 #ifdef TARGET_NR_oldlstat
7949 case TARGET_NR_oldlstat:
7950 goto unimplemented;
7951 #endif
7952 #ifdef TARGET_NR_readlink
7953 case TARGET_NR_readlink:
7955 void *p2;
7956 p = lock_user_string(arg1);
7957 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
7958 if (!p || !p2) {
7959 ret = -TARGET_EFAULT;
7960 } else if (!arg3) {
7961 /* Short circuit this for the magic exe check. */
7962 ret = -TARGET_EINVAL;
7963 } else if (is_proc_myself((const char *)p, "exe")) {
7964 char real[PATH_MAX], *temp;
7965 temp = realpath(exec_path, real);
7966 /* Return value is # of bytes that we wrote to the buffer. */
7967 if (temp == NULL) {
7968 ret = get_errno(-1);
7969 } else {
7970 /* Don't worry about sign mismatch as earlier mapping
7971 * logic would have thrown a bad address error. */
7972 ret = MIN(strlen(real), arg3);
7973 /* We cannot NUL terminate the string. */
7974 memcpy(p2, real, ret);
7976 } else {
7977 ret = get_errno(readlink(path(p), p2, arg3));
7979 unlock_user(p2, arg2, ret);
7980 unlock_user(p, arg1, 0);
7982 break;
7983 #endif
7984 #if defined(TARGET_NR_readlinkat)
7985 case TARGET_NR_readlinkat:
7987 void *p2;
7988 p = lock_user_string(arg2);
7989 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
7990 if (!p || !p2) {
7991 ret = -TARGET_EFAULT;
7992 } else if (is_proc_myself((const char *)p, "exe")) {
7993 char real[PATH_MAX], *temp;
7994 temp = realpath(exec_path, real);
7995 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
7996 snprintf((char *)p2, arg4, "%s", real);
7997 } else {
7998 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8000 unlock_user(p2, arg3, ret);
8001 unlock_user(p, arg2, 0);
8003 break;
8004 #endif
8005 #ifdef TARGET_NR_uselib
8006 case TARGET_NR_uselib:
8007 goto unimplemented;
8008 #endif
8009 #ifdef TARGET_NR_swapon
8010 case TARGET_NR_swapon:
8011 if (!(p = lock_user_string(arg1)))
8012 goto efault;
8013 ret = get_errno(swapon(p, arg2));
8014 unlock_user(p, arg1, 0);
8015 break;
8016 #endif
8017 case TARGET_NR_reboot:
8018 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8019 /* arg4 must be ignored in all other cases */
8020 p = lock_user_string(arg4);
8021 if (!p) {
8022 goto efault;
8024 ret = get_errno(reboot(arg1, arg2, arg3, p));
8025 unlock_user(p, arg4, 0);
8026 } else {
8027 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8029 break;
8030 #ifdef TARGET_NR_readdir
8031 case TARGET_NR_readdir:
8032 goto unimplemented;
8033 #endif
8034 #ifdef TARGET_NR_mmap
8035 case TARGET_NR_mmap:
8036 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8037 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8038 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8039 || defined(TARGET_S390X)
8041 abi_ulong *v;
8042 abi_ulong v1, v2, v3, v4, v5, v6;
8043 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8044 goto efault;
8045 v1 = tswapal(v[0]);
8046 v2 = tswapal(v[1]);
8047 v3 = tswapal(v[2]);
8048 v4 = tswapal(v[3]);
8049 v5 = tswapal(v[4]);
8050 v6 = tswapal(v[5]);
8051 unlock_user(v, arg1, 0);
8052 ret = get_errno(target_mmap(v1, v2, v3,
8053 target_to_host_bitmask(v4, mmap_flags_tbl),
8054 v5, v6));
8056 #else
8057 ret = get_errno(target_mmap(arg1, arg2, arg3,
8058 target_to_host_bitmask(arg4, mmap_flags_tbl),
8059 arg5,
8060 arg6));
8061 #endif
8062 break;
8063 #endif
8064 #ifdef TARGET_NR_mmap2
8065 case TARGET_NR_mmap2:
8066 #ifndef MMAP_SHIFT
8067 #define MMAP_SHIFT 12
8068 #endif
8069 ret = get_errno(target_mmap(arg1, arg2, arg3,
8070 target_to_host_bitmask(arg4, mmap_flags_tbl),
8071 arg5,
8072 arg6 << MMAP_SHIFT));
8073 break;
8074 #endif
8075 case TARGET_NR_munmap:
8076 ret = get_errno(target_munmap(arg1, arg2));
8077 break;
8078 case TARGET_NR_mprotect:
8080 TaskState *ts = cpu->opaque;
8081 /* Special hack to detect libc making the stack executable. */
8082 if ((arg3 & PROT_GROWSDOWN)
8083 && arg1 >= ts->info->stack_limit
8084 && arg1 <= ts->info->start_stack) {
8085 arg3 &= ~PROT_GROWSDOWN;
8086 arg2 = arg2 + arg1 - ts->info->stack_limit;
8087 arg1 = ts->info->stack_limit;
8090 ret = get_errno(target_mprotect(arg1, arg2, arg3));
8091 break;
8092 #ifdef TARGET_NR_mremap
8093 case TARGET_NR_mremap:
8094 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8095 break;
8096 #endif
8097 /* ??? msync/mlock/munlock are broken for softmmu. */
8098 #ifdef TARGET_NR_msync
8099 case TARGET_NR_msync:
8100 ret = get_errno(msync(g2h(arg1), arg2, arg3));
8101 break;
8102 #endif
8103 #ifdef TARGET_NR_mlock
8104 case TARGET_NR_mlock:
8105 ret = get_errno(mlock(g2h(arg1), arg2));
8106 break;
8107 #endif
8108 #ifdef TARGET_NR_munlock
8109 case TARGET_NR_munlock:
8110 ret = get_errno(munlock(g2h(arg1), arg2));
8111 break;
8112 #endif
8113 #ifdef TARGET_NR_mlockall
8114 case TARGET_NR_mlockall:
8115 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8116 break;
8117 #endif
8118 #ifdef TARGET_NR_munlockall
8119 case TARGET_NR_munlockall:
8120 ret = get_errno(munlockall());
8121 break;
8122 #endif
8123 case TARGET_NR_truncate:
8124 if (!(p = lock_user_string(arg1)))
8125 goto efault;
8126 ret = get_errno(truncate(p, arg2));
8127 unlock_user(p, arg1, 0);
8128 break;
8129 case TARGET_NR_ftruncate:
8130 ret = get_errno(ftruncate(arg1, arg2));
8131 break;
8132 case TARGET_NR_fchmod:
8133 ret = get_errno(fchmod(arg1, arg2));
8134 break;
8135 #if defined(TARGET_NR_fchmodat)
8136 case TARGET_NR_fchmodat:
8137 if (!(p = lock_user_string(arg2)))
8138 goto efault;
8139 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8140 unlock_user(p, arg2, 0);
8141 break;
8142 #endif
8143 case TARGET_NR_getpriority:
8144 /* Note that negative values are valid for getpriority, so we must
8145 differentiate based on errno settings. */
8146 errno = 0;
8147 ret = getpriority(arg1, arg2);
8148 if (ret == -1 && errno != 0) {
8149 ret = -host_to_target_errno(errno);
8150 break;
8152 #ifdef TARGET_ALPHA
8153 /* Return value is the unbiased priority. Signal no error. */
8154 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8155 #else
8156 /* Return value is a biased priority to avoid negative numbers. */
8157 ret = 20 - ret;
8158 #endif
8159 break;
8160 case TARGET_NR_setpriority:
8161 ret = get_errno(setpriority(arg1, arg2, arg3));
8162 break;
8163 #ifdef TARGET_NR_profil
8164 case TARGET_NR_profil:
8165 goto unimplemented;
8166 #endif
8167 case TARGET_NR_statfs:
8168 if (!(p = lock_user_string(arg1)))
8169 goto efault;
8170 ret = get_errno(statfs(path(p), &stfs));
8171 unlock_user(p, arg1, 0);
8172 convert_statfs:
8173 if (!is_error(ret)) {
8174 struct target_statfs *target_stfs;
8176 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8177 goto efault;
8178 __put_user(stfs.f_type, &target_stfs->f_type);
8179 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8180 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8181 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8182 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8183 __put_user(stfs.f_files, &target_stfs->f_files);
8184 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8185 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8186 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8187 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8188 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8189 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8190 unlock_user_struct(target_stfs, arg2, 1);
8192 break;
8193 case TARGET_NR_fstatfs:
8194 ret = get_errno(fstatfs(arg1, &stfs));
8195 goto convert_statfs;
8196 #ifdef TARGET_NR_statfs64
8197 case TARGET_NR_statfs64:
8198 if (!(p = lock_user_string(arg1)))
8199 goto efault;
8200 ret = get_errno(statfs(path(p), &stfs));
8201 unlock_user(p, arg1, 0);
8202 convert_statfs64:
8203 if (!is_error(ret)) {
8204 struct target_statfs64 *target_stfs;
8206 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8207 goto efault;
8208 __put_user(stfs.f_type, &target_stfs->f_type);
8209 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8210 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8211 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8212 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8213 __put_user(stfs.f_files, &target_stfs->f_files);
8214 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8215 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8216 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8217 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8218 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8219 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8220 unlock_user_struct(target_stfs, arg3, 1);
8222 break;
8223 case TARGET_NR_fstatfs64:
8224 ret = get_errno(fstatfs(arg1, &stfs));
8225 goto convert_statfs64;
8226 #endif
8227 #ifdef TARGET_NR_ioperm
8228 case TARGET_NR_ioperm:
8229 goto unimplemented;
8230 #endif
8231 #ifdef TARGET_NR_socketcall
8232 case TARGET_NR_socketcall:
8233 ret = do_socketcall(arg1, arg2);
8234 break;
8235 #endif
8236 #ifdef TARGET_NR_accept
8237 case TARGET_NR_accept:
8238 ret = do_accept4(arg1, arg2, arg3, 0);
8239 break;
8240 #endif
8241 #ifdef TARGET_NR_accept4
8242 case TARGET_NR_accept4:
8243 #ifdef CONFIG_ACCEPT4
8244 ret = do_accept4(arg1, arg2, arg3, arg4);
8245 #else
8246 goto unimplemented;
8247 #endif
8248 break;
8249 #endif
8250 #ifdef TARGET_NR_bind
8251 case TARGET_NR_bind:
8252 ret = do_bind(arg1, arg2, arg3);
8253 break;
8254 #endif
8255 #ifdef TARGET_NR_connect
8256 case TARGET_NR_connect:
8257 ret = do_connect(arg1, arg2, arg3);
8258 break;
8259 #endif
8260 #ifdef TARGET_NR_getpeername
8261 case TARGET_NR_getpeername:
8262 ret = do_getpeername(arg1, arg2, arg3);
8263 break;
8264 #endif
8265 #ifdef TARGET_NR_getsockname
8266 case TARGET_NR_getsockname:
8267 ret = do_getsockname(arg1, arg2, arg3);
8268 break;
8269 #endif
8270 #ifdef TARGET_NR_getsockopt
8271 case TARGET_NR_getsockopt:
8272 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8273 break;
8274 #endif
8275 #ifdef TARGET_NR_listen
8276 case TARGET_NR_listen:
8277 ret = get_errno(listen(arg1, arg2));
8278 break;
8279 #endif
8280 #ifdef TARGET_NR_recv
8281 case TARGET_NR_recv:
8282 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8283 break;
8284 #endif
8285 #ifdef TARGET_NR_recvfrom
8286 case TARGET_NR_recvfrom:
8287 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8288 break;
8289 #endif
8290 #ifdef TARGET_NR_recvmsg
8291 case TARGET_NR_recvmsg:
8292 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
8293 break;
8294 #endif
8295 #ifdef TARGET_NR_send
8296 case TARGET_NR_send:
8297 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8298 break;
8299 #endif
8300 #ifdef TARGET_NR_sendmsg
8301 case TARGET_NR_sendmsg:
8302 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
8303 break;
8304 #endif
8305 #ifdef TARGET_NR_sendmmsg
8306 case TARGET_NR_sendmmsg:
8307 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8308 break;
8309 case TARGET_NR_recvmmsg:
8310 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8311 break;
8312 #endif
8313 #ifdef TARGET_NR_sendto
8314 case TARGET_NR_sendto:
8315 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8316 break;
8317 #endif
8318 #ifdef TARGET_NR_shutdown
8319 case TARGET_NR_shutdown:
8320 ret = get_errno(shutdown(arg1, arg2));
8321 break;
8322 #endif
8323 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8324 case TARGET_NR_getrandom:
8325 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8326 if (!p) {
8327 goto efault;
8329 ret = get_errno(getrandom(p, arg2, arg3));
8330 unlock_user(p, arg1, ret);
8331 break;
8332 #endif
8333 #ifdef TARGET_NR_socket
8334 case TARGET_NR_socket:
8335 ret = do_socket(arg1, arg2, arg3);
8336 fd_trans_unregister(ret);
8337 break;
8338 #endif
8339 #ifdef TARGET_NR_socketpair
8340 case TARGET_NR_socketpair:
8341 ret = do_socketpair(arg1, arg2, arg3, arg4);
8342 break;
8343 #endif
8344 #ifdef TARGET_NR_setsockopt
8345 case TARGET_NR_setsockopt:
8346 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8347 break;
8348 #endif
8350 case TARGET_NR_syslog:
8351 if (!(p = lock_user_string(arg2)))
8352 goto efault;
8353 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8354 unlock_user(p, arg2, 0);
8355 break;
8357 case TARGET_NR_setitimer:
8359 struct itimerval value, ovalue, *pvalue;
8361 if (arg2) {
8362 pvalue = &value;
8363 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8364 || copy_from_user_timeval(&pvalue->it_value,
8365 arg2 + sizeof(struct target_timeval)))
8366 goto efault;
8367 } else {
8368 pvalue = NULL;
8370 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8371 if (!is_error(ret) && arg3) {
8372 if (copy_to_user_timeval(arg3,
8373 &ovalue.it_interval)
8374 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8375 &ovalue.it_value))
8376 goto efault;
8379 break;
8380 case TARGET_NR_getitimer:
8382 struct itimerval value;
8384 ret = get_errno(getitimer(arg1, &value));
8385 if (!is_error(ret) && arg2) {
8386 if (copy_to_user_timeval(arg2,
8387 &value.it_interval)
8388 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8389 &value.it_value))
8390 goto efault;
8393 break;
8394 #ifdef TARGET_NR_stat
8395 case TARGET_NR_stat:
8396 if (!(p = lock_user_string(arg1)))
8397 goto efault;
8398 ret = get_errno(stat(path(p), &st));
8399 unlock_user(p, arg1, 0);
8400 goto do_stat;
8401 #endif
8402 #ifdef TARGET_NR_lstat
8403 case TARGET_NR_lstat:
8404 if (!(p = lock_user_string(arg1)))
8405 goto efault;
8406 ret = get_errno(lstat(path(p), &st));
8407 unlock_user(p, arg1, 0);
8408 goto do_stat;
8409 #endif
8410 case TARGET_NR_fstat:
8412 ret = get_errno(fstat(arg1, &st));
8413 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8414 do_stat:
8415 #endif
8416 if (!is_error(ret)) {
8417 struct target_stat *target_st;
8419 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8420 goto efault;
8421 memset(target_st, 0, sizeof(*target_st));
8422 __put_user(st.st_dev, &target_st->st_dev);
8423 __put_user(st.st_ino, &target_st->st_ino);
8424 __put_user(st.st_mode, &target_st->st_mode);
8425 __put_user(st.st_uid, &target_st->st_uid);
8426 __put_user(st.st_gid, &target_st->st_gid);
8427 __put_user(st.st_nlink, &target_st->st_nlink);
8428 __put_user(st.st_rdev, &target_st->st_rdev);
8429 __put_user(st.st_size, &target_st->st_size);
8430 __put_user(st.st_blksize, &target_st->st_blksize);
8431 __put_user(st.st_blocks, &target_st->st_blocks);
8432 __put_user(st.st_atime, &target_st->target_st_atime);
8433 __put_user(st.st_mtime, &target_st->target_st_mtime);
8434 __put_user(st.st_ctime, &target_st->target_st_ctime);
8435 unlock_user_struct(target_st, arg2, 1);
8438 break;
8439 #ifdef TARGET_NR_olduname
8440 case TARGET_NR_olduname:
8441 goto unimplemented;
8442 #endif
8443 #ifdef TARGET_NR_iopl
8444 case TARGET_NR_iopl:
8445 goto unimplemented;
8446 #endif
8447 case TARGET_NR_vhangup:
8448 ret = get_errno(vhangup());
8449 break;
8450 #ifdef TARGET_NR_idle
8451 case TARGET_NR_idle:
8452 goto unimplemented;
8453 #endif
8454 #ifdef TARGET_NR_syscall
8455 case TARGET_NR_syscall:
8456 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8457 arg6, arg7, arg8, 0);
8458 break;
8459 #endif
8460 case TARGET_NR_wait4:
8462 int status;
8463 abi_long status_ptr = arg2;
8464 struct rusage rusage, *rusage_ptr;
8465 abi_ulong target_rusage = arg4;
8466 abi_long rusage_err;
8467 if (target_rusage)
8468 rusage_ptr = &rusage;
8469 else
8470 rusage_ptr = NULL;
8471 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8472 if (!is_error(ret)) {
8473 if (status_ptr && ret) {
8474 status = host_to_target_waitstatus(status);
8475 if (put_user_s32(status, status_ptr))
8476 goto efault;
8478 if (target_rusage) {
8479 rusage_err = host_to_target_rusage(target_rusage, &rusage);
8480 if (rusage_err) {
8481 ret = rusage_err;
8486 break;
8487 #ifdef TARGET_NR_swapoff
8488 case TARGET_NR_swapoff:
8489 if (!(p = lock_user_string(arg1)))
8490 goto efault;
8491 ret = get_errno(swapoff(p));
8492 unlock_user(p, arg1, 0);
8493 break;
8494 #endif
8495 case TARGET_NR_sysinfo:
8497 struct target_sysinfo *target_value;
8498 struct sysinfo value;
8499 ret = get_errno(sysinfo(&value));
8500 if (!is_error(ret) && arg1)
8502 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8503 goto efault;
8504 __put_user(value.uptime, &target_value->uptime);
8505 __put_user(value.loads[0], &target_value->loads[0]);
8506 __put_user(value.loads[1], &target_value->loads[1]);
8507 __put_user(value.loads[2], &target_value->loads[2]);
8508 __put_user(value.totalram, &target_value->totalram);
8509 __put_user(value.freeram, &target_value->freeram);
8510 __put_user(value.sharedram, &target_value->sharedram);
8511 __put_user(value.bufferram, &target_value->bufferram);
8512 __put_user(value.totalswap, &target_value->totalswap);
8513 __put_user(value.freeswap, &target_value->freeswap);
8514 __put_user(value.procs, &target_value->procs);
8515 __put_user(value.totalhigh, &target_value->totalhigh);
8516 __put_user(value.freehigh, &target_value->freehigh);
8517 __put_user(value.mem_unit, &target_value->mem_unit);
8518 unlock_user_struct(target_value, arg1, 1);
8521 break;
8522 #ifdef TARGET_NR_ipc
8523 case TARGET_NR_ipc:
8524 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
8525 break;
8526 #endif
8527 #ifdef TARGET_NR_semget
8528 case TARGET_NR_semget:
8529 ret = get_errno(semget(arg1, arg2, arg3));
8530 break;
8531 #endif
8532 #ifdef TARGET_NR_semop
8533 case TARGET_NR_semop:
8534 ret = do_semop(arg1, arg2, arg3);
8535 break;
8536 #endif
8537 #ifdef TARGET_NR_semctl
8538 case TARGET_NR_semctl:
8539 ret = do_semctl(arg1, arg2, arg3, arg4);
8540 break;
8541 #endif
8542 #ifdef TARGET_NR_msgctl
8543 case TARGET_NR_msgctl:
8544 ret = do_msgctl(arg1, arg2, arg3);
8545 break;
8546 #endif
8547 #ifdef TARGET_NR_msgget
8548 case TARGET_NR_msgget:
8549 ret = get_errno(msgget(arg1, arg2));
8550 break;
8551 #endif
8552 #ifdef TARGET_NR_msgrcv
8553 case TARGET_NR_msgrcv:
8554 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8555 break;
8556 #endif
8557 #ifdef TARGET_NR_msgsnd
8558 case TARGET_NR_msgsnd:
8559 ret = do_msgsnd(arg1, arg2, arg3, arg4);
8560 break;
8561 #endif
8562 #ifdef TARGET_NR_shmget
8563 case TARGET_NR_shmget:
8564 ret = get_errno(shmget(arg1, arg2, arg3));
8565 break;
8566 #endif
8567 #ifdef TARGET_NR_shmctl
8568 case TARGET_NR_shmctl:
8569 ret = do_shmctl(arg1, arg2, arg3);
8570 break;
8571 #endif
8572 #ifdef TARGET_NR_shmat
8573 case TARGET_NR_shmat:
8574 ret = do_shmat(arg1, arg2, arg3);
8575 break;
8576 #endif
8577 #ifdef TARGET_NR_shmdt
8578 case TARGET_NR_shmdt:
8579 ret = do_shmdt(arg1);
8580 break;
8581 #endif
8582 case TARGET_NR_fsync:
8583 ret = get_errno(fsync(arg1));
8584 break;
8585 case TARGET_NR_clone:
8586 /* Linux manages to have three different orderings for its
8587 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8588 * match the kernel's CONFIG_CLONE_* settings.
8589 * Microblaze is further special in that it uses a sixth
8590 * implicit argument to clone for the TLS pointer.
8592 #if defined(TARGET_MICROBLAZE)
8593 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8594 #elif defined(TARGET_CLONE_BACKWARDS)
8595 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8596 #elif defined(TARGET_CLONE_BACKWARDS2)
8597 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8598 #else
8599 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8600 #endif
8601 break;
8602 #ifdef __NR_exit_group
8603 /* new thread calls */
8604 case TARGET_NR_exit_group:
8605 #ifdef TARGET_GPROF
8606 _mcleanup();
8607 #endif
8608 gdb_exit(cpu_env, arg1);
8609 ret = get_errno(exit_group(arg1));
8610 break;
8611 #endif
8612 case TARGET_NR_setdomainname:
8613 if (!(p = lock_user_string(arg1)))
8614 goto efault;
8615 ret = get_errno(setdomainname(p, arg2));
8616 unlock_user(p, arg1, 0);
8617 break;
8618 case TARGET_NR_uname:
8619 /* no need to transcode because we use the linux syscall */
8621 struct new_utsname * buf;
8623 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8624 goto efault;
8625 ret = get_errno(sys_uname(buf));
8626 if (!is_error(ret)) {
8627 /* Overrite the native machine name with whatever is being
8628 emulated. */
8629 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
8630 /* Allow the user to override the reported release. */
8631 if (qemu_uname_release && *qemu_uname_release)
8632 strcpy (buf->release, qemu_uname_release);
8634 unlock_user_struct(buf, arg1, 1);
8636 break;
8637 #ifdef TARGET_I386
8638 case TARGET_NR_modify_ldt:
8639 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
8640 break;
8641 #if !defined(TARGET_X86_64)
8642 case TARGET_NR_vm86old:
8643 goto unimplemented;
8644 case TARGET_NR_vm86:
8645 ret = do_vm86(cpu_env, arg1, arg2);
8646 break;
8647 #endif
8648 #endif
8649 case TARGET_NR_adjtimex:
8650 goto unimplemented;
8651 #ifdef TARGET_NR_create_module
8652 case TARGET_NR_create_module:
8653 #endif
8654 case TARGET_NR_init_module:
8655 case TARGET_NR_delete_module:
8656 #ifdef TARGET_NR_get_kernel_syms
8657 case TARGET_NR_get_kernel_syms:
8658 #endif
8659 goto unimplemented;
8660 case TARGET_NR_quotactl:
8661 goto unimplemented;
8662 case TARGET_NR_getpgid:
8663 ret = get_errno(getpgid(arg1));
8664 break;
8665 case TARGET_NR_fchdir:
8666 ret = get_errno(fchdir(arg1));
8667 break;
8668 #ifdef TARGET_NR_bdflush /* not on x86_64 */
8669 case TARGET_NR_bdflush:
8670 goto unimplemented;
8671 #endif
8672 #ifdef TARGET_NR_sysfs
8673 case TARGET_NR_sysfs:
8674 goto unimplemented;
8675 #endif
8676 case TARGET_NR_personality:
8677 ret = get_errno(personality(arg1));
8678 break;
8679 #ifdef TARGET_NR_afs_syscall
8680 case TARGET_NR_afs_syscall:
8681 goto unimplemented;
8682 #endif
8683 #ifdef TARGET_NR__llseek /* Not on alpha */
8684 case TARGET_NR__llseek:
8686 int64_t res;
8687 #if !defined(__NR_llseek)
8688 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
8689 if (res == -1) {
8690 ret = get_errno(res);
8691 } else {
8692 ret = 0;
8694 #else
8695 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
8696 #endif
8697 if ((ret == 0) && put_user_s64(res, arg4)) {
8698 goto efault;
8701 break;
8702 #endif
8703 #ifdef TARGET_NR_getdents
8704 case TARGET_NR_getdents:
8705 #ifdef __NR_getdents
8706 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8708 struct target_dirent *target_dirp;
8709 struct linux_dirent *dirp;
8710 abi_long count = arg3;
8712 dirp = g_try_malloc(count);
8713 if (!dirp) {
8714 ret = -TARGET_ENOMEM;
8715 goto fail;
8718 ret = get_errno(sys_getdents(arg1, dirp, count));
8719 if (!is_error(ret)) {
8720 struct linux_dirent *de;
8721 struct target_dirent *tde;
8722 int len = ret;
8723 int reclen, treclen;
8724 int count1, tnamelen;
8726 count1 = 0;
8727 de = dirp;
8728 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8729 goto efault;
8730 tde = target_dirp;
8731 while (len > 0) {
8732 reclen = de->d_reclen;
8733 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
8734 assert(tnamelen >= 0);
8735 treclen = tnamelen + offsetof(struct target_dirent, d_name);
8736 assert(count1 + treclen <= count);
8737 tde->d_reclen = tswap16(treclen);
8738 tde->d_ino = tswapal(de->d_ino);
8739 tde->d_off = tswapal(de->d_off);
8740 memcpy(tde->d_name, de->d_name, tnamelen);
8741 de = (struct linux_dirent *)((char *)de + reclen);
8742 len -= reclen;
8743 tde = (struct target_dirent *)((char *)tde + treclen);
8744 count1 += treclen;
8746 ret = count1;
8747 unlock_user(target_dirp, arg2, ret);
8749 g_free(dirp);
8751 #else
8753 struct linux_dirent *dirp;
8754 abi_long count = arg3;
8756 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8757 goto efault;
8758 ret = get_errno(sys_getdents(arg1, dirp, count));
8759 if (!is_error(ret)) {
8760 struct linux_dirent *de;
8761 int len = ret;
8762 int reclen;
8763 de = dirp;
8764 while (len > 0) {
8765 reclen = de->d_reclen;
8766 if (reclen > len)
8767 break;
8768 de->d_reclen = tswap16(reclen);
8769 tswapls(&de->d_ino);
8770 tswapls(&de->d_off);
8771 de = (struct linux_dirent *)((char *)de + reclen);
8772 len -= reclen;
8775 unlock_user(dirp, arg2, ret);
8777 #endif
8778 #else
8779 /* Implement getdents in terms of getdents64 */
8781 struct linux_dirent64 *dirp;
8782 abi_long count = arg3;
8784 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8785 if (!dirp) {
8786 goto efault;
8788 ret = get_errno(sys_getdents64(arg1, dirp, count));
8789 if (!is_error(ret)) {
8790 /* Convert the dirent64 structs to target dirent. We do this
8791 * in-place, since we can guarantee that a target_dirent is no
8792 * larger than a dirent64; however this means we have to be
8793 * careful to read everything before writing in the new format.
8795 struct linux_dirent64 *de;
8796 struct target_dirent *tde;
8797 int len = ret;
8798 int tlen = 0;
8800 de = dirp;
8801 tde = (struct target_dirent *)dirp;
8802 while (len > 0) {
8803 int namelen, treclen;
8804 int reclen = de->d_reclen;
8805 uint64_t ino = de->d_ino;
8806 int64_t off = de->d_off;
8807 uint8_t type = de->d_type;
8809 namelen = strlen(de->d_name);
8810 treclen = offsetof(struct target_dirent, d_name)
8811 + namelen + 2;
8812 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8814 memmove(tde->d_name, de->d_name, namelen + 1);
8815 tde->d_ino = tswapal(ino);
8816 tde->d_off = tswapal(off);
8817 tde->d_reclen = tswap16(treclen);
8818 /* The target_dirent type is in what was formerly a padding
8819 * byte at the end of the structure:
8821 *(((char *)tde) + treclen - 1) = type;
8823 de = (struct linux_dirent64 *)((char *)de + reclen);
8824 tde = (struct target_dirent *)((char *)tde + treclen);
8825 len -= reclen;
8826 tlen += treclen;
8828 ret = tlen;
8830 unlock_user(dirp, arg2, ret);
8832 #endif
8833 break;
8834 #endif /* TARGET_NR_getdents */
8835 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8836 case TARGET_NR_getdents64:
8838 struct linux_dirent64 *dirp;
8839 abi_long count = arg3;
8840 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8841 goto efault;
8842 ret = get_errno(sys_getdents64(arg1, dirp, count));
8843 if (!is_error(ret)) {
8844 struct linux_dirent64 *de;
8845 int len = ret;
8846 int reclen;
8847 de = dirp;
8848 while (len > 0) {
8849 reclen = de->d_reclen;
8850 if (reclen > len)
8851 break;
8852 de->d_reclen = tswap16(reclen);
8853 tswap64s((uint64_t *)&de->d_ino);
8854 tswap64s((uint64_t *)&de->d_off);
8855 de = (struct linux_dirent64 *)((char *)de + reclen);
8856 len -= reclen;
8859 unlock_user(dirp, arg2, ret);
8861 break;
8862 #endif /* TARGET_NR_getdents64 */
8863 #if defined(TARGET_NR__newselect)
8864 case TARGET_NR__newselect:
8865 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8866 break;
8867 #endif
8868 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8869 # ifdef TARGET_NR_poll
8870 case TARGET_NR_poll:
8871 # endif
8872 # ifdef TARGET_NR_ppoll
8873 case TARGET_NR_ppoll:
8874 # endif
8876 struct target_pollfd *target_pfd;
8877 unsigned int nfds = arg2;
8878 int timeout = arg3;
8879 struct pollfd *pfd;
8880 unsigned int i;
8882 pfd = NULL;
8883 target_pfd = NULL;
8884 if (nfds) {
8885 target_pfd = lock_user(VERIFY_WRITE, arg1,
8886 sizeof(struct target_pollfd) * nfds, 1);
8887 if (!target_pfd) {
8888 goto efault;
8891 pfd = alloca(sizeof(struct pollfd) * nfds);
8892 for (i = 0; i < nfds; i++) {
8893 pfd[i].fd = tswap32(target_pfd[i].fd);
8894 pfd[i].events = tswap16(target_pfd[i].events);
8898 # ifdef TARGET_NR_ppoll
8899 if (num == TARGET_NR_ppoll) {
8900 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
8901 target_sigset_t *target_set;
8902 sigset_t _set, *set = &_set;
8904 if (arg3) {
8905 if (target_to_host_timespec(timeout_ts, arg3)) {
8906 unlock_user(target_pfd, arg1, 0);
8907 goto efault;
8909 } else {
8910 timeout_ts = NULL;
8913 if (arg4) {
8914 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
8915 if (!target_set) {
8916 unlock_user(target_pfd, arg1, 0);
8917 goto efault;
8919 target_to_host_sigset(set, target_set);
8920 } else {
8921 set = NULL;
8924 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts,
8925 set, SIGSET_T_SIZE));
8927 if (!is_error(ret) && arg3) {
8928 host_to_target_timespec(arg3, timeout_ts);
8930 if (arg4) {
8931 unlock_user(target_set, arg4, 0);
8933 } else
8934 # endif
8935 ret = get_errno(poll(pfd, nfds, timeout));
8937 if (!is_error(ret)) {
8938 for(i = 0; i < nfds; i++) {
8939 target_pfd[i].revents = tswap16(pfd[i].revents);
8942 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
8944 break;
8945 #endif
8946 case TARGET_NR_flock:
8947 /* NOTE: the flock constant seems to be the same for every
8948 Linux platform */
8949 ret = get_errno(flock(arg1, arg2));
8950 break;
8951 case TARGET_NR_readv:
8953 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
8954 if (vec != NULL) {
8955 ret = get_errno(readv(arg1, vec, arg3));
8956 unlock_iovec(vec, arg2, arg3, 1);
8957 } else {
8958 ret = -host_to_target_errno(errno);
8961 break;
8962 case TARGET_NR_writev:
8964 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8965 if (vec != NULL) {
8966 ret = get_errno(writev(arg1, vec, arg3));
8967 unlock_iovec(vec, arg2, arg3, 0);
8968 } else {
8969 ret = -host_to_target_errno(errno);
8972 break;
8973 case TARGET_NR_getsid:
8974 ret = get_errno(getsid(arg1));
8975 break;
8976 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
8977 case TARGET_NR_fdatasync:
8978 ret = get_errno(fdatasync(arg1));
8979 break;
8980 #endif
8981 #ifdef TARGET_NR__sysctl
8982 case TARGET_NR__sysctl:
8983 /* We don't implement this, but ENOTDIR is always a safe
8984 return value. */
8985 ret = -TARGET_ENOTDIR;
8986 break;
8987 #endif
8988 case TARGET_NR_sched_getaffinity:
8990 unsigned int mask_size;
8991 unsigned long *mask;
8994 * sched_getaffinity needs multiples of ulong, so need to take
8995 * care of mismatches between target ulong and host ulong sizes.
8997 if (arg2 & (sizeof(abi_ulong) - 1)) {
8998 ret = -TARGET_EINVAL;
8999 break;
9001 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9003 mask = alloca(mask_size);
9004 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9006 if (!is_error(ret)) {
9007 if (ret > arg2) {
9008 /* More data returned than the caller's buffer will fit.
9009 * This only happens if sizeof(abi_long) < sizeof(long)
9010 * and the caller passed us a buffer holding an odd number
9011 * of abi_longs. If the host kernel is actually using the
9012 * extra 4 bytes then fail EINVAL; otherwise we can just
9013 * ignore them and only copy the interesting part.
9015 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9016 if (numcpus > arg2 * 8) {
9017 ret = -TARGET_EINVAL;
9018 break;
9020 ret = arg2;
9023 if (copy_to_user(arg3, mask, ret)) {
9024 goto efault;
9028 break;
9029 case TARGET_NR_sched_setaffinity:
9031 unsigned int mask_size;
9032 unsigned long *mask;
9035 * sched_setaffinity needs multiples of ulong, so need to take
9036 * care of mismatches between target ulong and host ulong sizes.
9038 if (arg2 & (sizeof(abi_ulong) - 1)) {
9039 ret = -TARGET_EINVAL;
9040 break;
9042 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9044 mask = alloca(mask_size);
9045 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9046 goto efault;
9048 memcpy(mask, p, arg2);
9049 unlock_user_struct(p, arg2, 0);
9051 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9053 break;
9054 case TARGET_NR_sched_setparam:
9056 struct sched_param *target_schp;
9057 struct sched_param schp;
9059 if (arg2 == 0) {
9060 return -TARGET_EINVAL;
9062 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9063 goto efault;
9064 schp.sched_priority = tswap32(target_schp->sched_priority);
9065 unlock_user_struct(target_schp, arg2, 0);
9066 ret = get_errno(sched_setparam(arg1, &schp));
9068 break;
9069 case TARGET_NR_sched_getparam:
9071 struct sched_param *target_schp;
9072 struct sched_param schp;
9074 if (arg2 == 0) {
9075 return -TARGET_EINVAL;
9077 ret = get_errno(sched_getparam(arg1, &schp));
9078 if (!is_error(ret)) {
9079 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9080 goto efault;
9081 target_schp->sched_priority = tswap32(schp.sched_priority);
9082 unlock_user_struct(target_schp, arg2, 1);
9085 break;
9086 case TARGET_NR_sched_setscheduler:
9088 struct sched_param *target_schp;
9089 struct sched_param schp;
9090 if (arg3 == 0) {
9091 return -TARGET_EINVAL;
9093 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9094 goto efault;
9095 schp.sched_priority = tswap32(target_schp->sched_priority);
9096 unlock_user_struct(target_schp, arg3, 0);
9097 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
9099 break;
9100 case TARGET_NR_sched_getscheduler:
9101 ret = get_errno(sched_getscheduler(arg1));
9102 break;
9103 case TARGET_NR_sched_yield:
9104 ret = get_errno(sched_yield());
9105 break;
9106 case TARGET_NR_sched_get_priority_max:
9107 ret = get_errno(sched_get_priority_max(arg1));
9108 break;
9109 case TARGET_NR_sched_get_priority_min:
9110 ret = get_errno(sched_get_priority_min(arg1));
9111 break;
9112 case TARGET_NR_sched_rr_get_interval:
9114 struct timespec ts;
9115 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9116 if (!is_error(ret)) {
9117 ret = host_to_target_timespec(arg2, &ts);
9120 break;
9121 case TARGET_NR_nanosleep:
9123 struct timespec req, rem;
9124 target_to_host_timespec(&req, arg1);
9125 ret = get_errno(nanosleep(&req, &rem));
9126 if (is_error(ret) && arg2) {
9127 host_to_target_timespec(arg2, &rem);
9130 break;
9131 #ifdef TARGET_NR_query_module
9132 case TARGET_NR_query_module:
9133 goto unimplemented;
9134 #endif
9135 #ifdef TARGET_NR_nfsservctl
9136 case TARGET_NR_nfsservctl:
9137 goto unimplemented;
9138 #endif
9139 case TARGET_NR_prctl:
9140 switch (arg1) {
9141 case PR_GET_PDEATHSIG:
9143 int deathsig;
9144 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9145 if (!is_error(ret) && arg2
9146 && put_user_ual(deathsig, arg2)) {
9147 goto efault;
9149 break;
9151 #ifdef PR_GET_NAME
9152 case PR_GET_NAME:
9154 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9155 if (!name) {
9156 goto efault;
9158 ret = get_errno(prctl(arg1, (unsigned long)name,
9159 arg3, arg4, arg5));
9160 unlock_user(name, arg2, 16);
9161 break;
9163 case PR_SET_NAME:
9165 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9166 if (!name) {
9167 goto efault;
9169 ret = get_errno(prctl(arg1, (unsigned long)name,
9170 arg3, arg4, arg5));
9171 unlock_user(name, arg2, 0);
9172 break;
9174 #endif
9175 default:
9176 /* Most prctl options have no pointer arguments */
9177 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9178 break;
9180 break;
9181 #ifdef TARGET_NR_arch_prctl
9182 case TARGET_NR_arch_prctl:
9183 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9184 ret = do_arch_prctl(cpu_env, arg1, arg2);
9185 break;
9186 #else
9187 goto unimplemented;
9188 #endif
9189 #endif
9190 #ifdef TARGET_NR_pread64
9191 case TARGET_NR_pread64:
9192 if (regpairs_aligned(cpu_env)) {
9193 arg4 = arg5;
9194 arg5 = arg6;
9196 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9197 goto efault;
9198 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9199 unlock_user(p, arg2, ret);
9200 break;
9201 case TARGET_NR_pwrite64:
9202 if (regpairs_aligned(cpu_env)) {
9203 arg4 = arg5;
9204 arg5 = arg6;
9206 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9207 goto efault;
9208 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9209 unlock_user(p, arg2, 0);
9210 break;
9211 #endif
9212 case TARGET_NR_getcwd:
9213 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9214 goto efault;
9215 ret = get_errno(sys_getcwd1(p, arg2));
9216 unlock_user(p, arg1, ret);
9217 break;
9218 case TARGET_NR_capget:
9219 case TARGET_NR_capset:
9221 struct target_user_cap_header *target_header;
9222 struct target_user_cap_data *target_data = NULL;
9223 struct __user_cap_header_struct header;
9224 struct __user_cap_data_struct data[2];
9225 struct __user_cap_data_struct *dataptr = NULL;
9226 int i, target_datalen;
9227 int data_items = 1;
9229 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9230 goto efault;
9232 header.version = tswap32(target_header->version);
9233 header.pid = tswap32(target_header->pid);
9235 if (header.version != _LINUX_CAPABILITY_VERSION) {
9236 /* Version 2 and up takes pointer to two user_data structs */
9237 data_items = 2;
9240 target_datalen = sizeof(*target_data) * data_items;
9242 if (arg2) {
9243 if (num == TARGET_NR_capget) {
9244 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9245 } else {
9246 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9248 if (!target_data) {
9249 unlock_user_struct(target_header, arg1, 0);
9250 goto efault;
9253 if (num == TARGET_NR_capset) {
9254 for (i = 0; i < data_items; i++) {
9255 data[i].effective = tswap32(target_data[i].effective);
9256 data[i].permitted = tswap32(target_data[i].permitted);
9257 data[i].inheritable = tswap32(target_data[i].inheritable);
9261 dataptr = data;
9264 if (num == TARGET_NR_capget) {
9265 ret = get_errno(capget(&header, dataptr));
9266 } else {
9267 ret = get_errno(capset(&header, dataptr));
9270 /* The kernel always updates version for both capget and capset */
9271 target_header->version = tswap32(header.version);
9272 unlock_user_struct(target_header, arg1, 1);
9274 if (arg2) {
9275 if (num == TARGET_NR_capget) {
9276 for (i = 0; i < data_items; i++) {
9277 target_data[i].effective = tswap32(data[i].effective);
9278 target_data[i].permitted = tswap32(data[i].permitted);
9279 target_data[i].inheritable = tswap32(data[i].inheritable);
9281 unlock_user(target_data, arg2, target_datalen);
9282 } else {
9283 unlock_user(target_data, arg2, 0);
9286 break;
9288 case TARGET_NR_sigaltstack:
9289 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
9290 break;
9292 #ifdef CONFIG_SENDFILE
9293 case TARGET_NR_sendfile:
9295 off_t *offp = NULL;
9296 off_t off;
9297 if (arg3) {
9298 ret = get_user_sal(off, arg3);
9299 if (is_error(ret)) {
9300 break;
9302 offp = &off;
9304 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9305 if (!is_error(ret) && arg3) {
9306 abi_long ret2 = put_user_sal(off, arg3);
9307 if (is_error(ret2)) {
9308 ret = ret2;
9311 break;
9313 #ifdef TARGET_NR_sendfile64
9314 case TARGET_NR_sendfile64:
9316 off_t *offp = NULL;
9317 off_t off;
9318 if (arg3) {
9319 ret = get_user_s64(off, arg3);
9320 if (is_error(ret)) {
9321 break;
9323 offp = &off;
9325 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9326 if (!is_error(ret) && arg3) {
9327 abi_long ret2 = put_user_s64(off, arg3);
9328 if (is_error(ret2)) {
9329 ret = ret2;
9332 break;
9334 #endif
9335 #else
9336 case TARGET_NR_sendfile:
9337 #ifdef TARGET_NR_sendfile64
9338 case TARGET_NR_sendfile64:
9339 #endif
9340 goto unimplemented;
9341 #endif
9343 #ifdef TARGET_NR_getpmsg
9344 case TARGET_NR_getpmsg:
9345 goto unimplemented;
9346 #endif
9347 #ifdef TARGET_NR_putpmsg
9348 case TARGET_NR_putpmsg:
9349 goto unimplemented;
9350 #endif
9351 #ifdef TARGET_NR_vfork
9352 case TARGET_NR_vfork:
9353 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
9354 0, 0, 0, 0));
9355 break;
9356 #endif
9357 #ifdef TARGET_NR_ugetrlimit
9358 case TARGET_NR_ugetrlimit:
9360 struct rlimit rlim;
9361 int resource = target_to_host_resource(arg1);
9362 ret = get_errno(getrlimit(resource, &rlim));
9363 if (!is_error(ret)) {
9364 struct target_rlimit *target_rlim;
9365 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9366 goto efault;
9367 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9368 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9369 unlock_user_struct(target_rlim, arg2, 1);
9371 break;
9373 #endif
9374 #ifdef TARGET_NR_truncate64
9375 case TARGET_NR_truncate64:
9376 if (!(p = lock_user_string(arg1)))
9377 goto efault;
9378 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9379 unlock_user(p, arg1, 0);
9380 break;
9381 #endif
9382 #ifdef TARGET_NR_ftruncate64
9383 case TARGET_NR_ftruncate64:
9384 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9385 break;
9386 #endif
9387 #ifdef TARGET_NR_stat64
9388 case TARGET_NR_stat64:
9389 if (!(p = lock_user_string(arg1)))
9390 goto efault;
9391 ret = get_errno(stat(path(p), &st));
9392 unlock_user(p, arg1, 0);
9393 if (!is_error(ret))
9394 ret = host_to_target_stat64(cpu_env, arg2, &st);
9395 break;
9396 #endif
9397 #ifdef TARGET_NR_lstat64
9398 case TARGET_NR_lstat64:
9399 if (!(p = lock_user_string(arg1)))
9400 goto efault;
9401 ret = get_errno(lstat(path(p), &st));
9402 unlock_user(p, arg1, 0);
9403 if (!is_error(ret))
9404 ret = host_to_target_stat64(cpu_env, arg2, &st);
9405 break;
9406 #endif
9407 #ifdef TARGET_NR_fstat64
9408 case TARGET_NR_fstat64:
9409 ret = get_errno(fstat(arg1, &st));
9410 if (!is_error(ret))
9411 ret = host_to_target_stat64(cpu_env, arg2, &st);
9412 break;
9413 #endif
9414 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9415 #ifdef TARGET_NR_fstatat64
9416 case TARGET_NR_fstatat64:
9417 #endif
9418 #ifdef TARGET_NR_newfstatat
9419 case TARGET_NR_newfstatat:
9420 #endif
9421 if (!(p = lock_user_string(arg2)))
9422 goto efault;
9423 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
9424 if (!is_error(ret))
9425 ret = host_to_target_stat64(cpu_env, arg3, &st);
9426 break;
9427 #endif
9428 #ifdef TARGET_NR_lchown
9429 case TARGET_NR_lchown:
9430 if (!(p = lock_user_string(arg1)))
9431 goto efault;
9432 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
9433 unlock_user(p, arg1, 0);
9434 break;
9435 #endif
9436 #ifdef TARGET_NR_getuid
9437 case TARGET_NR_getuid:
9438 ret = get_errno(high2lowuid(getuid()));
9439 break;
9440 #endif
9441 #ifdef TARGET_NR_getgid
9442 case TARGET_NR_getgid:
9443 ret = get_errno(high2lowgid(getgid()));
9444 break;
9445 #endif
9446 #ifdef TARGET_NR_geteuid
9447 case TARGET_NR_geteuid:
9448 ret = get_errno(high2lowuid(geteuid()));
9449 break;
9450 #endif
9451 #ifdef TARGET_NR_getegid
9452 case TARGET_NR_getegid:
9453 ret = get_errno(high2lowgid(getegid()));
9454 break;
9455 #endif
9456 case TARGET_NR_setreuid:
9457 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
9458 break;
9459 case TARGET_NR_setregid:
9460 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
9461 break;
9462 case TARGET_NR_getgroups:
9464 int gidsetsize = arg1;
9465 target_id *target_grouplist;
9466 gid_t *grouplist;
9467 int i;
9469 grouplist = alloca(gidsetsize * sizeof(gid_t));
9470 ret = get_errno(getgroups(gidsetsize, grouplist));
9471 if (gidsetsize == 0)
9472 break;
9473 if (!is_error(ret)) {
9474 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
9475 if (!target_grouplist)
9476 goto efault;
9477 for(i = 0;i < ret; i++)
9478 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
9479 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
9482 break;
9483 case TARGET_NR_setgroups:
9485 int gidsetsize = arg1;
9486 target_id *target_grouplist;
9487 gid_t *grouplist = NULL;
9488 int i;
9489 if (gidsetsize) {
9490 grouplist = alloca(gidsetsize * sizeof(gid_t));
9491 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
9492 if (!target_grouplist) {
9493 ret = -TARGET_EFAULT;
9494 goto fail;
9496 for (i = 0; i < gidsetsize; i++) {
9497 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
9499 unlock_user(target_grouplist, arg2, 0);
9501 ret = get_errno(setgroups(gidsetsize, grouplist));
9503 break;
9504 case TARGET_NR_fchown:
9505 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
9506 break;
9507 #if defined(TARGET_NR_fchownat)
9508 case TARGET_NR_fchownat:
9509 if (!(p = lock_user_string(arg2)))
9510 goto efault;
9511 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
9512 low2highgid(arg4), arg5));
9513 unlock_user(p, arg2, 0);
9514 break;
9515 #endif
9516 #ifdef TARGET_NR_setresuid
9517 case TARGET_NR_setresuid:
9518 ret = get_errno(sys_setresuid(low2highuid(arg1),
9519 low2highuid(arg2),
9520 low2highuid(arg3)));
9521 break;
9522 #endif
9523 #ifdef TARGET_NR_getresuid
9524 case TARGET_NR_getresuid:
9526 uid_t ruid, euid, suid;
9527 ret = get_errno(getresuid(&ruid, &euid, &suid));
9528 if (!is_error(ret)) {
9529 if (put_user_id(high2lowuid(ruid), arg1)
9530 || put_user_id(high2lowuid(euid), arg2)
9531 || put_user_id(high2lowuid(suid), arg3))
9532 goto efault;
9535 break;
9536 #endif
#ifdef TARGET_NR_setresgid
    /* Guard fixed: this case was previously wrapped in
     * #ifdef TARGET_NR_getresgid, so a target defining setresgid but not
     * getresgid would silently lose the syscall (and vice versa would not
     * compile). */
    case TARGET_NR_setresgid:
        ret = get_errno(sys_setresgid(low2highgid(arg1),
                                      low2highgid(arg2),
                                      low2highgid(arg3)));
        break;
#endif
9544 #ifdef TARGET_NR_getresgid
9545 case TARGET_NR_getresgid:
9547 gid_t rgid, egid, sgid;
9548 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9549 if (!is_error(ret)) {
9550 if (put_user_id(high2lowgid(rgid), arg1)
9551 || put_user_id(high2lowgid(egid), arg2)
9552 || put_user_id(high2lowgid(sgid), arg3))
9553 goto efault;
9556 break;
9557 #endif
9558 #ifdef TARGET_NR_chown
9559 case TARGET_NR_chown:
9560 if (!(p = lock_user_string(arg1)))
9561 goto efault;
9562 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
9563 unlock_user(p, arg1, 0);
9564 break;
9565 #endif
9566 case TARGET_NR_setuid:
9567 ret = get_errno(sys_setuid(low2highuid(arg1)));
9568 break;
9569 case TARGET_NR_setgid:
9570 ret = get_errno(sys_setgid(low2highgid(arg1)));
9571 break;
9572 case TARGET_NR_setfsuid:
9573 ret = get_errno(setfsuid(arg1));
9574 break;
9575 case TARGET_NR_setfsgid:
9576 ret = get_errno(setfsgid(arg1));
9577 break;
9579 #ifdef TARGET_NR_lchown32
9580 case TARGET_NR_lchown32:
9581 if (!(p = lock_user_string(arg1)))
9582 goto efault;
9583 ret = get_errno(lchown(p, arg2, arg3));
9584 unlock_user(p, arg1, 0);
9585 break;
9586 #endif
9587 #ifdef TARGET_NR_getuid32
9588 case TARGET_NR_getuid32:
9589 ret = get_errno(getuid());
9590 break;
9591 #endif
9593 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9594 /* Alpha specific */
9595 case TARGET_NR_getxuid:
9597 uid_t euid;
9598 euid=geteuid();
9599 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
9601 ret = get_errno(getuid());
9602 break;
9603 #endif
9604 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
9605 /* Alpha specific */
9606 case TARGET_NR_getxgid:
9608 uid_t egid;
9609 egid=getegid();
9610 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
9612 ret = get_errno(getgid());
9613 break;
9614 #endif
9615 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9616 /* Alpha specific */
9617 case TARGET_NR_osf_getsysinfo:
9618 ret = -TARGET_EOPNOTSUPP;
9619 switch (arg1) {
9620 case TARGET_GSI_IEEE_FP_CONTROL:
9622 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
9624 /* Copied from linux ieee_fpcr_to_swcr. */
9625 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
9626 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
9627 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
9628 | SWCR_TRAP_ENABLE_DZE
9629 | SWCR_TRAP_ENABLE_OVF);
9630 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
9631 | SWCR_TRAP_ENABLE_INE);
9632 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
9633 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
9635 if (put_user_u64 (swcr, arg2))
9636 goto efault;
9637 ret = 0;
9639 break;
9641 /* case GSI_IEEE_STATE_AT_SIGNAL:
9642 -- Not implemented in linux kernel.
9643 case GSI_UACPROC:
9644 -- Retrieves current unaligned access state; not much used.
9645 case GSI_PROC_TYPE:
9646 -- Retrieves implver information; surely not used.
9647 case GSI_GET_HWRPB:
9648 -- Grabs a copy of the HWRPB; surely not used.
9651 break;
9652 #endif
9653 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9654 /* Alpha specific */
9655 case TARGET_NR_osf_setsysinfo:
9656 ret = -TARGET_EOPNOTSUPP;
9657 switch (arg1) {
9658 case TARGET_SSI_IEEE_FP_CONTROL:
9660 uint64_t swcr, fpcr, orig_fpcr;
9662 if (get_user_u64 (swcr, arg2)) {
9663 goto efault;
9665 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9666 fpcr = orig_fpcr & FPCR_DYN_MASK;
9668 /* Copied from linux ieee_swcr_to_fpcr. */
9669 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
9670 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
9671 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
9672 | SWCR_TRAP_ENABLE_DZE
9673 | SWCR_TRAP_ENABLE_OVF)) << 48;
9674 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
9675 | SWCR_TRAP_ENABLE_INE)) << 57;
9676 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
9677 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
9679 cpu_alpha_store_fpcr(cpu_env, fpcr);
9680 ret = 0;
9682 break;
9684 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
9686 uint64_t exc, fpcr, orig_fpcr;
9687 int si_code;
9689 if (get_user_u64(exc, arg2)) {
9690 goto efault;
9693 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9695 /* We only add to the exception status here. */
9696 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
9698 cpu_alpha_store_fpcr(cpu_env, fpcr);
9699 ret = 0;
9701 /* Old exceptions are not signaled. */
9702 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
9704 /* If any exceptions set by this call,
9705 and are unmasked, send a signal. */
9706 si_code = 0;
9707 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
9708 si_code = TARGET_FPE_FLTRES;
9710 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
9711 si_code = TARGET_FPE_FLTUND;
9713 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
9714 si_code = TARGET_FPE_FLTOVF;
9716 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
9717 si_code = TARGET_FPE_FLTDIV;
9719 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
9720 si_code = TARGET_FPE_FLTINV;
9722 if (si_code != 0) {
9723 target_siginfo_t info;
9724 info.si_signo = SIGFPE;
9725 info.si_errno = 0;
9726 info.si_code = si_code;
9727 info._sifields._sigfault._addr
9728 = ((CPUArchState *)cpu_env)->pc;
9729 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9732 break;
9734 /* case SSI_NVPAIRS:
9735 -- Used with SSIN_UACPROC to enable unaligned accesses.
9736 case SSI_IEEE_STATE_AT_SIGNAL:
9737 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9738 -- Not implemented in linux kernel
9741 break;
9742 #endif
9743 #ifdef TARGET_NR_osf_sigprocmask
9744 /* Alpha specific. */
9745 case TARGET_NR_osf_sigprocmask:
9747 abi_ulong mask;
9748 int how;
9749 sigset_t set, oldset;
9751 switch(arg1) {
9752 case TARGET_SIG_BLOCK:
9753 how = SIG_BLOCK;
9754 break;
9755 case TARGET_SIG_UNBLOCK:
9756 how = SIG_UNBLOCK;
9757 break;
9758 case TARGET_SIG_SETMASK:
9759 how = SIG_SETMASK;
9760 break;
9761 default:
9762 ret = -TARGET_EINVAL;
9763 goto fail;
9765 mask = arg2;
9766 target_to_host_old_sigset(&set, &mask);
9767 do_sigprocmask(how, &set, &oldset);
9768 host_to_target_old_sigset(&mask, &oldset);
9769 ret = mask;
9771 break;
9772 #endif
9774 #ifdef TARGET_NR_getgid32
9775 case TARGET_NR_getgid32:
9776 ret = get_errno(getgid());
9777 break;
9778 #endif
9779 #ifdef TARGET_NR_geteuid32
9780 case TARGET_NR_geteuid32:
9781 ret = get_errno(geteuid());
9782 break;
9783 #endif
9784 #ifdef TARGET_NR_getegid32
9785 case TARGET_NR_getegid32:
9786 ret = get_errno(getegid());
9787 break;
9788 #endif
9789 #ifdef TARGET_NR_setreuid32
9790 case TARGET_NR_setreuid32:
9791 ret = get_errno(setreuid(arg1, arg2));
9792 break;
9793 #endif
9794 #ifdef TARGET_NR_setregid32
9795 case TARGET_NR_setregid32:
9796 ret = get_errno(setregid(arg1, arg2));
9797 break;
9798 #endif
9799 #ifdef TARGET_NR_getgroups32
9800 case TARGET_NR_getgroups32:
9802 int gidsetsize = arg1;
9803 uint32_t *target_grouplist;
9804 gid_t *grouplist;
9805 int i;
9807 grouplist = alloca(gidsetsize * sizeof(gid_t));
9808 ret = get_errno(getgroups(gidsetsize, grouplist));
9809 if (gidsetsize == 0)
9810 break;
9811 if (!is_error(ret)) {
9812 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
9813 if (!target_grouplist) {
9814 ret = -TARGET_EFAULT;
9815 goto fail;
9817 for(i = 0;i < ret; i++)
9818 target_grouplist[i] = tswap32(grouplist[i]);
9819 unlock_user(target_grouplist, arg2, gidsetsize * 4);
9822 break;
9823 #endif
9824 #ifdef TARGET_NR_setgroups32
9825 case TARGET_NR_setgroups32:
9827 int gidsetsize = arg1;
9828 uint32_t *target_grouplist;
9829 gid_t *grouplist;
9830 int i;
9832 grouplist = alloca(gidsetsize * sizeof(gid_t));
9833 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
9834 if (!target_grouplist) {
9835 ret = -TARGET_EFAULT;
9836 goto fail;
9838 for(i = 0;i < gidsetsize; i++)
9839 grouplist[i] = tswap32(target_grouplist[i]);
9840 unlock_user(target_grouplist, arg2, 0);
9841 ret = get_errno(setgroups(gidsetsize, grouplist));
9843 break;
9844 #endif
9845 #ifdef TARGET_NR_fchown32
9846 case TARGET_NR_fchown32:
9847 ret = get_errno(fchown(arg1, arg2, arg3));
9848 break;
9849 #endif
9850 #ifdef TARGET_NR_setresuid32
9851 case TARGET_NR_setresuid32:
9852 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
9853 break;
9854 #endif
9855 #ifdef TARGET_NR_getresuid32
9856 case TARGET_NR_getresuid32:
9858 uid_t ruid, euid, suid;
9859 ret = get_errno(getresuid(&ruid, &euid, &suid));
9860 if (!is_error(ret)) {
9861 if (put_user_u32(ruid, arg1)
9862 || put_user_u32(euid, arg2)
9863 || put_user_u32(suid, arg3))
9864 goto efault;
9867 break;
9868 #endif
9869 #ifdef TARGET_NR_setresgid32
9870 case TARGET_NR_setresgid32:
9871 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
9872 break;
9873 #endif
9874 #ifdef TARGET_NR_getresgid32
9875 case TARGET_NR_getresgid32:
9877 gid_t rgid, egid, sgid;
9878 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9879 if (!is_error(ret)) {
9880 if (put_user_u32(rgid, arg1)
9881 || put_user_u32(egid, arg2)
9882 || put_user_u32(sgid, arg3))
9883 goto efault;
9886 break;
9887 #endif
9888 #ifdef TARGET_NR_chown32
9889 case TARGET_NR_chown32:
9890 if (!(p = lock_user_string(arg1)))
9891 goto efault;
9892 ret = get_errno(chown(p, arg2, arg3));
9893 unlock_user(p, arg1, 0);
9894 break;
9895 #endif
9896 #ifdef TARGET_NR_setuid32
9897 case TARGET_NR_setuid32:
9898 ret = get_errno(sys_setuid(arg1));
9899 break;
9900 #endif
9901 #ifdef TARGET_NR_setgid32
9902 case TARGET_NR_setgid32:
9903 ret = get_errno(sys_setgid(arg1));
9904 break;
9905 #endif
9906 #ifdef TARGET_NR_setfsuid32
9907 case TARGET_NR_setfsuid32:
9908 ret = get_errno(setfsuid(arg1));
9909 break;
9910 #endif
9911 #ifdef TARGET_NR_setfsgid32
9912 case TARGET_NR_setfsgid32:
9913 ret = get_errno(setfsgid(arg1));
9914 break;
9915 #endif
9917 case TARGET_NR_pivot_root:
9918 goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a;
            ret = -TARGET_EFAULT;
            if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
                goto efault;
            /* arg3 is mincore(2)'s output vector: one status byte per
             * target page of the arg2-byte range.  It is write-only guest
             * memory, not a NUL-terminated string, so lock it with
             * VERIFY_WRITE and an explicit length rather than
             * lock_user_string (which read-scanned for a terminator). */
            p = lock_user(VERIFY_WRITE, arg3,
                          (arg2 + TARGET_PAGE_SIZE - 1) / TARGET_PAGE_SIZE,
                          0);
            if (!p)
                goto mincore_fail;
            ret = get_errno(mincore(a, arg2, p));
            unlock_user(p, arg3, ret);
            mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
9935 #ifdef TARGET_NR_arm_fadvise64_64
9936 case TARGET_NR_arm_fadvise64_64:
9939 * arm_fadvise64_64 looks like fadvise64_64 but
9940 * with different argument order
9942 abi_long temp;
9943 temp = arg3;
9944 arg3 = arg4;
9945 arg4 = temp;
9947 #endif
9948 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
9949 #ifdef TARGET_NR_fadvise64_64
9950 case TARGET_NR_fadvise64_64:
9951 #endif
9952 #ifdef TARGET_NR_fadvise64
9953 case TARGET_NR_fadvise64:
9954 #endif
9955 #ifdef TARGET_S390X
9956 switch (arg4) {
9957 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
9958 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
9959 case 6: arg4 = POSIX_FADV_DONTNEED; break;
9960 case 7: arg4 = POSIX_FADV_NOREUSE; break;
9961 default: break;
9963 #endif
9964 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
9965 break;
9966 #endif
9967 #ifdef TARGET_NR_madvise
9968 case TARGET_NR_madvise:
9969 /* A straight passthrough may not be safe because qemu sometimes
9970 turns private file-backed mappings into anonymous mappings.
9971 This will break MADV_DONTNEED.
9972 This is a hint, so ignoring and returning success is ok. */
9973 ret = get_errno(0);
9974 break;
9975 #endif
9976 #if TARGET_ABI_BITS == 32
9977 case TARGET_NR_fcntl64:
9979 int cmd;
9980 struct flock64 fl;
9981 struct target_flock64 *target_fl;
9982 #ifdef TARGET_ARM
9983 struct target_eabi_flock64 *target_efl;
9984 #endif
9986 cmd = target_to_host_fcntl_cmd(arg2);
9987 if (cmd == -TARGET_EINVAL) {
9988 ret = cmd;
9989 break;
9992 switch(arg2) {
9993 case TARGET_F_GETLK64:
9994 #ifdef TARGET_ARM
9995 if (((CPUARMState *)cpu_env)->eabi) {
9996 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9997 goto efault;
9998 fl.l_type = tswap16(target_efl->l_type);
9999 fl.l_whence = tswap16(target_efl->l_whence);
10000 fl.l_start = tswap64(target_efl->l_start);
10001 fl.l_len = tswap64(target_efl->l_len);
10002 fl.l_pid = tswap32(target_efl->l_pid);
10003 unlock_user_struct(target_efl, arg3, 0);
10004 } else
10005 #endif
10007 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
10008 goto efault;
10009 fl.l_type = tswap16(target_fl->l_type);
10010 fl.l_whence = tswap16(target_fl->l_whence);
10011 fl.l_start = tswap64(target_fl->l_start);
10012 fl.l_len = tswap64(target_fl->l_len);
10013 fl.l_pid = tswap32(target_fl->l_pid);
10014 unlock_user_struct(target_fl, arg3, 0);
10016 ret = get_errno(fcntl(arg1, cmd, &fl));
10017 if (ret == 0) {
10018 #ifdef TARGET_ARM
10019 if (((CPUARMState *)cpu_env)->eabi) {
10020 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
10021 goto efault;
10022 target_efl->l_type = tswap16(fl.l_type);
10023 target_efl->l_whence = tswap16(fl.l_whence);
10024 target_efl->l_start = tswap64(fl.l_start);
10025 target_efl->l_len = tswap64(fl.l_len);
10026 target_efl->l_pid = tswap32(fl.l_pid);
10027 unlock_user_struct(target_efl, arg3, 1);
10028 } else
10029 #endif
10031 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
10032 goto efault;
10033 target_fl->l_type = tswap16(fl.l_type);
10034 target_fl->l_whence = tswap16(fl.l_whence);
10035 target_fl->l_start = tswap64(fl.l_start);
10036 target_fl->l_len = tswap64(fl.l_len);
10037 target_fl->l_pid = tswap32(fl.l_pid);
10038 unlock_user_struct(target_fl, arg3, 1);
10041 break;
10043 case TARGET_F_SETLK64:
10044 case TARGET_F_SETLKW64:
10045 #ifdef TARGET_ARM
10046 if (((CPUARMState *)cpu_env)->eabi) {
10047 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
10048 goto efault;
10049 fl.l_type = tswap16(target_efl->l_type);
10050 fl.l_whence = tswap16(target_efl->l_whence);
10051 fl.l_start = tswap64(target_efl->l_start);
10052 fl.l_len = tswap64(target_efl->l_len);
10053 fl.l_pid = tswap32(target_efl->l_pid);
10054 unlock_user_struct(target_efl, arg3, 0);
10055 } else
10056 #endif
10058 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
10059 goto efault;
10060 fl.l_type = tswap16(target_fl->l_type);
10061 fl.l_whence = tswap16(target_fl->l_whence);
10062 fl.l_start = tswap64(target_fl->l_start);
10063 fl.l_len = tswap64(target_fl->l_len);
10064 fl.l_pid = tswap32(target_fl->l_pid);
10065 unlock_user_struct(target_fl, arg3, 0);
10067 ret = get_errno(fcntl(arg1, cmd, &fl));
10068 break;
10069 default:
10070 ret = do_fcntl(arg1, arg2, arg3);
10071 break;
10073 break;
10075 #endif
10076 #ifdef TARGET_NR_cacheflush
10077 case TARGET_NR_cacheflush:
10078 /* self-modifying code is handled automatically, so nothing needed */
10079 ret = 0;
10080 break;
10081 #endif
10082 #ifdef TARGET_NR_security
10083 case TARGET_NR_security:
10084 goto unimplemented;
10085 #endif
10086 #ifdef TARGET_NR_getpagesize
10087 case TARGET_NR_getpagesize:
10088 ret = TARGET_PAGE_SIZE;
10089 break;
10090 #endif
10091 case TARGET_NR_gettid:
10092 ret = get_errno(gettid());
10093 break;
10094 #ifdef TARGET_NR_readahead
10095 case TARGET_NR_readahead:
10096 #if TARGET_ABI_BITS == 32
10097 if (regpairs_aligned(cpu_env)) {
10098 arg2 = arg3;
10099 arg3 = arg4;
10100 arg4 = arg5;
10102 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
10103 #else
10104 ret = get_errno(readahead(arg1, arg2, arg3));
10105 #endif
10106 break;
10107 #endif
10108 #ifdef CONFIG_ATTR
10109 #ifdef TARGET_NR_setxattr
10110 case TARGET_NR_listxattr:
10111 case TARGET_NR_llistxattr:
10113 void *p, *b = 0;
10114 if (arg2) {
10115 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10116 if (!b) {
10117 ret = -TARGET_EFAULT;
10118 break;
10121 p = lock_user_string(arg1);
10122 if (p) {
10123 if (num == TARGET_NR_listxattr) {
10124 ret = get_errno(listxattr(p, b, arg3));
10125 } else {
10126 ret = get_errno(llistxattr(p, b, arg3));
10128 } else {
10129 ret = -TARGET_EFAULT;
10131 unlock_user(p, arg1, 0);
10132 unlock_user(b, arg2, arg3);
10133 break;
10135 case TARGET_NR_flistxattr:
10137 void *b = 0;
10138 if (arg2) {
10139 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10140 if (!b) {
10141 ret = -TARGET_EFAULT;
10142 break;
10145 ret = get_errno(flistxattr(arg1, b, arg3));
10146 unlock_user(b, arg2, arg3);
10147 break;
10149 case TARGET_NR_setxattr:
10150 case TARGET_NR_lsetxattr:
10152 void *p, *n, *v = 0;
10153 if (arg3) {
10154 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10155 if (!v) {
10156 ret = -TARGET_EFAULT;
10157 break;
10160 p = lock_user_string(arg1);
10161 n = lock_user_string(arg2);
10162 if (p && n) {
10163 if (num == TARGET_NR_setxattr) {
10164 ret = get_errno(setxattr(p, n, v, arg4, arg5));
10165 } else {
10166 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10168 } else {
10169 ret = -TARGET_EFAULT;
10171 unlock_user(p, arg1, 0);
10172 unlock_user(n, arg2, 0);
10173 unlock_user(v, arg3, 0);
10175 break;
10176 case TARGET_NR_fsetxattr:
10178 void *n, *v = 0;
10179 if (arg3) {
10180 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10181 if (!v) {
10182 ret = -TARGET_EFAULT;
10183 break;
10186 n = lock_user_string(arg2);
10187 if (n) {
10188 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10189 } else {
10190 ret = -TARGET_EFAULT;
10192 unlock_user(n, arg2, 0);
10193 unlock_user(v, arg3, 0);
10195 break;
10196 case TARGET_NR_getxattr:
10197 case TARGET_NR_lgetxattr:
10199 void *p, *n, *v = 0;
10200 if (arg3) {
10201 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10202 if (!v) {
10203 ret = -TARGET_EFAULT;
10204 break;
10207 p = lock_user_string(arg1);
10208 n = lock_user_string(arg2);
10209 if (p && n) {
10210 if (num == TARGET_NR_getxattr) {
10211 ret = get_errno(getxattr(p, n, v, arg4));
10212 } else {
10213 ret = get_errno(lgetxattr(p, n, v, arg4));
10215 } else {
10216 ret = -TARGET_EFAULT;
10218 unlock_user(p, arg1, 0);
10219 unlock_user(n, arg2, 0);
10220 unlock_user(v, arg3, arg4);
10222 break;
10223 case TARGET_NR_fgetxattr:
10225 void *n, *v = 0;
10226 if (arg3) {
10227 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10228 if (!v) {
10229 ret = -TARGET_EFAULT;
10230 break;
10233 n = lock_user_string(arg2);
10234 if (n) {
10235 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10236 } else {
10237 ret = -TARGET_EFAULT;
10239 unlock_user(n, arg2, 0);
10240 unlock_user(v, arg3, arg4);
10242 break;
10243 case TARGET_NR_removexattr:
10244 case TARGET_NR_lremovexattr:
10246 void *p, *n;
10247 p = lock_user_string(arg1);
10248 n = lock_user_string(arg2);
10249 if (p && n) {
10250 if (num == TARGET_NR_removexattr) {
10251 ret = get_errno(removexattr(p, n));
10252 } else {
10253 ret = get_errno(lremovexattr(p, n));
10255 } else {
10256 ret = -TARGET_EFAULT;
10258 unlock_user(p, arg1, 0);
10259 unlock_user(n, arg2, 0);
10261 break;
10262 case TARGET_NR_fremovexattr:
10264 void *n;
10265 n = lock_user_string(arg2);
10266 if (n) {
10267 ret = get_errno(fremovexattr(arg1, n));
10268 } else {
10269 ret = -TARGET_EFAULT;
10271 unlock_user(n, arg2, 0);
10273 break;
10274 #endif
10275 #endif /* CONFIG_ATTR */
10276 #ifdef TARGET_NR_set_thread_area
10277 case TARGET_NR_set_thread_area:
10278 #if defined(TARGET_MIPS)
10279 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10280 ret = 0;
10281 break;
10282 #elif defined(TARGET_CRIS)
10283 if (arg1 & 0xff)
10284 ret = -TARGET_EINVAL;
10285 else {
10286 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10287 ret = 0;
10289 break;
10290 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10291 ret = do_set_thread_area(cpu_env, arg1);
10292 break;
10293 #elif defined(TARGET_M68K)
10295 TaskState *ts = cpu->opaque;
10296 ts->tp_value = arg1;
10297 ret = 0;
10298 break;
10300 #else
10301 goto unimplemented_nowarn;
10302 #endif
10303 #endif
10304 #ifdef TARGET_NR_get_thread_area
10305 case TARGET_NR_get_thread_area:
10306 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10307 ret = do_get_thread_area(cpu_env, arg1);
10308 break;
10309 #elif defined(TARGET_M68K)
10311 TaskState *ts = cpu->opaque;
10312 ret = ts->tp_value;
10313 break;
10315 #else
10316 goto unimplemented_nowarn;
10317 #endif
10318 #endif
10319 #ifdef TARGET_NR_getdomainname
10320 case TARGET_NR_getdomainname:
10321 goto unimplemented_nowarn;
10322 #endif
10324 #ifdef TARGET_NR_clock_gettime
10325 case TARGET_NR_clock_gettime:
10327 struct timespec ts;
10328 ret = get_errno(clock_gettime(arg1, &ts));
10329 if (!is_error(ret)) {
10330 host_to_target_timespec(arg2, &ts);
10332 break;
10334 #endif
10335 #ifdef TARGET_NR_clock_getres
10336 case TARGET_NR_clock_getres:
10338 struct timespec ts;
10339 ret = get_errno(clock_getres(arg1, &ts));
10340 if (!is_error(ret)) {
10341 host_to_target_timespec(arg2, &ts);
10343 break;
10345 #endif
10346 #ifdef TARGET_NR_clock_nanosleep
10347 case TARGET_NR_clock_nanosleep:
10349 struct timespec ts;
10350 target_to_host_timespec(&ts, arg3);
10351 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
10352 if (arg4)
10353 host_to_target_timespec(arg4, &ts);
10355 #if defined(TARGET_PPC)
10356 /* clock_nanosleep is odd in that it returns positive errno values.
10357 * On PPC, CR0 bit 3 should be set in such a situation. */
10358 if (ret) {
10359 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10361 #endif
10362 break;
10364 #endif
10366 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10367 case TARGET_NR_set_tid_address:
10368 ret = get_errno(set_tid_address((int *)g2h(arg1)));
10369 break;
10370 #endif
10372 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
10373 case TARGET_NR_tkill:
10374 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
10375 break;
10376 #endif
10378 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
10379 case TARGET_NR_tgkill:
10380 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
10381 target_to_host_signal(arg3)));
10382 break;
10383 #endif
10385 #ifdef TARGET_NR_set_robust_list
10386 case TARGET_NR_set_robust_list:
10387 case TARGET_NR_get_robust_list:
10388 /* The ABI for supporting robust futexes has userspace pass
10389 * the kernel a pointer to a linked list which is updated by
10390 * userspace after the syscall; the list is walked by the kernel
10391 * when the thread exits. Since the linked list in QEMU guest
10392 * memory isn't a valid linked list for the host and we have
10393 * no way to reliably intercept the thread-death event, we can't
10394 * support these. Silently return ENOSYS so that guest userspace
10395 * falls back to a non-robust futex implementation (which should
10396 * be OK except in the corner case of the guest crashing while
10397 * holding a mutex that is shared with another process via
10398 * shared memory).
10400 goto unimplemented_nowarn;
10401 #endif
10403 #if defined(TARGET_NR_utimensat)
10404 case TARGET_NR_utimensat:
10406 struct timespec *tsp, ts[2];
10407 if (!arg3) {
10408 tsp = NULL;
10409 } else {
10410 target_to_host_timespec(ts, arg3);
10411 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
10412 tsp = ts;
10414 if (!arg2)
10415 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10416 else {
10417 if (!(p = lock_user_string(arg2))) {
10418 ret = -TARGET_EFAULT;
10419 goto fail;
10421 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10422 unlock_user(p, arg2, 0);
10425 break;
10426 #endif
10427 case TARGET_NR_futex:
10428 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10429 break;
10430 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10431 case TARGET_NR_inotify_init:
10432 ret = get_errno(sys_inotify_init());
10433 break;
10434 #endif
10435 #ifdef CONFIG_INOTIFY1
10436 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10437 case TARGET_NR_inotify_init1:
10438 ret = get_errno(sys_inotify_init1(arg1));
10439 break;
10440 #endif
10441 #endif
10442 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10443 case TARGET_NR_inotify_add_watch:
10444 p = lock_user_string(arg2);
10445 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
10446 unlock_user(p, arg2, 0);
10447 break;
10448 #endif
10449 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10450 case TARGET_NR_inotify_rm_watch:
10451 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
10452 break;
10453 #endif
10455 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10456 case TARGET_NR_mq_open:
10458 struct mq_attr posix_mq_attr, *attrp;
10460 p = lock_user_string(arg1 - 1);
10461 if (arg4 != 0) {
10462 copy_from_user_mq_attr (&posix_mq_attr, arg4);
10463 attrp = &posix_mq_attr;
10464 } else {
10465 attrp = 0;
10467 ret = get_errno(mq_open(p, arg2, arg3, attrp));
10468 unlock_user (p, arg1, 0);
10470 break;
10472 case TARGET_NR_mq_unlink:
10473 p = lock_user_string(arg1 - 1);
10474 ret = get_errno(mq_unlink(p));
10475 unlock_user (p, arg1, 0);
10476 break;
10478 case TARGET_NR_mq_timedsend:
10480 struct timespec ts;
10482 p = lock_user (VERIFY_READ, arg2, arg3, 1);
10483 if (arg5 != 0) {
10484 target_to_host_timespec(&ts, arg5);
10485 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
10486 host_to_target_timespec(arg5, &ts);
10488 else
10489 ret = get_errno(mq_send(arg1, p, arg3, arg4));
10490 unlock_user (p, arg2, arg3);
10492 break;
10494 case TARGET_NR_mq_timedreceive:
10496 struct timespec ts;
10497 unsigned int prio;
10499 p = lock_user (VERIFY_READ, arg2, arg3, 1);
10500 if (arg5 != 0) {
10501 target_to_host_timespec(&ts, arg5);
10502 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
10503 host_to_target_timespec(arg5, &ts);
10505 else
10506 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
10507 unlock_user (p, arg2, arg3);
10508 if (arg4 != 0)
10509 put_user_u32(prio, arg4);
10511 break;
10513 /* Not implemented for now... */
10514 /* case TARGET_NR_mq_notify: */
10515 /* break; */
10517 case TARGET_NR_mq_getsetattr:
10519 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
10520 ret = 0;
10521 if (arg3 != 0) {
10522 ret = mq_getattr(arg1, &posix_mq_attr_out);
10523 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
10525 if (arg2 != 0) {
10526 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
10527 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
10531 break;
10532 #endif
10534 #ifdef CONFIG_SPLICE
10535 #ifdef TARGET_NR_tee
10536 case TARGET_NR_tee:
10538 ret = get_errno(tee(arg1,arg2,arg3,arg4));
10540 break;
10541 #endif
10542 #ifdef TARGET_NR_splice
10543 case TARGET_NR_splice:
10545 loff_t loff_in, loff_out;
10546 loff_t *ploff_in = NULL, *ploff_out = NULL;
10547 if (arg2) {
10548 if (get_user_u64(loff_in, arg2)) {
10549 goto efault;
10551 ploff_in = &loff_in;
10553 if (arg4) {
10554 if (get_user_u64(loff_out, arg4)) {
10555 goto efault;
10557 ploff_out = &loff_out;
10559 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
10560 if (arg2) {
10561 if (put_user_u64(loff_in, arg2)) {
10562 goto efault;
10565 if (arg4) {
10566 if (put_user_u64(loff_out, arg4)) {
10567 goto efault;
10571 break;
10572 #endif
10573 #ifdef TARGET_NR_vmsplice
10574 case TARGET_NR_vmsplice:
10576 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10577 if (vec != NULL) {
10578 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
10579 unlock_iovec(vec, arg2, arg3, 0);
10580 } else {
10581 ret = -host_to_target_errno(errno);
10584 break;
10585 #endif
10586 #endif /* CONFIG_SPLICE */
10587 #ifdef CONFIG_EVENTFD
10588 #if defined(TARGET_NR_eventfd)
10589 case TARGET_NR_eventfd:
10590 ret = get_errno(eventfd(arg1, 0));
10591 fd_trans_unregister(ret);
10592 break;
10593 #endif
10594 #if defined(TARGET_NR_eventfd2)
10595 case TARGET_NR_eventfd2:
10597 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
10598 if (arg2 & TARGET_O_NONBLOCK) {
10599 host_flags |= O_NONBLOCK;
10601 if (arg2 & TARGET_O_CLOEXEC) {
10602 host_flags |= O_CLOEXEC;
10604 ret = get_errno(eventfd(arg1, host_flags));
10605 fd_trans_unregister(ret);
10606 break;
10608 #endif
10609 #endif /* CONFIG_EVENTFD */
10610 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10611 case TARGET_NR_fallocate:
10612 #if TARGET_ABI_BITS == 32
10613 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
10614 target_offset64(arg5, arg6)));
10615 #else
10616 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
10617 #endif
10618 break;
10619 #endif
10620 #if defined(CONFIG_SYNC_FILE_RANGE)
10621 #if defined(TARGET_NR_sync_file_range)
10622 case TARGET_NR_sync_file_range:
10623 #if TARGET_ABI_BITS == 32
10624 #if defined(TARGET_MIPS)
10625 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10626 target_offset64(arg5, arg6), arg7));
10627 #else
10628 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
10629 target_offset64(arg4, arg5), arg6));
10630 #endif /* !TARGET_MIPS */
10631 #else
10632 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
10633 #endif
10634 break;
10635 #endif
10636 #if defined(TARGET_NR_sync_file_range2)
10637 case TARGET_NR_sync_file_range2:
10638 /* This is like sync_file_range but the arguments are reordered */
10639 #if TARGET_ABI_BITS == 32
10640 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10641 target_offset64(arg5, arg6), arg2));
10642 #else
10643 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
10644 #endif
10645 break;
10646 #endif
10647 #endif
10648 #if defined(TARGET_NR_signalfd4)
10649 case TARGET_NR_signalfd4:
10650 ret = do_signalfd4(arg1, arg2, arg4);
10651 break;
10652 #endif
10653 #if defined(TARGET_NR_signalfd)
10654 case TARGET_NR_signalfd:
10655 ret = do_signalfd4(arg1, arg2, 0);
10656 break;
10657 #endif
10658 #if defined(CONFIG_EPOLL)
10659 #if defined(TARGET_NR_epoll_create)
10660 case TARGET_NR_epoll_create:
10661 ret = get_errno(epoll_create(arg1));
10662 break;
10663 #endif
10664 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10665 case TARGET_NR_epoll_create1:
10666 ret = get_errno(epoll_create1(arg1));
10667 break;
10668 #endif
10669 #if defined(TARGET_NR_epoll_ctl)
10670 case TARGET_NR_epoll_ctl:
10672 struct epoll_event ep;
10673 struct epoll_event *epp = 0;
10674 if (arg4) {
10675 struct target_epoll_event *target_ep;
10676 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
10677 goto efault;
10679 ep.events = tswap32(target_ep->events);
10680 /* The epoll_data_t union is just opaque data to the kernel,
10681 * so we transfer all 64 bits across and need not worry what
10682 * actual data type it is.
10684 ep.data.u64 = tswap64(target_ep->data.u64);
10685 unlock_user_struct(target_ep, arg4, 0);
10686 epp = &ep;
10688 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
10689 break;
10691 #endif
10693 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
10694 #define IMPLEMENT_EPOLL_PWAIT
10695 #endif
10696 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
10697 #if defined(TARGET_NR_epoll_wait)
10698 case TARGET_NR_epoll_wait:
10699 #endif
10700 #if defined(IMPLEMENT_EPOLL_PWAIT)
10701 case TARGET_NR_epoll_pwait:
10702 #endif
10704 struct target_epoll_event *target_ep;
10705 struct epoll_event *ep;
10706 int epfd = arg1;
10707 int maxevents = arg3;
10708 int timeout = arg4;
10710 target_ep = lock_user(VERIFY_WRITE, arg2,
10711 maxevents * sizeof(struct target_epoll_event), 1);
10712 if (!target_ep) {
10713 goto efault;
10716 ep = alloca(maxevents * sizeof(struct epoll_event));
10718 switch (num) {
10719 #if defined(IMPLEMENT_EPOLL_PWAIT)
10720 case TARGET_NR_epoll_pwait:
10722 target_sigset_t *target_set;
10723 sigset_t _set, *set = &_set;
10725 if (arg5) {
10726 target_set = lock_user(VERIFY_READ, arg5,
10727 sizeof(target_sigset_t), 1);
10728 if (!target_set) {
10729 unlock_user(target_ep, arg2, 0);
10730 goto efault;
10732 target_to_host_sigset(set, target_set);
10733 unlock_user(target_set, arg5, 0);
10734 } else {
10735 set = NULL;
10738 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
10739 break;
10741 #endif
10742 #if defined(TARGET_NR_epoll_wait)
10743 case TARGET_NR_epoll_wait:
10744 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
10745 break;
10746 #endif
10747 default:
10748 ret = -TARGET_ENOSYS;
10750 if (!is_error(ret)) {
10751 int i;
10752 for (i = 0; i < ret; i++) {
10753 target_ep[i].events = tswap32(ep[i].events);
10754 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
10757 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
10758 break;
10760 #endif
10761 #endif
10762 #ifdef TARGET_NR_prlimit64
10763 case TARGET_NR_prlimit64:
10765 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10766 struct target_rlimit64 *target_rnew, *target_rold;
10767 struct host_rlimit64 rnew, rold, *rnewp = 0;
10768 int resource = target_to_host_resource(arg2);
10769 if (arg3) {
10770 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
10771 goto efault;
10773 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
10774 rnew.rlim_max = tswap64(target_rnew->rlim_max);
10775 unlock_user_struct(target_rnew, arg3, 0);
10776 rnewp = &rnew;
10779 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
10780 if (!is_error(ret) && arg4) {
10781 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
10782 goto efault;
10784 target_rold->rlim_cur = tswap64(rold.rlim_cur);
10785 target_rold->rlim_max = tswap64(rold.rlim_max);
10786 unlock_user_struct(target_rold, arg4, 1);
10788 break;
10790 #endif
10791 #ifdef TARGET_NR_gethostname
10792 case TARGET_NR_gethostname:
10794 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10795 if (name) {
10796 ret = get_errno(gethostname(name, arg2));
10797 unlock_user(name, arg1, arg2);
10798 } else {
10799 ret = -TARGET_EFAULT;
10801 break;
10803 #endif
10804 #ifdef TARGET_NR_atomic_cmpxchg_32
10805 case TARGET_NR_atomic_cmpxchg_32:
10807 /* should use start_exclusive from main.c */
10808 abi_ulong mem_value;
10809 if (get_user_u32(mem_value, arg6)) {
10810 target_siginfo_t info;
10811 info.si_signo = SIGSEGV;
10812 info.si_errno = 0;
10813 info.si_code = TARGET_SEGV_MAPERR;
10814 info._sifields._sigfault._addr = arg6;
10815 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10816 ret = 0xdeadbeef;
10819 if (mem_value == arg2)
10820 put_user_u32(arg1, arg6);
10821 ret = mem_value;
10822 break;
10824 #endif
10825 #ifdef TARGET_NR_atomic_barrier
10826 case TARGET_NR_atomic_barrier:
10828 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10829 ret = 0;
10830 break;
10832 #endif
10834 #ifdef TARGET_NR_timer_create
10835 case TARGET_NR_timer_create:
10837 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10839 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
10841 int clkid = arg1;
10842 int timer_index = next_free_host_timer();
10844 if (timer_index < 0) {
10845 ret = -TARGET_EAGAIN;
10846 } else {
10847 timer_t *phtimer = g_posix_timers + timer_index;
10849 if (arg2) {
10850 phost_sevp = &host_sevp;
10851 ret = target_to_host_sigevent(phost_sevp, arg2);
10852 if (ret != 0) {
10853 break;
10857 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
10858 if (ret) {
10859 phtimer = NULL;
10860 } else {
10861 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
10862 goto efault;
10866 break;
10868 #endif
10870 #ifdef TARGET_NR_timer_settime
10871 case TARGET_NR_timer_settime:
10873 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10874 * struct itimerspec * old_value */
10875 target_timer_t timerid = get_timer_id(arg1);
10877 if (timerid < 0) {
10878 ret = timerid;
10879 } else if (arg3 == 0) {
10880 ret = -TARGET_EINVAL;
10881 } else {
10882 timer_t htimer = g_posix_timers[timerid];
10883 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
10885 target_to_host_itimerspec(&hspec_new, arg3);
10886 ret = get_errno(
10887 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
10888 host_to_target_itimerspec(arg2, &hspec_old);
10890 break;
10892 #endif
10894 #ifdef TARGET_NR_timer_gettime
10895 case TARGET_NR_timer_gettime:
10897 /* args: timer_t timerid, struct itimerspec *curr_value */
10898 target_timer_t timerid = get_timer_id(arg1);
10900 if (timerid < 0) {
10901 ret = timerid;
10902 } else if (!arg2) {
10903 ret = -TARGET_EFAULT;
10904 } else {
10905 timer_t htimer = g_posix_timers[timerid];
10906 struct itimerspec hspec;
10907 ret = get_errno(timer_gettime(htimer, &hspec));
10909 if (host_to_target_itimerspec(arg2, &hspec)) {
10910 ret = -TARGET_EFAULT;
10913 break;
10915 #endif
10917 #ifdef TARGET_NR_timer_getoverrun
10918 case TARGET_NR_timer_getoverrun:
10920 /* args: timer_t timerid */
10921 target_timer_t timerid = get_timer_id(arg1);
10923 if (timerid < 0) {
10924 ret = timerid;
10925 } else {
10926 timer_t htimer = g_posix_timers[timerid];
10927 ret = get_errno(timer_getoverrun(htimer));
10929 fd_trans_unregister(ret);
10930 break;
10932 #endif
10934 #ifdef TARGET_NR_timer_delete
10935 case TARGET_NR_timer_delete:
10937 /* args: timer_t timerid */
10938 target_timer_t timerid = get_timer_id(arg1);
10940 if (timerid < 0) {
10941 ret = timerid;
10942 } else {
10943 timer_t htimer = g_posix_timers[timerid];
10944 ret = get_errno(timer_delete(htimer));
10945 g_posix_timers[timerid] = 0;
10947 break;
10949 #endif
10951 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
10952 case TARGET_NR_timerfd_create:
10953 ret = get_errno(timerfd_create(arg1,
10954 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
10955 break;
10956 #endif
10958 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
10959 case TARGET_NR_timerfd_gettime:
10961 struct itimerspec its_curr;
10963 ret = get_errno(timerfd_gettime(arg1, &its_curr));
10965 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
10966 goto efault;
10969 break;
10970 #endif
10972 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
10973 case TARGET_NR_timerfd_settime:
10975 struct itimerspec its_new, its_old, *p_new;
10977 if (arg3) {
10978 if (target_to_host_itimerspec(&its_new, arg3)) {
10979 goto efault;
10981 p_new = &its_new;
10982 } else {
10983 p_new = NULL;
10986 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
10988 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
10989 goto efault;
10992 break;
10993 #endif
10995 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
10996 case TARGET_NR_ioprio_get:
10997 ret = get_errno(ioprio_get(arg1, arg2));
10998 break;
10999 #endif
11001 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11002 case TARGET_NR_ioprio_set:
11003 ret = get_errno(ioprio_set(arg1, arg2, arg3));
11004 break;
11005 #endif
11007 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11008 case TARGET_NR_setns:
11009 ret = get_errno(setns(arg1, arg2));
11010 break;
11011 #endif
11012 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11013 case TARGET_NR_unshare:
11014 ret = get_errno(unshare(arg1));
11015 break;
11016 #endif
11018 default:
11019 unimplemented:
11020 gemu_log("qemu: Unsupported syscall: %d\n", num);
11021 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11022 unimplemented_nowarn:
11023 #endif
11024 ret = -TARGET_ENOSYS;
11025 break;
11027 fail:
11028 #ifdef DEBUG
11029 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
11030 #endif
11031 if(do_strace)
11032 print_syscall_ret(num, ret);
11033 return ret;
11034 efault:
11035 ret = -TARGET_EFAULT;
11036 goto fail;