syscall.c: Fix build with older linux-headers
[qemu/ar7.git] / linux-user / syscall.c
blob7edfe4a228fa4e2b9415167d7ee07d1dc606671a
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #ifdef __ia64__
39 int __clone2(int (*fn)(void *), void *child_stack_base,
40 size_t stack_size, int flags, void *arg, ...);
41 #endif
42 #include <sys/socket.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <sys/poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
113 #include "qemu.h"
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
118 //#define DEBUG
119 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
120 * once. This exercises the codepaths for restart.
122 //#define DEBUG_ERESTARTSYS
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
128 #undef _syscall0
129 #undef _syscall1
130 #undef _syscall2
131 #undef _syscall3
132 #undef _syscall4
133 #undef _syscall5
134 #undef _syscall6
136 #define _syscall0(type,name) \
137 static type name (void) \
139 return syscall(__NR_##name); \
142 #define _syscall1(type,name,type1,arg1) \
143 static type name (type1 arg1) \
145 return syscall(__NR_##name, arg1); \
148 #define _syscall2(type,name,type1,arg1,type2,arg2) \
149 static type name (type1 arg1,type2 arg2) \
151 return syscall(__NR_##name, arg1, arg2); \
154 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
155 static type name (type1 arg1,type2 arg2,type3 arg3) \
157 return syscall(__NR_##name, arg1, arg2, arg3); \
160 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
161 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
166 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
167 type5,arg5) \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
174 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
175 type5,arg5,type6,arg6) \
176 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
177 type6 arg6) \
179 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_getcwd1 __NR_getcwd
185 #define __NR_sys_getdents __NR_getdents
186 #define __NR_sys_getdents64 __NR_getdents64
187 #define __NR_sys_getpriority __NR_getpriority
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_futex __NR_futex
191 #define __NR_sys_inotify_init __NR_inotify_init
192 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
193 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
195 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
196 defined(__s390x__)
197 #define __NR__llseek __NR_lseek
198 #endif
200 /* Newer kernel ports have llseek() instead of _llseek() */
201 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
202 #define TARGET_NR__llseek TARGET_NR_llseek
203 #endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* Fallback when the host libc/kernel headers provide no gettid syscall
 * number.  Must return a *host* errno, matching the real wrapper above.
 */
static int gettid(void) {
    return -ENOSYS;
}
#endif
214 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
215 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
216 #endif
217 #if !defined(__NR_getdents) || \
218 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
219 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
220 #endif
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
222 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
223 loff_t *, res, uint, wh);
224 #endif
225 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
226 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
227 #ifdef __NR_exit_group
228 _syscall1(int,exit_group,int,error_code)
229 #endif
230 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
231 _syscall1(int,set_tid_address,int *,tidptr)
232 #endif
233 #if defined(TARGET_NR_futex) && defined(__NR_futex)
234 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
235 const struct timespec *,timeout,int *,uaddr2,int,val3)
236 #endif
237 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
238 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
239 unsigned long *, user_mask_ptr);
240 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
241 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
242 unsigned long *, user_mask_ptr);
243 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
244 void *, arg);
245 _syscall2(int, capget, struct __user_cap_header_struct *, header,
246 struct __user_cap_data_struct *, data);
247 _syscall2(int, capset, struct __user_cap_header_struct *, header,
248 struct __user_cap_data_struct *, data);
249 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
250 _syscall2(int, ioprio_get, int, which, int, who)
251 #endif
252 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
253 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
254 #endif
255 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
256 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
257 #endif
259 static bitmask_transtbl fcntl_flags_tbl[] = {
260 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
261 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
262 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
263 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
264 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
265 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
266 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
267 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
268 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
269 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
270 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
271 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
272 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
273 #if defined(O_DIRECT)
274 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
275 #endif
276 #if defined(O_NOATIME)
277 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
278 #endif
279 #if defined(O_CLOEXEC)
280 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
281 #endif
282 #if defined(O_PATH)
283 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
284 #endif
285 /* Don't terminate the list prematurely on 64-bit host+guest. */
286 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
287 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
288 #endif
289 { 0, 0, 0, 0 }
/* Per-fd translation hooks: some fds (netlink sockets, signalfds, ...) carry
 * kernel-defined structures that differ between host and target ABIs, so
 * reads/writes on them must be converted in each direction. */
292 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
293 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
294 typedef struct TargetFdTrans {
295 TargetFdDataFunc host_to_target_data;
296 TargetFdDataFunc target_to_host_data;
297 TargetFdAddrFunc target_to_host_addr;
298 } TargetFdTrans;
/* Table of translators indexed by host fd; grown on demand in slices of 64
 * (see fd_trans_register).  Entries are NULL for fds needing no translation. */
300 static TargetFdTrans **target_fd_trans;
/* Number of entries currently allocated in target_fd_trans[]. */
302 static unsigned int target_fd_max;
304 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
306 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
307 return target_fd_trans[fd]->target_to_host_data;
309 return NULL;
312 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
314 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
315 return target_fd_trans[fd]->host_to_target_data;
317 return NULL;
320 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
322 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
323 return target_fd_trans[fd]->target_to_host_addr;
325 return NULL;
328 static void fd_trans_register(int fd, TargetFdTrans *trans)
330 unsigned int oldmax;
332 if (fd >= target_fd_max) {
333 oldmax = target_fd_max;
334 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
335 target_fd_trans = g_renew(TargetFdTrans *,
336 target_fd_trans, target_fd_max);
337 memset((void *)(target_fd_trans + oldmax), 0,
338 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
340 target_fd_trans[fd] = trans;
343 static void fd_trans_unregister(int fd)
345 if (fd >= 0 && fd < target_fd_max) {
346 target_fd_trans[fd] = NULL;
350 static void fd_trans_dup(int oldfd, int newfd)
352 fd_trans_unregister(newfd);
353 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
354 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() wrapper matching the kernel syscall convention: on success
 * return the string length *including* the trailing NUL; on failure
 * return -1 with errno set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) != NULL) {
        return strlen(buf) + 1;
    }
    /* getcwd() sets errno */
    return -1;
}
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Host libc has utimensat(); a NULL pathname means "the fd itself",
 * which libc exposes as futimens().
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL) {
        return futimens(dirfd, times);
    }
    return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
/* No libc wrapper but the kernel has the syscall: call it directly. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int, sys_utimensat, int, dirfd, const char *, pathname,
          const struct timespec *, tsp, int, flags)
#else
/* Neither libc nor kernel support: report ENOSYS like the kernel would. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers over the libc inotify API, named sys_* so the main
 * syscall dispatcher can call them uniformly.
 */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return inotify_init();
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall,
 * so define the kernel's fixed-width layout explicitly. */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time.  A zero
 * entry marks a free slot. */
static timer_t g_posix_timers[32] = { 0, };

/* Claim and return the index of the first free timer slot, or -1 when
 * all 32 are in use.  The slot is marked busy with a dummy value until
 * timer_create() stores the real handle.
 */
static inline int next_free_host_timer(void)
{
    int k;

    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((CPUARMState *)cpu_env)->eabi == 1;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
478 #define ERRNO_TABLE_SIZE 1200
480 /* target_to_host_errno_table[] is initialized from
481 * host_to_target_errno_table[] in syscall_init(). */
482 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
486 * This list is the union of errno values overridden in asm-<arch>/errno.h
487 * minus the errnos that are not actually generic to all archs.
489 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
490 [EAGAIN] = TARGET_EAGAIN,
491 [EIDRM] = TARGET_EIDRM,
492 [ECHRNG] = TARGET_ECHRNG,
493 [EL2NSYNC] = TARGET_EL2NSYNC,
494 [EL3HLT] = TARGET_EL3HLT,
495 [EL3RST] = TARGET_EL3RST,
496 [ELNRNG] = TARGET_ELNRNG,
497 [EUNATCH] = TARGET_EUNATCH,
498 [ENOCSI] = TARGET_ENOCSI,
499 [EL2HLT] = TARGET_EL2HLT,
500 [EDEADLK] = TARGET_EDEADLK,
501 [ENOLCK] = TARGET_ENOLCK,
502 [EBADE] = TARGET_EBADE,
503 [EBADR] = TARGET_EBADR,
504 [EXFULL] = TARGET_EXFULL,
505 [ENOANO] = TARGET_ENOANO,
506 [EBADRQC] = TARGET_EBADRQC,
507 [EBADSLT] = TARGET_EBADSLT,
508 [EBFONT] = TARGET_EBFONT,
509 [ENOSTR] = TARGET_ENOSTR,
510 [ENODATA] = TARGET_ENODATA,
511 [ETIME] = TARGET_ETIME,
512 [ENOSR] = TARGET_ENOSR,
513 [ENONET] = TARGET_ENONET,
514 [ENOPKG] = TARGET_ENOPKG,
515 [EREMOTE] = TARGET_EREMOTE,
516 [ENOLINK] = TARGET_ENOLINK,
517 [EADV] = TARGET_EADV,
518 [ESRMNT] = TARGET_ESRMNT,
519 [ECOMM] = TARGET_ECOMM,
520 [EPROTO] = TARGET_EPROTO,
521 [EDOTDOT] = TARGET_EDOTDOT,
522 [EMULTIHOP] = TARGET_EMULTIHOP,
523 [EBADMSG] = TARGET_EBADMSG,
524 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
525 [EOVERFLOW] = TARGET_EOVERFLOW,
526 [ENOTUNIQ] = TARGET_ENOTUNIQ,
527 [EBADFD] = TARGET_EBADFD,
528 [EREMCHG] = TARGET_EREMCHG,
529 [ELIBACC] = TARGET_ELIBACC,
530 [ELIBBAD] = TARGET_ELIBBAD,
531 [ELIBSCN] = TARGET_ELIBSCN,
532 [ELIBMAX] = TARGET_ELIBMAX,
533 [ELIBEXEC] = TARGET_ELIBEXEC,
534 [EILSEQ] = TARGET_EILSEQ,
535 [ENOSYS] = TARGET_ENOSYS,
536 [ELOOP] = TARGET_ELOOP,
537 [ERESTART] = TARGET_ERESTART,
538 [ESTRPIPE] = TARGET_ESTRPIPE,
539 [ENOTEMPTY] = TARGET_ENOTEMPTY,
540 [EUSERS] = TARGET_EUSERS,
541 [ENOTSOCK] = TARGET_ENOTSOCK,
542 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
543 [EMSGSIZE] = TARGET_EMSGSIZE,
544 [EPROTOTYPE] = TARGET_EPROTOTYPE,
545 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
546 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
547 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
548 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
549 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
550 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
551 [EADDRINUSE] = TARGET_EADDRINUSE,
552 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
553 [ENETDOWN] = TARGET_ENETDOWN,
554 [ENETUNREACH] = TARGET_ENETUNREACH,
555 [ENETRESET] = TARGET_ENETRESET,
556 [ECONNABORTED] = TARGET_ECONNABORTED,
557 [ECONNRESET] = TARGET_ECONNRESET,
558 [ENOBUFS] = TARGET_ENOBUFS,
559 [EISCONN] = TARGET_EISCONN,
560 [ENOTCONN] = TARGET_ENOTCONN,
561 [EUCLEAN] = TARGET_EUCLEAN,
562 [ENOTNAM] = TARGET_ENOTNAM,
563 [ENAVAIL] = TARGET_ENAVAIL,
564 [EISNAM] = TARGET_EISNAM,
565 [EREMOTEIO] = TARGET_EREMOTEIO,
566 [ESHUTDOWN] = TARGET_ESHUTDOWN,
567 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
568 [ETIMEDOUT] = TARGET_ETIMEDOUT,
569 [ECONNREFUSED] = TARGET_ECONNREFUSED,
570 [EHOSTDOWN] = TARGET_EHOSTDOWN,
571 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
572 [EALREADY] = TARGET_EALREADY,
573 [EINPROGRESS] = TARGET_EINPROGRESS,
574 [ESTALE] = TARGET_ESTALE,
575 [ECANCELED] = TARGET_ECANCELED,
576 [ENOMEDIUM] = TARGET_ENOMEDIUM,
577 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
578 #ifdef ENOKEY
579 [ENOKEY] = TARGET_ENOKEY,
580 #endif
581 #ifdef EKEYEXPIRED
582 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
583 #endif
584 #ifdef EKEYREVOKED
585 [EKEYREVOKED] = TARGET_EKEYREVOKED,
586 #endif
587 #ifdef EKEYREJECTED
588 [EKEYREJECTED] = TARGET_EKEYREJECTED,
589 #endif
590 #ifdef EOWNERDEAD
591 [EOWNERDEAD] = TARGET_EOWNERDEAD,
592 #endif
593 #ifdef ENOTRECOVERABLE
594 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
595 #endif
598 static inline int host_to_target_errno(int err)
600 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
601 host_to_target_errno_table[err]) {
602 return host_to_target_errno_table[err];
604 return err;
607 static inline int target_to_host_errno(int err)
609 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
610 target_to_host_errno_table[err]) {
611 return target_to_host_errno_table[err];
613 return err;
616 static inline abi_long get_errno(abi_long ret)
618 if (ret == -1)
619 return -host_to_target_errno(errno);
620 else
621 return ret;
624 static inline int is_error(abi_long ret)
626 return (abi_ulong)ret >= (abi_ulong)(-4096);
629 const char *target_strerror(int err)
631 if (err == TARGET_ERESTARTSYS) {
632 return "To be restarted";
634 if (err == TARGET_QEMU_ESIGRETURN) {
635 return "Successful exit from sigreturn";
638 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
639 return NULL;
641 return strerror(target_to_host_errno(err));
644 #define safe_syscall0(type, name) \
645 static type safe_##name(void) \
647 return safe_syscall(__NR_##name); \
650 #define safe_syscall1(type, name, type1, arg1) \
651 static type safe_##name(type1 arg1) \
653 return safe_syscall(__NR_##name, arg1); \
656 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
657 static type safe_##name(type1 arg1, type2 arg2) \
659 return safe_syscall(__NR_##name, arg1, arg2); \
662 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
663 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
665 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
668 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
669 type4, arg4) \
670 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
672 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
675 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
676 type4, arg4, type5, arg5) \
677 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
678 type5 arg5) \
680 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
683 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
684 type4, arg4, type5, arg5, type6, arg6) \
685 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
686 type5 arg5, type6 arg6) \
688 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
691 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
692 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
693 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
694 int, flags, mode_t, mode)
695 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
696 struct rusage *, rusage)
697 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
698 int, options, struct rusage *, rusage)
699 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
700 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
701 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
702 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
703 struct timespec *, tsp, const sigset_t *, sigmask,
704 size_t, sigsetsize)
705 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
706 int, maxevents, int, timeout, const sigset_t *, sigmask,
707 size_t, sigsetsize)
708 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
709 const struct timespec *,timeout,int *,uaddr2,int,val3)
710 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
711 safe_syscall2(int, kill, pid_t, pid, int, sig)
712 safe_syscall2(int, tkill, int, tid, int, sig)
713 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
714 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
715 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
716 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
717 socklen_t, addrlen)
718 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
719 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
720 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
721 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
722 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
723 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
724 safe_syscall2(int, flock, int, fd, int, operation)
725 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
726 const struct timespec *, uts, size_t, sigsetsize)
727 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
728 int, flags)
729 safe_syscall2(int, nanosleep, const struct timespec *, req,
730 struct timespec *, rem)
731 #ifdef TARGET_NR_clock_nanosleep
732 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
733 const struct timespec *, req, struct timespec *, rem)
734 #endif
735 #ifdef __NR_msgsnd
736 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
737 int, flags)
738 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
739 long, msgtype, int, flags)
740 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
741 unsigned, nsops, const struct timespec *, timeout)
742 #else
743 /* This host kernel architecture uses a single ipc syscall; fake up
744 * wrappers for the sub-operations to hide this implementation detail.
745 * Annoyingly we can't include linux/ipc.h to get the constant definitions
746 * for the call parameter because some structs in there conflict with the
747 * sys/ipc.h ones. So we just define them here, and rely on them being
748 * the same for all host architectures.
750 #define Q_SEMTIMEDOP 4
751 #define Q_MSGSND 11
752 #define Q_MSGRCV 12
753 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
755 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
756 void *, ptr, long, fifth)
757 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
759 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
761 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
763 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
765 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
766 const struct timespec *timeout)
768 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
769 (long)timeout);
771 #endif
772 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
773 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
774 size_t, len, unsigned, prio, const struct timespec *, timeout)
775 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
776 size_t, len, unsigned *, prio, const struct timespec *, timeout)
777 #endif
778 /* We do ioctl like this rather than via safe_syscall3 to preserve the
779 * "third argument might be integer or pointer or not present" behaviour of
780 * the libc function.
782 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
783 /* Similarly for fcntl. Note that callers must always:
784 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
785 * use the flock64 struct rather than unsuffixed flock
786 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
788 #ifdef __NR_fcntl64
789 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
790 #else
791 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
792 #endif
794 static inline int host_to_target_sock_type(int host_type)
796 int target_type;
798 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
799 case SOCK_DGRAM:
800 target_type = TARGET_SOCK_DGRAM;
801 break;
802 case SOCK_STREAM:
803 target_type = TARGET_SOCK_STREAM;
804 break;
805 default:
806 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
807 break;
810 #if defined(SOCK_CLOEXEC)
811 if (host_type & SOCK_CLOEXEC) {
812 target_type |= TARGET_SOCK_CLOEXEC;
814 #endif
816 #if defined(SOCK_NONBLOCK)
817 if (host_type & SOCK_NONBLOCK) {
818 target_type |= TARGET_SOCK_NONBLOCK;
820 #endif
822 return target_type;
825 static abi_ulong target_brk;
826 static abi_ulong target_original_brk;
827 static abi_ulong brk_page;
829 void target_set_brk(abi_ulong new_brk)
831 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
832 brk_page = HOST_PAGE_ALIGN(target_brk);
835 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
836 #define DEBUGF_BRK(message, args...)
838 /* do_brk() must return target values and target errnos. */
/* Implements the guest brk(2): new_brk == 0 queries the current break;
 * otherwise try to move the break to new_brk.  On failure the previous
 * break is returned unchanged (except on Alpha, which wants an errno). */
839 abi_long do_brk(abi_ulong new_brk)
841 abi_long mapped_addr;
842 abi_ulong new_alloc_size;
844 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
846 if (!new_brk) {
847 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
848 return target_brk;
/* The break may never be lowered below its initial value. */
850 if (new_brk < target_original_brk) {
851 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
852 target_brk);
853 return target_brk;
856 /* If the new brk is less than the highest page reserved to the
857 * target heap allocation, set it and we're almost done... */
858 if (new_brk <= brk_page) {
859 /* Heap contents are initialized to zero, as for anonymous
860 * mapped pages. */
861 if (new_brk > target_brk) {
862 memset(g2h(target_brk), 0, new_brk - target_brk);
864 target_brk = new_brk;
865 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
866 return target_brk;
869 /* We need to allocate more memory after the brk... Note that
870 * we don't use MAP_FIXED because that will map over the top of
871 * any existing mapping (like the one with the host libc or qemu
872 * itself); instead we treat "mapped but at wrong address" as
873 * a failure and unmap again.
875 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
876 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
877 PROT_READ|PROT_WRITE,
878 MAP_ANON|MAP_PRIVATE, 0, 0));
880 if (mapped_addr == brk_page) {
881 /* Heap contents are initialized to zero, as for anonymous
882 * mapped pages. Technically the new pages are already
883 * initialized to zero since they *are* anonymous mapped
884 * pages, however we have to take care with the contents that
885 * come from the remaining part of the previous page: it may
886 * contains garbage data due to a previous heap usage (grown
887 * then shrunken). */
888 memset(g2h(target_brk), 0, brk_page - target_brk);
890 target_brk = new_brk;
891 brk_page = HOST_PAGE_ALIGN(target_brk);
892 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
893 target_brk);
894 return target_brk;
895 } else if (mapped_addr != -1) {
896 /* Mapped but at wrong address, meaning there wasn't actually
897 * enough space for this brk.
899 target_munmap(mapped_addr, new_alloc_size);
900 mapped_addr = -1;
901 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
903 else {
904 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
907 #if defined(TARGET_ALPHA)
908 /* We (partially) emulate OSF/1 on Alpha, which requires we
909 return a proper errno, not an unchanged brk value. */
910 return -TARGET_ENOMEM;
911 #endif
912 /* For everything else, return the previous break. */
913 return target_brk;
916 static inline abi_long copy_from_user_fdset(fd_set *fds,
917 abi_ulong target_fds_addr,
918 int n)
920 int i, nw, j, k;
921 abi_ulong b, *target_fds;
923 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
924 if (!(target_fds = lock_user(VERIFY_READ,
925 target_fds_addr,
926 sizeof(abi_ulong) * nw,
927 1)))
928 return -TARGET_EFAULT;
930 FD_ZERO(fds);
931 k = 0;
932 for (i = 0; i < nw; i++) {
933 /* grab the abi_ulong */
934 __get_user(b, &target_fds[i]);
935 for (j = 0; j < TARGET_ABI_BITS; j++) {
936 /* check the bit inside the abi_ulong */
937 if ((b >> j) & 1)
938 FD_SET(k, fds);
939 k++;
943 unlock_user(target_fds, target_fds_addr, 0);
945 return 0;
948 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
949 abi_ulong target_fds_addr,
950 int n)
952 if (target_fds_addr) {
953 if (copy_from_user_fdset(fds, target_fds_addr, n))
954 return -TARGET_EFAULT;
955 *fds_ptr = fds;
956 } else {
957 *fds_ptr = NULL;
959 return 0;
962 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
963 const fd_set *fds,
964 int n)
966 int i, nw, j, k;
967 abi_long v;
968 abi_ulong *target_fds;
970 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
971 if (!(target_fds = lock_user(VERIFY_WRITE,
972 target_fds_addr,
973 sizeof(abi_ulong) * nw,
974 0)))
975 return -TARGET_EFAULT;
977 k = 0;
978 for (i = 0; i < nw; i++) {
979 v = 0;
980 for (j = 0; j < TARGET_ABI_BITS; j++) {
981 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
982 k++;
984 __put_user(v, &target_fds[i]);
987 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
989 return 0;
992 #if defined(__alpha__)
993 #define HOST_HZ 1024
994 #else
995 #define HOST_HZ 100
996 #endif
/* Rescale a clock_t tick count from the host's HZ to the target's HZ.
 * When the rates match this is the identity; otherwise scale through
 * int64_t to avoid overflow on large tick counts.
 */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/* Copy a host struct rusage out to the guest struct target_rusage at
 * target_addr, byte-swapping every field.  Returns 0 on success or
 * -TARGET_EFAULT if the guest address cannot be written.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1037 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1039 abi_ulong target_rlim_swap;
1040 rlim_t result;
1042 target_rlim_swap = tswapal(target_rlim);
1043 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1044 return RLIM_INFINITY;
1046 result = target_rlim_swap;
1047 if (target_rlim_swap != (rlim_t)result)
1048 return RLIM_INFINITY;
1050 return result;
1053 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1055 abi_ulong target_rlim_swap;
1056 abi_ulong result;
1058 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1059 target_rlim_swap = TARGET_RLIM_INFINITY;
1060 else
1061 target_rlim_swap = rlim;
1062 result = tswapal(target_rlim_swap);
1064 return result;
/* Map a guest TARGET_RLIMIT_* resource code onto the host RLIMIT_*
 * value.  The numbering differs between some target ABIs and the host,
 * hence the explicit 1:1 table; unrecognised codes are passed through
 * unchanged so the host getrlimit/setrlimit can reject them itself.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
/* Read a struct timeval from guest memory into *tv.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
/* Write a struct timeval out to guest memory at target_tv_addr.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
/* Read a struct timezone from guest memory into *tz.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
1154 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1155 #include <mqueue.h>
/* Read a POSIX message-queue attribute struct from guest memory.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
/* Write a POSIX message-queue attribute struct out to guest memory.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
1194 #endif
1196 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1197 /* do_select() must return target values and target errnos. */
/* Emulate select(2) for the guest: unpack the three optional fd sets
 * and the optional timeout, run the host safe_pselect6(), then copy the
 * (possibly modified) sets and remaining timeout back to the guest.
 * do_select() must return target values and target errnos.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* Each set may be absent (guest NULL); *_ptr is NULL in that case. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select()'s timeval is converted to the timespec pselect6 expects. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* Copy the possibly-modified fd sets back to the guest. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Write back the remaining timeout updated by the kernel. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1253 #endif
/* Create a pipe honouring flags via the host pipe2(); returns -ENOSYS
 * when the host was built without pipe2() support.
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Emulate pipe()/pipe2() for the guest.  flags != 0 forces pipe2().
 * Returns 0 (or the first fd on the special-cased targets) on success,
 * or a target errno on failure.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* We (partially) emulate OSF/1: the second fd is returned in a
         * register and the first fd is the syscall return value. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default convention: store both fds into the guest int[2] array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
/* Convert a guest ip_mreq/ip_mreqn (distinguished by len) into a host
 * struct ip_mreqn.  The in_addr fields are copied without swapping
 * (they stay in network byte order); only imr_ifindex, present in the
 * larger ip_mreqn form, needs conversion.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
/* Convert a guest sockaddr of size len into the host *addr.  A per-fd
 * translation hook, if registered, takes over completely.  AF_UNIX
 * paths get a termination fixup (see below); AF_NETLINK and AF_PACKET
 * addresses need their multi-byte fields byte-swapped.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* If the last counted byte is non-NUL but the next byte is
             * NUL, widen len by one to include the terminator. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;        /* already host-order from tswap16 */
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
/* Copy a host sockaddr out to guest memory, byte-swapping sa_family
 * (only when len covers it) and the multi-byte fields of AF_NETLINK
 * and AF_PACKET addresses.  len == 0 is a no-op.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only swap sa_family if the (possibly truncated) copy includes it. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* Convert the guest control-message chain of target_msgh into the host
 * chain already allocated in msgh.  Walks both chains in lock-step,
 * converting SCM_RIGHTS fd arrays and SCM_CREDENTIALS; other payload
 * types are copied verbatim with a log message.  On return
 * msgh->msg_controllen holds the space actually used.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length of this guest control message. */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Array of file descriptors: swap each element. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                                        cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the host control-message chain of msgh back into the guest
 * buffer described by target_msgh.  Handles SCM_RIGHTS, SO_TIMESTAMP
 * and SCM_CREDENTIALS explicitly; anything else is copied verbatim
 * with a log message.  Truncation (guest buffer too small) is reported
 * via MSG_CTRUNC, mirroring the kernel's put_cmsg() behaviour.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            /* fall through */
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                                        cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* Byte-swap all fields of a netlink message header in place
 * (host order <-> guest order; the swap is its own inverse).
 */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
/* Walk a buffer of len bytes of host-order netlink messages, converting
 * each to guest order.  Payloads of ordinary messages are converted by
 * the host_to_target_nlmsg callback *before* the header is swapped.
 * Stops after NLMSG_DONE/NLMSG_ERROR or on a malformed length.
 * Returns 0 or a negative error from the callback.
 */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        /* Save the host-order length for advancing after the swap. */
        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
/* Walk a buffer of len bytes of guest-order netlink messages, converting
 * each to host order.  The header is swapped first (it arrives in guest
 * order), then the payload is converted by the target_to_host_nlmsg
 * callback.  Stops after NLMSG_DONE/NLMSG_ERROR or on a malformed
 * length.  Returns 0 or a negative error from the callback.
 */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                              (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        /* Length is still guest-order here, hence the tswap32. */
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
1722 #ifdef CONFIG_RTNETLINK
/* Walk len bytes of host-order netlink attributes, letting the callback
 * convert each payload (with the header still in host order), then
 * swapping the header fields.  Stops on a malformed length.
 * Returns 0 or the first negative error from the callback.
 */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                               (struct nlattr *,
                                                void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        /* Save host-order length before the header is swapped. */
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        ret = host_to_target_nlattr(nlattr, context);
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}
/* Walk len bytes of host-order rtnetlink attributes, letting the
 * callback convert each payload (header still host order), then
 * swapping the header fields.  Stops on a malformed length.
 * Returns 0 or the first negative error from the callback.
 */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                               (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        /* Save host-order length before the header is swapped. */
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        ret = host_to_target_rtattr(rtattr);
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}
1776 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
/* Byte-swap the payload of one IFLA_BR_* bridge attribute according to
 * its type.  Cases are grouped by payload width (none/binary, u8, u16,
 * u32, u64, bridge-id byte array); newer attribute IDs are guarded by
 * #ifdef so the file still builds against older linux-headers.
 * Unknown types are logged and left untouched.  Always returns 0.
 */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
#ifdef IFLA_BR_FDB_FLUSH
    /* no data */
    case IFLA_BR_FDB_FLUSH:
        break;
#endif
#ifdef IFLA_BR_GROUP_ADDR
    /* binary */
    case IFLA_BR_GROUP_ADDR:
        break;
#endif
    /* uint8_t */
    case IFLA_BR_VLAN_FILTERING:
#ifdef IFLA_BR_TOPOLOGY_CHANGE
    case IFLA_BR_TOPOLOGY_CHANGE:
#endif
#ifdef IFLA_BR_TOPOLOGY_CHANGE_DETECTED
    case IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
#endif
#ifdef IFLA_BR_MCAST_ROUTER
    case IFLA_BR_MCAST_ROUTER:
#endif
#ifdef IFLA_BR_MCAST_SNOOPING
    case IFLA_BR_MCAST_SNOOPING:
#endif
#ifdef IFLA_BR_MCAST_QUERY_USE_IFADDR
    case IFLA_BR_MCAST_QUERY_USE_IFADDR:
#endif
#ifdef IFLA_BR_MCAST_QUERIER
    case IFLA_BR_MCAST_QUERIER:
#endif
#ifdef IFLA_BR_NF_CALL_IPTABLES
    case IFLA_BR_NF_CALL_IPTABLES:
#endif
#ifdef IFLA_BR_NF_CALL_IP6TABLES
    case IFLA_BR_NF_CALL_IP6TABLES:
#endif
#ifdef IFLA_BR_NF_CALL_ARPTABLES
    case IFLA_BR_NF_CALL_ARPTABLES:
#endif
        /* single bytes need no swapping */
        break;
    /* uint16_t */
    case IFLA_BR_PRIORITY:
    case IFLA_BR_VLAN_PROTOCOL:
#ifdef IFLA_BR_GROUP_FWD_MASK
    case IFLA_BR_GROUP_FWD_MASK:
#endif
#ifdef IFLA_BR_ROOT_PORT
    case IFLA_BR_ROOT_PORT:
#endif
#ifdef IFLA_BR_VLAN_DEFAULT_PVID
    case IFLA_BR_VLAN_DEFAULT_PVID:
#endif
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case IFLA_BR_FORWARD_DELAY:
    case IFLA_BR_HELLO_TIME:
    case IFLA_BR_MAX_AGE:
    case IFLA_BR_AGEING_TIME:
    case IFLA_BR_STP_STATE:
#ifdef IFLA_BR_ROOT_PATH_COST
    case IFLA_BR_ROOT_PATH_COST:
#endif
#ifdef IFLA_BR_MCAST_HASH_ELASTICITY
    case IFLA_BR_MCAST_HASH_ELASTICITY:
#endif
#ifdef IFLA_BR_MCAST_HASH_MAX
    case IFLA_BR_MCAST_HASH_MAX:
#endif
#ifdef IFLA_BR_MCAST_LAST_MEMBER_CNT
    case IFLA_BR_MCAST_LAST_MEMBER_CNT:
#endif
#ifdef IFLA_BR_MCAST_STARTUP_QUERY_CNT
    case IFLA_BR_MCAST_STARTUP_QUERY_CNT:
#endif
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
#ifdef IFLA_BR_HELLO_TIMER
    case IFLA_BR_HELLO_TIMER:
#endif
#ifdef IFLA_BR_TCN_TIMER
    case IFLA_BR_TCN_TIMER:
#endif
#ifdef IFLA_BR_GC_TIMER
    case IFLA_BR_GC_TIMER:
#endif
#ifdef IFLA_BR_TOPOLOGY_CHANGE_TIMER
    case IFLA_BR_TOPOLOGY_CHANGE_TIMER:
#endif
#ifdef IFLA_BR_MCAST_LAST_MEMBER_INTVL
    case IFLA_BR_MCAST_LAST_MEMBER_INTVL:
#endif
#ifdef IFLA_BR_MCAST_MEMBERSHIP_INTVL
    case IFLA_BR_MCAST_MEMBERSHIP_INTVL:
#endif
#ifdef IFLA_BR_MCAST_QUERIER_INTVL
    case IFLA_BR_MCAST_QUERIER_INTVL:
#endif
#ifdef IFLA_BR_MCAST_QUERY_INTVL
    case IFLA_BR_MCAST_QUERY_INTVL:
#endif
#ifdef IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
    case IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
#endif
#ifdef IFLA_BR_MCAST_STARTUP_QUERY_INTVL
    case IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
#endif
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uin8_t[] */
#ifdef IFLA_BR_ROOT_ID
    case IFLA_BR_ROOT_ID:
#endif
#ifdef IFLA_BR_BRIDGE_ID
    case IFLA_BR_BRIDGE_ID:
#endif
        break;
    default:
        gemu_log("Unknown IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Byte-swap the payload of one IFLA_BRPORT_* (bridge port) attribute
 * according to its type, grouped by payload width as in
 * host_to_target_data_bridge_nlattr().  Newer attribute IDs are
 * #ifdef-guarded for older linux-headers.  Unknown types are logged
 * and left untouched.  Always returns 0.
 */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case IFLA_BRPORT_STATE:
    case IFLA_BRPORT_MODE:
    case IFLA_BRPORT_GUARD:
    case IFLA_BRPORT_PROTECT:
    case IFLA_BRPORT_FAST_LEAVE:
    case IFLA_BRPORT_LEARNING:
    case IFLA_BRPORT_UNICAST_FLOOD:
    case IFLA_BRPORT_PROXYARP:
    case IFLA_BRPORT_LEARNING_SYNC:
    case IFLA_BRPORT_PROXYARP_WIFI:
#ifdef IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
    case IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
#endif
#ifdef IFLA_BRPORT_CONFIG_PENDING
    case IFLA_BRPORT_CONFIG_PENDING:
#endif
#ifdef IFLA_BRPORT_MULTICAST_ROUTER
    case IFLA_BRPORT_MULTICAST_ROUTER:
#endif
        /* single bytes need no swapping */
        break;
    /* uint16_t */
    case IFLA_BRPORT_PRIORITY:
#ifdef IFLA_BRPORT_DESIGNATED_PORT
    case IFLA_BRPORT_DESIGNATED_PORT:
#endif
#ifdef IFLA_BRPORT_DESIGNATED_COST
    case IFLA_BRPORT_DESIGNATED_COST:
#endif
#ifdef IFLA_BRPORT_ID
    case IFLA_BRPORT_ID:
#endif
#ifdef IFLA_BRPORT_NO
    case IFLA_BRPORT_NO:
#endif
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uin32_t */
    case IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
#ifdef IFLA_BRPORT_MESSAGE_AGE_TIMER
    case IFLA_BRPORT_MESSAGE_AGE_TIMER:
#endif
#ifdef IFLA_BRPORT_FORWARD_DELAY_TIMER
    case IFLA_BRPORT_FORWARD_DELAY_TIMER:
#endif
#ifdef IFLA_BRPORT_HOLD_TIMER
    case IFLA_BRPORT_HOLD_TIMER:
#endif
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
#ifdef IFLA_BRPORT_ROOT_ID
    case IFLA_BRPORT_ROOT_ID:
#endif
#ifdef IFLA_BRPORT_BRIDGE_ID
    case IFLA_BRPORT_BRIDGE_ID:
#endif
        break;
    default:
        gemu_log("Unknown IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Context carried while walking IFLA_LINKINFO nested attributes:
 * records the link "kind" strings seen so far so that the nested
 * IFLA_INFO_DATA / IFLA_INFO_SLAVE_DATA payloads can be parsed
 * according to the driver type.  The name pointers reference the
 * attribute payload in place (not copied).
 */
struct linkinfo_context {
    int len;            /* payload length of IFLA_INFO_KIND */
    char *name;         /* IFLA_INFO_KIND string */
    int slave_len;      /* payload length of IFLA_INFO_SLAVE_KIND */
    char *slave_name;   /* IFLA_INFO_SLAVE_KIND string */
};
/* Convert one IFLA_INFO_* attribute nested inside IFLA_LINKINFO.
 * KIND strings are remembered in the linkinfo_context; DATA payloads
 * are then dispatched to the converter matching the recorded kind
 * (only "bridge" is handled).  Always returns 0 except for errors
 * propagated from the nested walk.
 */
static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
                                                    void *context)
{
    struct linkinfo_context *li_context = context;

    switch (nlattr->nla_type) {
    /* string */
    case IFLA_INFO_KIND:
        li_context->name = NLA_DATA(nlattr);
        li_context->len = nlattr->nla_len - NLA_HDRLEN;
        break;
    case IFLA_INFO_SLAVE_KIND:
        li_context->slave_name = NLA_DATA(nlattr);
        li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
        break;
    /* stats */
    case IFLA_INFO_XSTATS:
        /* FIXME: only used by CAN */
        break;
    /* nested */
    case IFLA_INFO_DATA:
        /* relies on IFLA_INFO_KIND having been seen earlier in the walk */
        if (strncmp(li_context->name, "bridge",
                    li_context->len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                             host_to_target_data_bridge_nlattr);
        } else {
            gemu_log("Unknown IFLA_INFO_KIND %s\n", li_context->name);
        }
        break;
    case IFLA_INFO_SLAVE_DATA:
        if (strncmp(li_context->slave_name, "bridge",
                    li_context->slave_len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                       host_to_target_slave_data_bridge_nlattr);
        } else {
            gemu_log("Unknown IFLA_INFO_SLAVE_KIND %s\n",
                     li_context->slave_name);
        }
        break;
    default:
        gemu_log("Unknown host IFLA_INFO type: %d\n", nlattr->nla_type);
        break;
    }

    return 0;
}
/* Convert one AF_INET attribute nested inside IFLA_AF_SPEC:
 * IFLA_INET_CONF is an array of uint32_t values, each byte-swapped in
 * place.  Unknown types are logged and left untouched.  Returns 0.
 */
static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    uint32_t *u32;
    int i;

    switch (nlattr->nla_type) {
    case IFLA_INET_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
    }
    return 0;
}
/* Convert one AF_INET6 attribute nested inside IFLA_AF_SPEC, grouped by
 * payload layout: opaque binaries and single bytes are left as-is,
 * uint32/uint64 scalars and arrays are swapped element-wise, and
 * ifla_cacheinfo has each of its four uint32_t fields swapped.
 * Unknown types are logged and left untouched.  Returns 0.
 */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries */
    case IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo */
    case IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case IFLA_INET6_STATS:
    case IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}
/* Dispatch one IFLA_AF_SPEC nested attribute by address family
 * (the nla_type is the AF_* value) to the matching per-family
 * converter.  Unknown families are logged and left untouched.
 * Returns 0 or an error from the nested walk.
 */
static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    switch (nlattr->nla_type) {
    case AF_INET:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet_nlattr);
    case AF_INET6:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet6_nlattr);
    default:
        gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/*
 * Byteswap the payload of one IFLA_* route attribute in place,
 * host byte order -> target byte order.
 *
 * Payload layouts follow <linux/if_link.h>: plain scalars, fixed structs
 * (rtnl_link_stats, rtnl_link_stats64, rtnl_link_ifmap), or nested
 * attribute lists (IFLA_LINKINFO, IFLA_AF_SPEC) which recurse via
 * host_to_target_for_each_nlattr().
 * Returns 0, or the result of the nested walk; unknown types are logged
 * and passed through unchanged.
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream */
    case IFLA_ADDRESS:
    case IFLA_BROADCAST:
    /* string */
    case IFLA_IFNAME:
    case IFLA_QDISC:
        break;
    /* uin8_t */
    case IFLA_OPERSTATE:
    case IFLA_LINKMODE:
    case IFLA_CARRIER:
    case IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case IFLA_MTU:
    case IFLA_LINK:
    case IFLA_WEIGHT:
    case IFLA_TXQLEN:
    case IFLA_CARRIER_CHANGES:
    case IFLA_NUM_RX_QUEUES:
    case IFLA_NUM_TX_QUEUES:
    case IFLA_PROMISCUITY:
    case IFLA_EXT_MASK:
    case IFLA_LINK_NETNSID:
    case IFLA_GROUP:
    case IFLA_MASTER:
    case IFLA_NUM_VF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        /* irq is the only 16-bit field in rtnl_link_ifmap */
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case IFLA_LINKINFO:
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                           host_to_target_data_linkinfo_nlattr);
    case IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                              host_to_target_data_spec_nlattr);
    default:
        gemu_log("Unknown host IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/*
 * Byteswap the payload of one IFA_* (address) attribute in place,
 * host byte order -> target byte order.  See <linux/if_addr.h>.
 * Returns 0; unknown types are logged and passed through unchanged.
 */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    /* NOTE(review): IFA_BROADCAST carries an address, which for AF_INET is
     * already in network byte order; swapping it as a u32 looks suspicious.
     * Confirm against rtnetlink(7) / <linux/if_addr.h>.
     */
    case IFA_BROADCAST:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
2305 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2307 uint32_t *u32;
2308 switch (rtattr->rta_type) {
2309 /* binary: depends on family type */
2310 case RTA_GATEWAY:
2311 case RTA_DST:
2312 case RTA_PREFSRC:
2313 break;
2314 /* u32 */
2315 case RTA_PRIORITY:
2316 case RTA_TABLE:
2317 case RTA_OIF:
2318 u32 = RTA_DATA(rtattr);
2319 *u32 = tswap32(*u32);
2320 break;
2321 default:
2322 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2323 break;
2325 return 0;
2328 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2329 uint32_t rtattr_len)
2331 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2332 host_to_target_data_link_rtattr);
2335 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2336 uint32_t rtattr_len)
2338 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2339 host_to_target_data_addr_rtattr);
2342 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2343 uint32_t rtattr_len)
2345 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2346 host_to_target_data_route_rtattr);
/*
 * Byteswap one NETLINK_ROUTE message payload in place, host byte order ->
 * target byte order: the fixed header (ifinfomsg / ifaddrmsg / rtmsg) plus
 * the attribute list that follows it.
 *
 * Messages shorter than their fixed header are passed through unchanged.
 * Returns 0 on success, -TARGET_EINVAL for an unhandled message type.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            /* attributes start after the ifinfomsg header */
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
2397 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2398 size_t len)
2400 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
/*
 * Walk a guest-supplied rtattr list of @len bytes, converting each header
 * to host byte order and invoking @target_to_host_rtattr on the payload.
 *
 * rta_len arrives in target byte order and is guest-controlled, so it is
 * validated (via tswap16) against the attribute header size and the
 * remaining buffer *before* the header is swapped in place.  The walk
 * stops silently on a malformed length.
 *
 * Returns 0, or the first negative value returned by the callback.
 */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                                        (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        /* advance to the next attribute, aligned per RTA_ALIGN */
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                 RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}
2428 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2430 switch (rtattr->rta_type) {
2431 default:
2432 gemu_log("Unknown target IFLA type: %d\n", rtattr->rta_type);
2433 break;
2435 return 0;
2438 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2440 switch (rtattr->rta_type) {
2441 /* binary: depends on family type */
2442 case IFA_LOCAL:
2443 case IFA_ADDRESS:
2444 break;
2445 default:
2446 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2447 break;
2449 return 0;
2452 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2454 uint32_t *u32;
2455 switch (rtattr->rta_type) {
2456 /* binary: depends on family type */
2457 case RTA_DST:
2458 case RTA_SRC:
2459 case RTA_GATEWAY:
2460 break;
2461 /* u32 */
2462 case RTA_OIF:
2463 u32 = RTA_DATA(rtattr);
2464 *u32 = tswap32(*u32);
2465 break;
2466 default:
2467 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2468 break;
2470 return 0;
2473 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2474 uint32_t rtattr_len)
2476 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2477 target_to_host_data_link_rtattr);
2480 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2481 uint32_t rtattr_len)
2483 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2484 target_to_host_data_addr_rtattr);
2487 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2488 uint32_t rtattr_len)
2490 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2491 target_to_host_data_route_rtattr);
/*
 * Byteswap one guest NETLINK_ROUTE message payload in place, target byte
 * order -> host byte order, before it is handed to the host kernel.
 *
 * GETLINK/GETROUTE dumps carry no payload that needs conversion and pass
 * through.  Messages shorter than their fixed header are left untouched.
 * Returns 0 on success, -TARGET_EOPNOTSUPP for an unhandled message type.
 */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            /* nlmsg_len was already converted by the caller's nlmsg walk */
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
2542 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2544 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2546 #endif /* CONFIG_RTNETLINK */
2548 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2550 switch (nlh->nlmsg_type) {
2551 default:
2552 gemu_log("Unknown host audit message type %d\n",
2553 nlh->nlmsg_type);
2554 return -TARGET_EINVAL;
2556 return 0;
2559 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2560 size_t len)
2562 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2565 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2567 switch (nlh->nlmsg_type) {
2568 case AUDIT_USER:
2569 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2570 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2571 break;
2572 default:
2573 gemu_log("Unknown target audit message type %d\n",
2574 nlh->nlmsg_type);
2575 return -TARGET_EINVAL;
2578 return 0;
2581 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2583 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2586 /* do_setsockopt() Must return target values and target errnos. */
2587 static abi_long do_setsockopt(int sockfd, int level, int optname,
2588 abi_ulong optval_addr, socklen_t optlen)
2590 abi_long ret;
2591 int val;
2592 struct ip_mreqn *ip_mreq;
2593 struct ip_mreq_source *ip_mreq_source;
2595 switch(level) {
2596 case SOL_TCP:
2597 /* TCP options all take an 'int' value. */
2598 if (optlen < sizeof(uint32_t))
2599 return -TARGET_EINVAL;
2601 if (get_user_u32(val, optval_addr))
2602 return -TARGET_EFAULT;
2603 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2604 break;
2605 case SOL_IP:
2606 switch(optname) {
2607 case IP_TOS:
2608 case IP_TTL:
2609 case IP_HDRINCL:
2610 case IP_ROUTER_ALERT:
2611 case IP_RECVOPTS:
2612 case IP_RETOPTS:
2613 case IP_PKTINFO:
2614 case IP_MTU_DISCOVER:
2615 case IP_RECVERR:
2616 case IP_RECVTOS:
2617 #ifdef IP_FREEBIND
2618 case IP_FREEBIND:
2619 #endif
2620 case IP_MULTICAST_TTL:
2621 case IP_MULTICAST_LOOP:
2622 val = 0;
2623 if (optlen >= sizeof(uint32_t)) {
2624 if (get_user_u32(val, optval_addr))
2625 return -TARGET_EFAULT;
2626 } else if (optlen >= 1) {
2627 if (get_user_u8(val, optval_addr))
2628 return -TARGET_EFAULT;
2630 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2631 break;
2632 case IP_ADD_MEMBERSHIP:
2633 case IP_DROP_MEMBERSHIP:
2634 if (optlen < sizeof (struct target_ip_mreq) ||
2635 optlen > sizeof (struct target_ip_mreqn))
2636 return -TARGET_EINVAL;
2638 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2639 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2640 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2641 break;
2643 case IP_BLOCK_SOURCE:
2644 case IP_UNBLOCK_SOURCE:
2645 case IP_ADD_SOURCE_MEMBERSHIP:
2646 case IP_DROP_SOURCE_MEMBERSHIP:
2647 if (optlen != sizeof (struct target_ip_mreq_source))
2648 return -TARGET_EINVAL;
2650 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2651 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2652 unlock_user (ip_mreq_source, optval_addr, 0);
2653 break;
2655 default:
2656 goto unimplemented;
2658 break;
2659 case SOL_IPV6:
2660 switch (optname) {
2661 case IPV6_MTU_DISCOVER:
2662 case IPV6_MTU:
2663 case IPV6_V6ONLY:
2664 case IPV6_RECVPKTINFO:
2665 val = 0;
2666 if (optlen < sizeof(uint32_t)) {
2667 return -TARGET_EINVAL;
2669 if (get_user_u32(val, optval_addr)) {
2670 return -TARGET_EFAULT;
2672 ret = get_errno(setsockopt(sockfd, level, optname,
2673 &val, sizeof(val)));
2674 break;
2675 default:
2676 goto unimplemented;
2678 break;
2679 case SOL_RAW:
2680 switch (optname) {
2681 case ICMP_FILTER:
2682 /* struct icmp_filter takes an u32 value */
2683 if (optlen < sizeof(uint32_t)) {
2684 return -TARGET_EINVAL;
2687 if (get_user_u32(val, optval_addr)) {
2688 return -TARGET_EFAULT;
2690 ret = get_errno(setsockopt(sockfd, level, optname,
2691 &val, sizeof(val)));
2692 break;
2694 default:
2695 goto unimplemented;
2697 break;
2698 case TARGET_SOL_SOCKET:
2699 switch (optname) {
2700 case TARGET_SO_RCVTIMEO:
2702 struct timeval tv;
2704 optname = SO_RCVTIMEO;
2706 set_timeout:
2707 if (optlen != sizeof(struct target_timeval)) {
2708 return -TARGET_EINVAL;
2711 if (copy_from_user_timeval(&tv, optval_addr)) {
2712 return -TARGET_EFAULT;
2715 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2716 &tv, sizeof(tv)));
2717 return ret;
2719 case TARGET_SO_SNDTIMEO:
2720 optname = SO_SNDTIMEO;
2721 goto set_timeout;
2722 case TARGET_SO_ATTACH_FILTER:
2724 struct target_sock_fprog *tfprog;
2725 struct target_sock_filter *tfilter;
2726 struct sock_fprog fprog;
2727 struct sock_filter *filter;
2728 int i;
2730 if (optlen != sizeof(*tfprog)) {
2731 return -TARGET_EINVAL;
2733 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2734 return -TARGET_EFAULT;
2736 if (!lock_user_struct(VERIFY_READ, tfilter,
2737 tswapal(tfprog->filter), 0)) {
2738 unlock_user_struct(tfprog, optval_addr, 1);
2739 return -TARGET_EFAULT;
2742 fprog.len = tswap16(tfprog->len);
2743 filter = g_try_new(struct sock_filter, fprog.len);
2744 if (filter == NULL) {
2745 unlock_user_struct(tfilter, tfprog->filter, 1);
2746 unlock_user_struct(tfprog, optval_addr, 1);
2747 return -TARGET_ENOMEM;
2749 for (i = 0; i < fprog.len; i++) {
2750 filter[i].code = tswap16(tfilter[i].code);
2751 filter[i].jt = tfilter[i].jt;
2752 filter[i].jf = tfilter[i].jf;
2753 filter[i].k = tswap32(tfilter[i].k);
2755 fprog.filter = filter;
2757 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2758 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2759 g_free(filter);
2761 unlock_user_struct(tfilter, tfprog->filter, 1);
2762 unlock_user_struct(tfprog, optval_addr, 1);
2763 return ret;
2765 case TARGET_SO_BINDTODEVICE:
2767 char *dev_ifname, *addr_ifname;
2769 if (optlen > IFNAMSIZ - 1) {
2770 optlen = IFNAMSIZ - 1;
2772 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2773 if (!dev_ifname) {
2774 return -TARGET_EFAULT;
2776 optname = SO_BINDTODEVICE;
2777 addr_ifname = alloca(IFNAMSIZ);
2778 memcpy(addr_ifname, dev_ifname, optlen);
2779 addr_ifname[optlen] = 0;
2780 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2781 addr_ifname, optlen));
2782 unlock_user (dev_ifname, optval_addr, 0);
2783 return ret;
2785 /* Options with 'int' argument. */
2786 case TARGET_SO_DEBUG:
2787 optname = SO_DEBUG;
2788 break;
2789 case TARGET_SO_REUSEADDR:
2790 optname = SO_REUSEADDR;
2791 break;
2792 case TARGET_SO_TYPE:
2793 optname = SO_TYPE;
2794 break;
2795 case TARGET_SO_ERROR:
2796 optname = SO_ERROR;
2797 break;
2798 case TARGET_SO_DONTROUTE:
2799 optname = SO_DONTROUTE;
2800 break;
2801 case TARGET_SO_BROADCAST:
2802 optname = SO_BROADCAST;
2803 break;
2804 case TARGET_SO_SNDBUF:
2805 optname = SO_SNDBUF;
2806 break;
2807 case TARGET_SO_SNDBUFFORCE:
2808 optname = SO_SNDBUFFORCE;
2809 break;
2810 case TARGET_SO_RCVBUF:
2811 optname = SO_RCVBUF;
2812 break;
2813 case TARGET_SO_RCVBUFFORCE:
2814 optname = SO_RCVBUFFORCE;
2815 break;
2816 case TARGET_SO_KEEPALIVE:
2817 optname = SO_KEEPALIVE;
2818 break;
2819 case TARGET_SO_OOBINLINE:
2820 optname = SO_OOBINLINE;
2821 break;
2822 case TARGET_SO_NO_CHECK:
2823 optname = SO_NO_CHECK;
2824 break;
2825 case TARGET_SO_PRIORITY:
2826 optname = SO_PRIORITY;
2827 break;
2828 #ifdef SO_BSDCOMPAT
2829 case TARGET_SO_BSDCOMPAT:
2830 optname = SO_BSDCOMPAT;
2831 break;
2832 #endif
2833 case TARGET_SO_PASSCRED:
2834 optname = SO_PASSCRED;
2835 break;
2836 case TARGET_SO_PASSSEC:
2837 optname = SO_PASSSEC;
2838 break;
2839 case TARGET_SO_TIMESTAMP:
2840 optname = SO_TIMESTAMP;
2841 break;
2842 case TARGET_SO_RCVLOWAT:
2843 optname = SO_RCVLOWAT;
2844 break;
2845 break;
2846 default:
2847 goto unimplemented;
2849 if (optlen < sizeof(uint32_t))
2850 return -TARGET_EINVAL;
2852 if (get_user_u32(val, optval_addr))
2853 return -TARGET_EFAULT;
2854 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2855 break;
2856 default:
2857 unimplemented:
2858 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2859 ret = -TARGET_ENOPROTOOPT;
2861 return ret;
2864 /* do_getsockopt() Must return target values and target errnos. */
2865 static abi_long do_getsockopt(int sockfd, int level, int optname,
2866 abi_ulong optval_addr, abi_ulong optlen)
2868 abi_long ret;
2869 int len, val;
2870 socklen_t lv;
2872 switch(level) {
2873 case TARGET_SOL_SOCKET:
2874 level = SOL_SOCKET;
2875 switch (optname) {
2876 /* These don't just return a single integer */
2877 case TARGET_SO_LINGER:
2878 case TARGET_SO_RCVTIMEO:
2879 case TARGET_SO_SNDTIMEO:
2880 case TARGET_SO_PEERNAME:
2881 goto unimplemented;
2882 case TARGET_SO_PEERCRED: {
2883 struct ucred cr;
2884 socklen_t crlen;
2885 struct target_ucred *tcr;
2887 if (get_user_u32(len, optlen)) {
2888 return -TARGET_EFAULT;
2890 if (len < 0) {
2891 return -TARGET_EINVAL;
2894 crlen = sizeof(cr);
2895 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2896 &cr, &crlen));
2897 if (ret < 0) {
2898 return ret;
2900 if (len > crlen) {
2901 len = crlen;
2903 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2904 return -TARGET_EFAULT;
2906 __put_user(cr.pid, &tcr->pid);
2907 __put_user(cr.uid, &tcr->uid);
2908 __put_user(cr.gid, &tcr->gid);
2909 unlock_user_struct(tcr, optval_addr, 1);
2910 if (put_user_u32(len, optlen)) {
2911 return -TARGET_EFAULT;
2913 break;
2915 /* Options with 'int' argument. */
2916 case TARGET_SO_DEBUG:
2917 optname = SO_DEBUG;
2918 goto int_case;
2919 case TARGET_SO_REUSEADDR:
2920 optname = SO_REUSEADDR;
2921 goto int_case;
2922 case TARGET_SO_TYPE:
2923 optname = SO_TYPE;
2924 goto int_case;
2925 case TARGET_SO_ERROR:
2926 optname = SO_ERROR;
2927 goto int_case;
2928 case TARGET_SO_DONTROUTE:
2929 optname = SO_DONTROUTE;
2930 goto int_case;
2931 case TARGET_SO_BROADCAST:
2932 optname = SO_BROADCAST;
2933 goto int_case;
2934 case TARGET_SO_SNDBUF:
2935 optname = SO_SNDBUF;
2936 goto int_case;
2937 case TARGET_SO_RCVBUF:
2938 optname = SO_RCVBUF;
2939 goto int_case;
2940 case TARGET_SO_KEEPALIVE:
2941 optname = SO_KEEPALIVE;
2942 goto int_case;
2943 case TARGET_SO_OOBINLINE:
2944 optname = SO_OOBINLINE;
2945 goto int_case;
2946 case TARGET_SO_NO_CHECK:
2947 optname = SO_NO_CHECK;
2948 goto int_case;
2949 case TARGET_SO_PRIORITY:
2950 optname = SO_PRIORITY;
2951 goto int_case;
2952 #ifdef SO_BSDCOMPAT
2953 case TARGET_SO_BSDCOMPAT:
2954 optname = SO_BSDCOMPAT;
2955 goto int_case;
2956 #endif
2957 case TARGET_SO_PASSCRED:
2958 optname = SO_PASSCRED;
2959 goto int_case;
2960 case TARGET_SO_TIMESTAMP:
2961 optname = SO_TIMESTAMP;
2962 goto int_case;
2963 case TARGET_SO_RCVLOWAT:
2964 optname = SO_RCVLOWAT;
2965 goto int_case;
2966 case TARGET_SO_ACCEPTCONN:
2967 optname = SO_ACCEPTCONN;
2968 goto int_case;
2969 default:
2970 goto int_case;
2972 break;
2973 case SOL_TCP:
2974 /* TCP options all take an 'int' value. */
2975 int_case:
2976 if (get_user_u32(len, optlen))
2977 return -TARGET_EFAULT;
2978 if (len < 0)
2979 return -TARGET_EINVAL;
2980 lv = sizeof(lv);
2981 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2982 if (ret < 0)
2983 return ret;
2984 if (optname == SO_TYPE) {
2985 val = host_to_target_sock_type(val);
2987 if (len > lv)
2988 len = lv;
2989 if (len == 4) {
2990 if (put_user_u32(val, optval_addr))
2991 return -TARGET_EFAULT;
2992 } else {
2993 if (put_user_u8(val, optval_addr))
2994 return -TARGET_EFAULT;
2996 if (put_user_u32(len, optlen))
2997 return -TARGET_EFAULT;
2998 break;
2999 case SOL_IP:
3000 switch(optname) {
3001 case IP_TOS:
3002 case IP_TTL:
3003 case IP_HDRINCL:
3004 case IP_ROUTER_ALERT:
3005 case IP_RECVOPTS:
3006 case IP_RETOPTS:
3007 case IP_PKTINFO:
3008 case IP_MTU_DISCOVER:
3009 case IP_RECVERR:
3010 case IP_RECVTOS:
3011 #ifdef IP_FREEBIND
3012 case IP_FREEBIND:
3013 #endif
3014 case IP_MULTICAST_TTL:
3015 case IP_MULTICAST_LOOP:
3016 if (get_user_u32(len, optlen))
3017 return -TARGET_EFAULT;
3018 if (len < 0)
3019 return -TARGET_EINVAL;
3020 lv = sizeof(lv);
3021 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3022 if (ret < 0)
3023 return ret;
3024 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3025 len = 1;
3026 if (put_user_u32(len, optlen)
3027 || put_user_u8(val, optval_addr))
3028 return -TARGET_EFAULT;
3029 } else {
3030 if (len > sizeof(int))
3031 len = sizeof(int);
3032 if (put_user_u32(len, optlen)
3033 || put_user_u32(val, optval_addr))
3034 return -TARGET_EFAULT;
3036 break;
3037 default:
3038 ret = -TARGET_ENOPROTOOPT;
3039 break;
3041 break;
3042 default:
3043 unimplemented:
3044 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3045 level, optname);
3046 ret = -TARGET_EOPNOTSUPP;
3047 break;
3049 return ret;
/*
 * Build a host iovec array from a guest target_iovec array at
 * @target_addr, locking each guest buffer into host memory.
 *
 * @type:  VERIFY_READ or VERIFY_WRITE, passed through to lock_user()
 * @count: number of entries; 0 returns NULL with errno == 0
 * @copy:  whether lock_user() must copy guest data in
 *
 * On failure returns NULL with errno set (EINVAL/ENOMEM/EFAULT) and all
 * partially acquired resources released.  A bad buffer *after* the first
 * entry is not an error: the rest of the vector is filled with null
 * pointers and zero lengths so the caller performs a partial transfer,
 * matching kernel readv/writev semantics.  The result must be released
 * with unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every buffer locked before entry i. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3140 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3141 int count, int copy)
3143 struct target_iovec *target_vec;
3144 int i;
3146 target_vec = lock_user(VERIFY_READ, target_addr,
3147 count * sizeof(struct target_iovec), 1);
3148 if (target_vec) {
3149 for (i = 0; i < count; i++) {
3150 abi_ulong base = tswapal(target_vec[i].iov_base);
3151 abi_long len = tswapal(target_vec[i].iov_len);
3152 if (len < 0) {
3153 break;
3155 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3157 unlock_user(target_vec, target_addr, 0);
3160 g_free(vec);
3163 static inline int target_to_host_sock_type(int *type)
3165 int host_type = 0;
3166 int target_type = *type;
3168 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3169 case TARGET_SOCK_DGRAM:
3170 host_type = SOCK_DGRAM;
3171 break;
3172 case TARGET_SOCK_STREAM:
3173 host_type = SOCK_STREAM;
3174 break;
3175 default:
3176 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3177 break;
3179 if (target_type & TARGET_SOCK_CLOEXEC) {
3180 #if defined(SOCK_CLOEXEC)
3181 host_type |= SOCK_CLOEXEC;
3182 #else
3183 return -TARGET_EINVAL;
3184 #endif
3186 if (target_type & TARGET_SOCK_NONBLOCK) {
3187 #if defined(SOCK_NONBLOCK)
3188 host_type |= SOCK_NONBLOCK;
3189 #elif !defined(O_NONBLOCK)
3190 return -TARGET_EINVAL;
3191 #endif
3193 *type = host_type;
3194 return 0;
/* Try to emulate socket type flags after socket creation.  */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    /* Host lacks SOCK_NONBLOCK at socket() time: fall back to fcntl(). */
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int fl = fcntl(fd, F_GETFL);

        if (fcntl(fd, F_SETFL, fl | O_NONBLOCK) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
/*
 * Copy a guest sockaddr for an (obsolete) SOCK_PACKET socket into
 * @host_addr, converting only sa_family to host byte order.
 * The rest of the structure is copied verbatim: spkt_protocol is
 * defined to be big-endian on both sides, and spkt_device is a string.
 * Returns 0 or -TARGET_EFAULT if the guest buffer cannot be accessed.
 */
static abi_long packet_target_to_host_sockaddr(void *host_addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    struct sockaddr *addr = host_addr;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr) {
        return -TARGET_EFAULT;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = tswap16(target_saddr->sa_family);
    /* spkt_protocol is big-endian */

    unlock_user(target_saddr, target_addr, 0);
    return 0;
}
/* fd translator for obsolete SOCK_PACKET sockets: only the sockaddr
 * needs conversion (see packet_target_to_host_sockaddr). */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
3236 #ifdef CONFIG_RTNETLINK
3237 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3239 abi_long ret;
3241 ret = target_to_host_nlmsg_route(buf, len);
3242 if (ret < 0) {
3243 return ret;
3246 return len;
3249 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3251 abi_long ret;
3253 ret = host_to_target_nlmsg_route(buf, len);
3254 if (ret < 0) {
3255 return ret;
3258 return len;
/* fd translator for NETLINK_ROUTE sockets: byteswap messages both ways. */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
3265 #endif /* CONFIG_RTNETLINK */
3267 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3269 abi_long ret;
3271 ret = target_to_host_nlmsg_audit(buf, len);
3272 if (ret < 0) {
3273 return ret;
3276 return len;
3279 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3281 abi_long ret;
3283 ret = host_to_target_nlmsg_audit(buf, len);
3284 if (ret < 0) {
3285 return ret;
3288 return len;
/* fd translator for NETLINK_AUDIT sockets: byteswap messages both ways. */
static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};
3296 /* do_socket() Must return target values and target errnos. */
3297 static abi_long do_socket(int domain, int type, int protocol)
3299 int target_type = type;
3300 int ret;
3302 ret = target_to_host_sock_type(&type);
3303 if (ret) {
3304 return ret;
3307 if (domain == PF_NETLINK && !(
3308 #ifdef CONFIG_RTNETLINK
3309 protocol == NETLINK_ROUTE ||
3310 #endif
3311 protocol == NETLINK_KOBJECT_UEVENT ||
3312 protocol == NETLINK_AUDIT)) {
3313 return -EPFNOSUPPORT;
3316 if (domain == AF_PACKET ||
3317 (domain == AF_INET && type == SOCK_PACKET)) {
3318 protocol = tswap16(protocol);
3321 ret = get_errno(socket(domain, type, protocol));
3322 if (ret >= 0) {
3323 ret = sock_flags_fixup(ret, target_type);
3324 if (type == SOCK_PACKET) {
3325 /* Manage an obsolete case :
3326 * if socket type is SOCK_PACKET, bind by name
3328 fd_trans_register(ret, &target_packet_trans);
3329 } else if (domain == PF_NETLINK) {
3330 switch (protocol) {
3331 #ifdef CONFIG_RTNETLINK
3332 case NETLINK_ROUTE:
3333 fd_trans_register(ret, &target_netlink_route_trans);
3334 break;
3335 #endif
3336 case NETLINK_KOBJECT_UEVENT:
3337 /* nothing to do: messages are strings */
3338 break;
3339 case NETLINK_AUDIT:
3340 fd_trans_register(ret, &target_netlink_audit_trans);
3341 break;
3342 default:
3343 g_assert_not_reached();
3347 return ret;
3350 /* do_bind() Must return target values and target errnos. */
3351 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3352 socklen_t addrlen)
3354 void *addr;
3355 abi_long ret;
3357 if ((int)addrlen < 0) {
3358 return -TARGET_EINVAL;
3361 addr = alloca(addrlen+1);
3363 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3364 if (ret)
3365 return ret;
3367 return get_errno(bind(sockfd, addr, addrlen));
3370 /* do_connect() Must return target values and target errnos. */
3371 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3372 socklen_t addrlen)
3374 void *addr;
3375 abi_long ret;
3377 if ((int)addrlen < 0) {
3378 return -TARGET_EINVAL;
/* +1 byte of slack for sockaddr conversion, as in do_bind(). */
3381 addr = alloca(addrlen+1);
3383 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3384 if (ret)
3385 return ret;
/* safe_connect: signal-safe wrapper so guest signals are not lost. */
3387 return get_errno(safe_connect(sockfd, addr, addrlen));
3390 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
/* Core of sendmsg/recvmsg emulation: msgp is an already-locked guest
 * msghdr.  Converts name, control data and iovec, performs the host
 * call (send != 0 -> sendmsg, else recvmsg), then converts results
 * back for the receive path. */
3391 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3392 int flags, int send)
3394 abi_long ret, len;
3395 struct msghdr msg;
3396 int count;
3397 struct iovec *vec;
3398 abi_ulong target_vec;
3400 if (msgp->msg_name) {
3401 msg.msg_namelen = tswap32(msgp->msg_namelen);
3402 msg.msg_name = alloca(msg.msg_namelen+1);
3403 ret = target_to_host_sockaddr(fd, msg.msg_name,
3404 tswapal(msgp->msg_name),
3405 msg.msg_namelen);
3406 if (ret) {
3407 goto out2;
3409 } else {
3410 msg.msg_name = NULL;
3411 msg.msg_namelen = 0;
/* 2x the target control length: host cmsg structures can be larger
 * than the target's after conversion (NOTE(review): sizing heuristic). */
3413 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3414 msg.msg_control = alloca(msg.msg_controllen);
3415 msg.msg_flags = tswap32(msgp->msg_flags);
3417 count = tswapal(msgp->msg_iovlen);
3418 target_vec = tswapal(msgp->msg_iov);
3419 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3420 target_vec, count, send);
3421 if (vec == NULL) {
3422 ret = -host_to_target_errno(errno);
3423 goto out2;
3425 msg.msg_iovlen = count;
3426 msg.msg_iov = vec;
3428 if (send) {
3429 if (fd_trans_target_to_host_data(fd)) {
/* fd has a data translator: copy the (first) iov buffer, translate
 * it in place, and send the translated copy instead. */
3430 void *host_msg;
3432 host_msg = g_malloc(msg.msg_iov->iov_len);
3433 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3434 ret = fd_trans_target_to_host_data(fd)(host_msg,
3435 msg.msg_iov->iov_len);
3436 if (ret >= 0) {
3437 msg.msg_iov->iov_base = host_msg;
3438 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3440 g_free(host_msg);
3441 } else {
3442 ret = target_to_host_cmsg(&msg, msgp);
3443 if (ret == 0) {
3444 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3447 } else {
3448 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3449 if (!is_error(ret)) {
3450 len = ret;
3451 if (fd_trans_host_to_target_data(fd)) {
3452 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3453 len);
3454 } else {
3455 ret = host_to_target_cmsg(msgp, &msg);
3457 if (!is_error(ret)) {
/* Write back the (possibly shortened) peer address and its length. */
3458 msgp->msg_namelen = tswap32(msg.msg_namelen);
3459 if (msg.msg_name != NULL) {
3460 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3461 msg.msg_name, msg.msg_namelen);
3462 if (ret) {
3463 goto out;
3467 ret = len;
3472 out:
3473 unlock_iovec(vec, target_vec, count, !send);
3474 out2:
3475 return ret;
/* sendmsg/recvmsg entry point: lock the guest msghdr, delegate to
 * do_sendrecvmsg_locked(), then unlock (copying back only on receive). */
3478 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3479 int flags, int send)
3481 abi_long ret;
3482 struct target_msghdr *msgp;
3484 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3485 msgp,
3486 target_msg,
3487 send ? 1 : 0)) {
3488 return -TARGET_EFAULT;
3490 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3491 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3492 return ret;
3495 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3496 * so it might not have this *mmsg-specific flag either.
3498 #ifndef MSG_WAITFORONE
3499 #define MSG_WAITFORONE 0x10000
3500 #endif
/* Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked() over the
 * locked guest mmsghdr array.  Returns the number of datagrams handled
 * if any succeeded, otherwise the first error. */
3502 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3503 unsigned int vlen, unsigned int flags,
3504 int send)
3506 struct target_mmsghdr *mmsgp;
3507 abi_long ret = 0;
3508 int i;
/* Clamp to the kernel's per-call iovec limit, as Linux does. */
3510 if (vlen > UIO_MAXIOV) {
3511 vlen = UIO_MAXIOV;
3514 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3515 if (!mmsgp) {
3516 return -TARGET_EFAULT;
3519 for (i = 0; i < vlen; i++) {
3520 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3521 if (is_error(ret)) {
3522 break;
3524 mmsgp[i].msg_len = tswap32(ret);
3525 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3526 if (flags & MSG_WAITFORONE) {
3527 flags |= MSG_DONTWAIT;
/* Copy back only the entries actually filled in. */
3531 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3533 /* Return number of datagrams sent if we sent any at all;
3534 * otherwise return the error.
3536 if (i) {
3537 return i;
3539 return ret;
3542 /* do_accept4() Must return target values and target errnos. */
3543 static abi_long do_accept4(int fd, abi_ulong target_addr,
3544 abi_ulong target_addrlen_addr, int flags)
3546 socklen_t addrlen;
3547 void *addr;
3548 abi_long ret;
3549 int host_flags;
/* SOCK_NONBLOCK/SOCK_CLOEXEC share values with O_* flags, so reuse
 * the fcntl flag translation table. */
3551 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
/* NULL addr means the caller doesn't want the peer address. */
3553 if (target_addr == 0) {
3554 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3557 /* linux returns EINVAL if addrlen pointer is invalid */
3558 if (get_user_u32(addrlen, target_addrlen_addr))
3559 return -TARGET_EINVAL;
3561 if ((int)addrlen < 0) {
3562 return -TARGET_EINVAL;
3565 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3566 return -TARGET_EINVAL;
3568 addr = alloca(addrlen);
3570 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3571 if (!is_error(ret)) {
3572 host_to_target_sockaddr(target_addr, addr, addrlen);
3573 if (put_user_u32(addrlen, target_addrlen_addr))
3574 ret = -TARGET_EFAULT;
3576 return ret;
3579 /* do_getpeername() Must return target values and target errnos. */
/* Read the guest's addrlen, validate it, fetch the peer address from
 * the host, and copy address + updated length back to guest memory. */
3580 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3581 abi_ulong target_addrlen_addr)
3583 socklen_t addrlen;
3584 void *addr;
3585 abi_long ret;
3587 if (get_user_u32(addrlen, target_addrlen_addr))
3588 return -TARGET_EFAULT;
3590 if ((int)addrlen < 0) {
3591 return -TARGET_EINVAL;
3594 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3595 return -TARGET_EFAULT;
3597 addr = alloca(addrlen);
3599 ret = get_errno(getpeername(fd, addr, &addrlen));
3600 if (!is_error(ret)) {
3601 host_to_target_sockaddr(target_addr, addr, addrlen);
3602 if (put_user_u32(addrlen, target_addrlen_addr))
3603 ret = -TARGET_EFAULT;
3605 return ret;
3608 /* do_getsockname() Must return target values and target errnos. */
/* Identical shape to do_getpeername() but queries the local address. */
3609 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3610 abi_ulong target_addrlen_addr)
3612 socklen_t addrlen;
3613 void *addr;
3614 abi_long ret;
3616 if (get_user_u32(addrlen, target_addrlen_addr))
3617 return -TARGET_EFAULT;
3619 if ((int)addrlen < 0) {
3620 return -TARGET_EINVAL;
3623 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3624 return -TARGET_EFAULT;
3626 addr = alloca(addrlen);
3628 ret = get_errno(getsockname(fd, addr, &addrlen));
3629 if (!is_error(ret)) {
3630 host_to_target_sockaddr(target_addr, addr, addrlen);
3631 if (put_user_u32(addrlen, target_addrlen_addr))
3632 ret = -TARGET_EFAULT;
3634 return ret;
3637 /* do_socketpair() Must return target values and target errnos. */
/* Create a host socketpair and store the two fds into the guest's
 * int[2] array at target_tab_addr. */
3638 static abi_long do_socketpair(int domain, int type, int protocol,
3639 abi_ulong target_tab_addr)
3641 int tab[2];
3642 abi_long ret;
3644 target_to_host_sock_type(&type);
3646 ret = get_errno(socketpair(domain, type, protocol, tab));
3647 if (!is_error(ret)) {
3648 if (put_user_s32(tab[0], target_tab_addr)
3649 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3650 ret = -TARGET_EFAULT;
3652 return ret;
3655 /* do_sendto() Must return target values and target errnos. */
/* Emulate sendto(2)/send(2) (target_addr == 0 means plain send).  If
 * the fd has a data translator, the payload is copied and translated
 * before sending so the guest buffer is left untouched. */
3656 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3657 abi_ulong target_addr, socklen_t addrlen)
3659 void *addr;
3660 void *host_msg;
/* copy_msg keeps the original locked pointer while host_msg points at
 * the translated private copy; restored before unlock_user(). */
3661 void *copy_msg = NULL;
3662 abi_long ret;
3664 if ((int)addrlen < 0) {
3665 return -TARGET_EINVAL;
3668 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3669 if (!host_msg)
3670 return -TARGET_EFAULT;
3671 if (fd_trans_target_to_host_data(fd)) {
3672 copy_msg = host_msg;
3673 host_msg = g_malloc(len);
3674 memcpy(host_msg, copy_msg, len);
3675 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3676 if (ret < 0) {
3677 goto fail;
3680 if (target_addr) {
3681 addr = alloca(addrlen+1);
3682 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3683 if (ret) {
3684 goto fail;
3686 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3687 } else {
3688 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3690 fail:
3691 if (copy_msg) {
3692 g_free(host_msg);
3693 host_msg = copy_msg;
3695 unlock_user(host_msg, msg, 0);
3696 return ret;
3699 /* do_recvfrom() Must return target values and target errnos. */
/* Emulate recvfrom(2)/recv(2) (target_addr == 0 means plain recv).
 * On success, translates the payload if the fd has a translator and
 * copies the source address + length back to the guest. */
3700 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3701 abi_ulong target_addr,
3702 abi_ulong target_addrlen)
3704 socklen_t addrlen;
3705 void *addr;
3706 void *host_msg;
3707 abi_long ret;
3709 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3710 if (!host_msg)
3711 return -TARGET_EFAULT;
3712 if (target_addr) {
3713 if (get_user_u32(addrlen, target_addrlen)) {
3714 ret = -TARGET_EFAULT;
3715 goto fail;
3717 if ((int)addrlen < 0) {
3718 ret = -TARGET_EINVAL;
3719 goto fail;
3721 addr = alloca(addrlen);
3722 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3723 addr, &addrlen));
3724 } else {
3725 addr = NULL; /* To keep compiler quiet. */
3726 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3728 if (!is_error(ret)) {
3729 if (fd_trans_host_to_target_data(fd)) {
3730 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
3732 if (target_addr) {
3733 host_to_target_sockaddr(target_addr, addr, addrlen);
3734 if (put_user_u32(addrlen, target_addrlen)) {
3735 ret = -TARGET_EFAULT;
3736 goto fail;
/* Success path: copy the received bytes back to guest memory. */
3739 unlock_user(host_msg, msg, len);
3740 } else {
3741 fail:
/* Error path: unlock without copying anything back. */
3742 unlock_user(host_msg, msg, 0);
3744 return ret;
3747 #ifdef TARGET_NR_socketcall
3748 /* do_socketcall() Must return target values and target errnos. */
/* Demultiplex the legacy socketcall(2) syscall: fetch the per-call
 * argument vector from guest memory (count driven by ac[]), then
 * dispatch to the individual do_* helpers above. */
3749 static abi_long do_socketcall(int num, abi_ulong vptr)
3751 static const unsigned ac[] = { /* number of arguments per call */
3752 [SOCKOP_socket] = 3, /* domain, type, protocol */
3753 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
3754 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
3755 [SOCKOP_listen] = 2, /* sockfd, backlog */
3756 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
3757 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
3758 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
3759 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
3760 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
3761 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
3762 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
3763 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3764 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3765 [SOCKOP_shutdown] = 2, /* sockfd, how */
3766 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
3767 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
3768 [SOCKOP_sendmmsg] = 4, /* sockfd, msgvec, vlen, flags */
3769 [SOCKOP_recvmmsg] = 4, /* sockfd, msgvec, vlen, flags */
3770 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
3771 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
3773 abi_long a[6]; /* max 6 args */
3775 /* first, collect the arguments in a[] according to ac[] */
3776 if (num >= 0 && num < ARRAY_SIZE(ac)) {
3777 unsigned i;
3778 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
3779 for (i = 0; i < ac[num]; ++i) {
3780 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3781 return -TARGET_EFAULT;
3786 /* now when we have the args, actually handle the call */
3787 switch (num) {
3788 case SOCKOP_socket: /* domain, type, protocol */
3789 return do_socket(a[0], a[1], a[2]);
3790 case SOCKOP_bind: /* sockfd, addr, addrlen */
3791 return do_bind(a[0], a[1], a[2]);
3792 case SOCKOP_connect: /* sockfd, addr, addrlen */
3793 return do_connect(a[0], a[1], a[2]);
3794 case SOCKOP_listen: /* sockfd, backlog */
3795 return get_errno(listen(a[0], a[1]));
3796 case SOCKOP_accept: /* sockfd, addr, addrlen */
3797 return do_accept4(a[0], a[1], a[2], 0);
3798 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
3799 return do_accept4(a[0], a[1], a[2], a[3]);
3800 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
3801 return do_getsockname(a[0], a[1], a[2]);
3802 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
3803 return do_getpeername(a[0], a[1], a[2]);
3804 case SOCKOP_socketpair: /* domain, type, protocol, tab */
3805 return do_socketpair(a[0], a[1], a[2], a[3]);
3806 case SOCKOP_send: /* sockfd, msg, len, flags */
3807 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3808 case SOCKOP_recv: /* sockfd, msg, len, flags */
3809 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3810 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
3811 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3812 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
3813 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3814 case SOCKOP_shutdown: /* sockfd, how */
3815 return get_errno(shutdown(a[0], a[1]));
3816 case SOCKOP_sendmsg: /* sockfd, msg, flags */
3817 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3818 case SOCKOP_recvmsg: /* sockfd, msg, flags */
3819 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3820 case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
3821 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3822 case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
3823 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3824 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
3825 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3826 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
3827 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3828 default:
3829 gemu_log("Unsupported socketcall: %d\n", num);
3830 return -TARGET_ENOSYS;
3833 #endif
3835 #define N_SHM_REGIONS 32
/* Bookkeeping for guest shmat() mappings so they can be unmapped
 * correctly later; fixed-size table of N_SHM_REGIONS entries. */
3837 static struct shm_region {
3838 abi_ulong start;
3839 abi_ulong size;
3840 bool in_use;
3841 } shm_regions[N_SHM_REGIONS];
3843 #ifndef TARGET_SEMID64_DS
3844 /* asm-generic version of this struct */
/* Guest-layout semid64_ds; the __unusedN padding mirrors the kernel's
 * 32-bit ABI layout around the time fields. */
3845 struct target_semid64_ds
3847 struct target_ipc_perm sem_perm;
3848 abi_ulong sem_otime;
3849 #if TARGET_ABI_BITS == 32
3850 abi_ulong __unused1;
3851 #endif
3852 abi_ulong sem_ctime;
3853 #if TARGET_ABI_BITS == 32
3854 abi_ulong __unused2;
3855 #endif
3856 abi_ulong sem_nsems;
3857 abi_ulong __unused3;
3858 abi_ulong __unused4;
3860 #endif
/* Copy a guest struct ipc_perm (embedded at the head of a guest
 * semid64_ds) into host form, byte-swapping each field.  mode/__seq
 * widths differ per target ABI, hence the #ifs. */
3862 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3863 abi_ulong target_addr)
3865 struct target_ipc_perm *target_ip;
3866 struct target_semid64_ds *target_sd;
3868 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3869 return -TARGET_EFAULT;
3870 target_ip = &(target_sd->sem_perm);
3871 host_ip->__key = tswap32(target_ip->__key);
3872 host_ip->uid = tswap32(target_ip->uid);
3873 host_ip->gid = tswap32(target_ip->gid);
3874 host_ip->cuid = tswap32(target_ip->cuid);
3875 host_ip->cgid = tswap32(target_ip->cgid);
3876 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3877 host_ip->mode = tswap32(target_ip->mode);
3878 #else
3879 host_ip->mode = tswap16(target_ip->mode);
3880 #endif
3881 #if defined(TARGET_PPC)
3882 host_ip->__seq = tswap32(target_ip->__seq);
3883 #else
3884 host_ip->__seq = tswap16(target_ip->__seq);
3885 #endif
3886 unlock_user_struct(target_sd, target_addr, 0);
3887 return 0;
/* Inverse of target_to_host_ipc_perm(): write host ipc_perm fields
 * back into the guest structure. */
3890 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3891 struct ipc_perm *host_ip)
3893 struct target_ipc_perm *target_ip;
3894 struct target_semid64_ds *target_sd;
3896 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3897 return -TARGET_EFAULT;
3898 target_ip = &(target_sd->sem_perm);
3899 target_ip->__key = tswap32(host_ip->__key);
3900 target_ip->uid = tswap32(host_ip->uid);
3901 target_ip->gid = tswap32(host_ip->gid);
3902 target_ip->cuid = tswap32(host_ip->cuid);
3903 target_ip->cgid = tswap32(host_ip->cgid);
3904 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3905 target_ip->mode = tswap32(host_ip->mode);
3906 #else
3907 target_ip->mode = tswap16(host_ip->mode);
3908 #endif
3909 #if defined(TARGET_PPC)
3910 target_ip->__seq = tswap32(host_ip->__seq);
3911 #else
3912 target_ip->__seq = tswap16(host_ip->__seq);
3913 #endif
3914 unlock_user_struct(target_sd, target_addr, 1);
3915 return 0;
/* Convert a guest semid64_ds to a host semid_ds (perm via the ipc_perm
 * helper, remaining fields byte-swapped). */
3918 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3919 abi_ulong target_addr)
3921 struct target_semid64_ds *target_sd;
3923 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3924 return -TARGET_EFAULT;
3925 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3926 return -TARGET_EFAULT;
3927 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3928 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3929 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3930 unlock_user_struct(target_sd, target_addr, 0);
3931 return 0;
/* Inverse conversion: host semid_ds back to the guest structure. */
3934 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3935 struct semid_ds *host_sd)
3937 struct target_semid64_ds *target_sd;
3939 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3940 return -TARGET_EFAULT;
3941 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3942 return -TARGET_EFAULT;
3943 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3944 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3945 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3946 unlock_user_struct(target_sd, target_addr, 1);
3947 return 0;
/* Guest layout of struct seminfo (returned by semctl IPC_INFO/SEM_INFO). */
3950 struct target_seminfo {
3951 int semmap;
3952 int semmni;
3953 int semmns;
3954 int semmnu;
3955 int semmsl;
3956 int semopm;
3957 int semume;
3958 int semusz;
3959 int semvmx;
3960 int semaem;
/* Copy a host seminfo into guest memory, swapping each field. */
3963 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3964 struct seminfo *host_seminfo)
3966 struct target_seminfo *target_seminfo;
3967 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3968 return -TARGET_EFAULT;
3969 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3970 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3971 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3972 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3973 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3974 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3975 __put_user(host_seminfo->semume, &target_seminfo->semume);
3976 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3977 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3978 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3979 unlock_user_struct(target_seminfo, target_addr, 1);
3980 return 0;
/* Host-side semctl argument union (glibc does not define it). */
3983 union semun {
3984 int val;
3985 struct semid_ds *buf;
3986 unsigned short *array;
3987 struct seminfo *__buf;
/* Guest view of the same union: pointers are guest addresses. */
3990 union target_semun {
3991 int val;
3992 abi_ulong buf;
3993 abi_ulong array;
3994 abi_ulong __buf;
/* Allocate *host_array and fill it from the guest's unsigned short
 * array (used for SETALL).  The semaphore count is queried from the
 * kernel via IPC_STAT.  Caller owns the g_free() of *host_array. */
3997 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3998 abi_ulong target_addr)
4000 int nsems;
4001 unsigned short *array;
4002 union semun semun;
4003 struct semid_ds semid_ds;
4004 int i, ret;
4006 semun.buf = &semid_ds;
4008 ret = semctl(semid, 0, IPC_STAT, semun);
4009 if (ret == -1)
4010 return get_errno(ret);
4012 nsems = semid_ds.sem_nsems;
4014 *host_array = g_try_new(unsigned short, nsems);
4015 if (!*host_array) {
4016 return -TARGET_ENOMEM;
4018 array = lock_user(VERIFY_READ, target_addr,
4019 nsems*sizeof(unsigned short), 1);
4020 if (!array) {
4021 g_free(*host_array);
4022 return -TARGET_EFAULT;
4025 for(i=0; i<nsems; i++) {
4026 __get_user((*host_array)[i], &array[i]);
4028 unlock_user(array, target_addr, 0);
4030 return 0;
/* Copy *host_array back into the guest's unsigned short array (used
 * for GETALL) and free it; count re-queried via IPC_STAT. */
4033 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4034 unsigned short **host_array)
4036 int nsems;
4037 unsigned short *array;
4038 union semun semun;
4039 struct semid_ds semid_ds;
4040 int i, ret;
4042 semun.buf = &semid_ds;
4044 ret = semctl(semid, 0, IPC_STAT, semun);
4045 if (ret == -1)
4046 return get_errno(ret);
4048 nsems = semid_ds.sem_nsems;
4050 array = lock_user(VERIFY_WRITE, target_addr,
4051 nsems*sizeof(unsigned short), 0);
4052 if (!array)
4053 return -TARGET_EFAULT;
4055 for(i=0; i<nsems; i++) {
4056 __put_user((*host_array)[i], &array[i]);
/* Ownership of the host array ends here. */
4058 g_free(*host_array);
4059 unlock_user(array, target_addr, 1);
4061 return 0;
/* Emulate semctl(2): decode the guest's semun argument per command
 * and convert inputs/outputs through the helpers above. */
4064 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4065 abi_ulong target_arg)
4067 union target_semun target_su = { .buf = target_arg };
4068 union semun arg;
4069 struct semid_ds dsarg;
4070 unsigned short *array = NULL;
4071 struct seminfo seminfo;
4072 abi_long ret = -TARGET_EINVAL;
4073 abi_long err;
/* Strip IPC_64 and similar flag bits; we always use the 64-bit layout. */
4074 cmd &= 0xff;
4076 switch( cmd ) {
4077 case GETVAL:
4078 case SETVAL:
4079 /* In 64 bit cross-endian situations, we will erroneously pick up
4080 * the wrong half of the union for the "val" element. To rectify
4081 * this, the entire 8-byte structure is byteswapped, followed by
4082 * a swap of the 4 byte val field. In other cases, the data is
4083 * already in proper host byte order. */
4084 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4085 target_su.buf = tswapal(target_su.buf);
4086 arg.val = tswap32(target_su.val);
4087 } else {
4088 arg.val = target_su.val;
4090 ret = get_errno(semctl(semid, semnum, cmd, arg));
4091 break;
4092 case GETALL:
4093 case SETALL:
4094 err = target_to_host_semarray(semid, &array, target_su.array);
4095 if (err)
4096 return err;
4097 arg.array = array;
4098 ret = get_errno(semctl(semid, semnum, cmd, arg));
/* Copies results back to the guest and frees the host array. */
4099 err = host_to_target_semarray(semid, target_su.array, &array);
4100 if (err)
4101 return err;
4102 break;
4103 case IPC_STAT:
4104 case IPC_SET:
4105 case SEM_STAT:
4106 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4107 if (err)
4108 return err;
4109 arg.buf = &dsarg;
4110 ret = get_errno(semctl(semid, semnum, cmd, arg));
4111 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4112 if (err)
4113 return err;
4114 break;
4115 case IPC_INFO:
4116 case SEM_INFO:
4117 arg.__buf = &seminfo;
4118 ret = get_errno(semctl(semid, semnum, cmd, arg));
4119 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4120 if (err)
4121 return err;
4122 break;
4123 case IPC_RMID:
4124 case GETPID:
4125 case GETNCNT:
4126 case GETZCNT:
/* These commands ignore the argument entirely. */
4127 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4128 break;
4131 return ret;
/* Guest layout of struct sembuf for semop(2). */
4134 struct target_sembuf {
4135 unsigned short sem_num;
4136 short sem_op;
4137 short sem_flg;
/* Copy an array of nsops guest sembufs into host form. */
4140 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4141 abi_ulong target_addr,
4142 unsigned nsops)
4144 struct target_sembuf *target_sembuf;
4145 int i;
4147 target_sembuf = lock_user(VERIFY_READ, target_addr,
4148 nsops*sizeof(struct target_sembuf), 1);
4149 if (!target_sembuf)
4150 return -TARGET_EFAULT;
4152 for(i=0; i<nsops; i++) {
4153 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4154 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4155 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4158 unlock_user(target_sembuf, target_addr, 0);
4160 return 0;
/* Emulate semop(2); implemented via semtimedop with a NULL timeout. */
4163 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4165 struct sembuf sops[nsops];
4167 if (target_to_host_sembuf(sops, ptr, nsops))
4168 return -TARGET_EFAULT;
4170 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
/* Guest layout of msqid64_ds; __unusedN padding follows the 32-bit
 * kernel ABI around the time fields. */
4173 struct target_msqid_ds
4175 struct target_ipc_perm msg_perm;
4176 abi_ulong msg_stime;
4177 #if TARGET_ABI_BITS == 32
4178 abi_ulong __unused1;
4179 #endif
4180 abi_ulong msg_rtime;
4181 #if TARGET_ABI_BITS == 32
4182 abi_ulong __unused2;
4183 #endif
4184 abi_ulong msg_ctime;
4185 #if TARGET_ABI_BITS == 32
4186 abi_ulong __unused3;
4187 #endif
4188 abi_ulong __msg_cbytes;
4189 abi_ulong msg_qnum;
4190 abi_ulong msg_qbytes;
4191 abi_ulong msg_lspid;
4192 abi_ulong msg_lrpid;
4193 abi_ulong __unused4;
4194 abi_ulong __unused5;
/* Convert a guest msqid_ds to host form (perm via helper, the rest
 * byte-swapped field by field). */
4197 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4198 abi_ulong target_addr)
4200 struct target_msqid_ds *target_md;
4202 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4203 return -TARGET_EFAULT;
4204 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4205 return -TARGET_EFAULT;
4206 host_md->msg_stime = tswapal(target_md->msg_stime);
4207 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4208 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4209 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4210 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4211 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4212 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4213 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4214 unlock_user_struct(target_md, target_addr, 0);
4215 return 0;
/* Inverse conversion: host msqid_ds back to guest memory. */
4218 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4219 struct msqid_ds *host_md)
4221 struct target_msqid_ds *target_md;
4223 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4224 return -TARGET_EFAULT;
4225 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4226 return -TARGET_EFAULT;
4227 target_md->msg_stime = tswapal(host_md->msg_stime);
4228 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4229 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4230 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4231 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4232 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4233 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4234 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4235 unlock_user_struct(target_md, target_addr, 1);
4236 return 0;
/* Guest layout of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
4239 struct target_msginfo {
4240 int msgpool;
4241 int msgmap;
4242 int msgmax;
4243 int msgmnb;
4244 int msgmni;
4245 int msgssz;
4246 int msgtql;
4247 unsigned short int msgseg;
/* Copy a host msginfo into guest memory, swapping each field. */
4250 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4251 struct msginfo *host_msginfo)
4253 struct target_msginfo *target_msginfo;
4254 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4255 return -TARGET_EFAULT;
4256 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4257 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4258 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4259 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4260 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4261 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4262 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4263 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4264 unlock_user_struct(target_msginfo, target_addr, 1);
4265 return 0;
/* Emulate msgctl(2): convert the per-command payload through the
 * msqid_ds/msginfo helpers around the host call. */
4268 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4270 struct msqid_ds dsarg;
4271 struct msginfo msginfo;
4272 abi_long ret = -TARGET_EINVAL;
/* Strip IPC_64 flag bits from the command. */
4274 cmd &= 0xff;
4276 switch (cmd) {
4277 case IPC_STAT:
4278 case IPC_SET:
4279 case MSG_STAT:
4280 if (target_to_host_msqid_ds(&dsarg,ptr))
4281 return -TARGET_EFAULT;
4282 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4283 if (host_to_target_msqid_ds(ptr,&dsarg))
4284 return -TARGET_EFAULT;
4285 break;
4286 case IPC_RMID:
4287 ret = get_errno(msgctl(msgid, cmd, NULL));
4288 break;
4289 case IPC_INFO:
4290 case MSG_INFO:
/* Kernel API takes msqid_ds* but fills a msginfo for these commands. */
4291 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4292 if (host_to_target_msginfo(ptr, &msginfo))
4293 return -TARGET_EFAULT;
4294 break;
4297 return ret;
/* Guest layout of struct msgbuf: type word followed by the payload. */
4300 struct target_msgbuf {
4301 abi_long mtype;
4302 char mtext[1];
/* Emulate msgsnd(2): copy the guest message into a host msgbuf
 * (swapping mtype) and send it. */
4305 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4306 ssize_t msgsz, int msgflg)
4308 struct target_msgbuf *target_mb;
4309 struct msgbuf *host_mb;
4310 abi_long ret = 0;
4312 if (msgsz < 0) {
4313 return -TARGET_EINVAL;
4316 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4317 return -TARGET_EFAULT;
/* sizeof(long) covers the host mtype that precedes the payload. */
4318 host_mb = g_try_malloc(msgsz + sizeof(long));
4319 if (!host_mb) {
4320 unlock_user_struct(target_mb, msgp, 0);
4321 return -TARGET_ENOMEM;
4323 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4324 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4325 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4326 g_free(host_mb);
4327 unlock_user_struct(target_mb, msgp, 0);
4329 return ret;
/* Emulate msgrcv(2): receive into a host msgbuf, then copy payload and
 * byte-swapped mtype back into the guest message buffer. */
4332 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4333 ssize_t msgsz, abi_long msgtyp,
4334 int msgflg)
4336 struct target_msgbuf *target_mb;
4337 char *target_mtext;
4338 struct msgbuf *host_mb;
4339 abi_long ret = 0;
4341 if (msgsz < 0) {
4342 return -TARGET_EINVAL;
4345 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4346 return -TARGET_EFAULT;
4348 host_mb = g_try_malloc(msgsz + sizeof(long));
4349 if (!host_mb) {
4350 ret = -TARGET_ENOMEM;
4351 goto end;
4353 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4355 if (ret > 0) {
/* ret is the number of payload bytes actually received. */
4356 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4357 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4358 if (!target_mtext) {
4359 ret = -TARGET_EFAULT;
4360 goto end;
4362 memcpy(target_mb->mtext, host_mb->mtext, ret);
4363 unlock_user(target_mtext, target_mtext_addr, ret);
4366 target_mb->mtype = tswapal(host_mb->mtype);
4368 end:
4369 if (target_mb)
4370 unlock_user_struct(target_mb, msgp, 1);
4371 g_free(host_mb);
4372 return ret;
/* Convert a guest shmid_ds to host form (perm via the ipc_perm helper,
 * other fields via __get_user which handles swapping and widths). */
4375 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4376 abi_ulong target_addr)
4378 struct target_shmid_ds *target_sd;
4380 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4381 return -TARGET_EFAULT;
4382 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4383 return -TARGET_EFAULT;
4384 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4385 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4386 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4387 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4388 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4389 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4390 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4391 unlock_user_struct(target_sd, target_addr, 0);
4392 return 0;
/* Inverse conversion: host shmid_ds back into guest memory. */
4395 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4396 struct shmid_ds *host_sd)
4398 struct target_shmid_ds *target_sd;
4400 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4401 return -TARGET_EFAULT;
4402 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4403 return -TARGET_EFAULT;
4404 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4405 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4406 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4407 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4408 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4409 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4410 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4411 unlock_user_struct(target_sd, target_addr, 1);
4412 return 0;
/* Guest layout of struct shminfo (shmctl IPC_INFO). */
4415 struct target_shminfo {
4416 abi_ulong shmmax;
4417 abi_ulong shmmin;
4418 abi_ulong shmmni;
4419 abi_ulong shmseg;
4420 abi_ulong shmall;
/* Copy a host shminfo into guest memory, swapping each field. */
4423 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4424 struct shminfo *host_shminfo)
4426 struct target_shminfo *target_shminfo;
4427 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4428 return -TARGET_EFAULT;
4429 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4430 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4431 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4432 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4433 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4434 unlock_user_struct(target_shminfo, target_addr, 1);
4435 return 0;
/* Guest layout of struct shm_info (shmctl SHM_INFO). */
4438 struct target_shm_info {
4439 int used_ids;
4440 abi_ulong shm_tot;
4441 abi_ulong shm_rss;
4442 abi_ulong shm_swp;
4443 abi_ulong swap_attempts;
4444 abi_ulong swap_successes;
/* Copy a host shm_info into guest memory, swapping each field. */
4447 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4448 struct shm_info *host_shm_info)
4450 struct target_shm_info *target_shm_info;
4451 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4452 return -TARGET_EFAULT;
4453 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4454 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4455 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4456 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4457 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4458 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4459 unlock_user_struct(target_shm_info, target_addr, 1);
4460 return 0;
4463 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4465 struct shmid_ds dsarg;
4466 struct shminfo shminfo;
4467 struct shm_info shm_info;
4468 abi_long ret = -TARGET_EINVAL;
4470 cmd &= 0xff;
4472 switch(cmd) {
4473 case IPC_STAT:
4474 case IPC_SET:
4475 case SHM_STAT:
4476 if (target_to_host_shmid_ds(&dsarg, buf))
4477 return -TARGET_EFAULT;
4478 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4479 if (host_to_target_shmid_ds(buf, &dsarg))
4480 return -TARGET_EFAULT;
4481 break;
4482 case IPC_INFO:
4483 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4484 if (host_to_target_shminfo(buf, &shminfo))
4485 return -TARGET_EFAULT;
4486 break;
4487 case SHM_INFO:
4488 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4489 if (host_to_target_shm_info(buf, &shm_info))
4490 return -TARGET_EFAULT;
4491 break;
4492 case IPC_RMID:
4493 case SHM_LOCK:
4494 case SHM_UNLOCK:
4495 ret = get_errno(shmctl(shmid, cmd, NULL));
4496 break;
4499 return ret;
/* Emulate shmat(2).  If the guest supplied no address, find a free spot in
 * the guest address space ourselves and force the mapping there with
 * SHM_REMAP.  On success the mapped range is registered in shm_regions[]
 * so do_shmdt() can later clear its page flags.
 * Returns the guest address of the mapping, or a target errno. */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    /* Hold the mmap lock: we are changing the guest memory layout. */
    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: mmap_find_vma reserved the range already */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Mark the guest pages valid/readable (and writable unless RDONLY). */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the mapping in the first free bookkeeping slot. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
4556 static inline abi_long do_shmdt(abi_ulong shmaddr)
4558 int i;
4560 for (i = 0; i < N_SHM_REGIONS; ++i) {
4561 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4562 shm_regions[i].in_use = false;
4563 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4564 break;
4568 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 *
 * ipc(2) is the historical multiplexer syscall: the low 16 bits of
 * 'call' select the operation, the high 16 bits carry an ABI version
 * used by a few operations (msgrcv, shmat). */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
        {
            /* Old ABI: msgp and msgtyp arrive packed in a struct at ptr. */
            struct target_ipc_kludge {
                abi_long msgp;
                abi_long msgtyp;
            } *tmp;

            if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                ret = -TARGET_EFAULT;
                break;
            }

            ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

            unlock_user_struct(tmp, ptr, 0);
            break;
        }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

/* X-macro over syscall_types.h: the first pass builds an enum of
 * STRUCT_<name> ids; the second pass emits a struct_<name>_def argtype
 * descriptor array for each non-special entry. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Signature of a per-ioctl custom handler (used when the generic
 * thunk-based conversion in do_ioctl() is not sufficient). */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table (see ioctls.h). */
struct IOCTLEntry {
    int target_cmd;           /* ioctl number in the guest ABI */
    unsigned int host_cmd;    /* corresponding host ioctl number */
    const char *name;         /* for logging */
    int access;               /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;    /* custom handler, or NULL for generic path */
    const argtype arg_type[5];/* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the on-stack bounce buffer used for ioctl argument structs. */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;    /* set when we allocated fm instead of using buf_temp */

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the fixed header from the guest into buf_temp. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4803 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4804 int fd, int cmd, abi_long arg)
4806 const argtype *arg_type = ie->arg_type;
4807 int target_size;
4808 void *argptr;
4809 int ret;
4810 struct ifconf *host_ifconf;
4811 uint32_t outbufsz;
4812 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4813 int target_ifreq_size;
4814 int nb_ifreq;
4815 int free_buf = 0;
4816 int i;
4817 int target_ifc_len;
4818 abi_long target_ifc_buf;
4819 int host_ifc_len;
4820 char *host_ifc_buf;
4822 assert(arg_type[0] == TYPE_PTR);
4823 assert(ie->access == IOC_RW);
4825 arg_type++;
4826 target_size = thunk_type_size(arg_type, 0);
4828 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4829 if (!argptr)
4830 return -TARGET_EFAULT;
4831 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4832 unlock_user(argptr, arg, 0);
4834 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4835 target_ifc_len = host_ifconf->ifc_len;
4836 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4838 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4839 nb_ifreq = target_ifc_len / target_ifreq_size;
4840 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4842 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4843 if (outbufsz > MAX_STRUCT_SIZE) {
4844 /* We can't fit all the extents into the fixed size buffer.
4845 * Allocate one that is large enough and use it instead.
4847 host_ifconf = malloc(outbufsz);
4848 if (!host_ifconf) {
4849 return -TARGET_ENOMEM;
4851 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4852 free_buf = 1;
4854 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4856 host_ifconf->ifc_len = host_ifc_len;
4857 host_ifconf->ifc_buf = host_ifc_buf;
4859 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4860 if (!is_error(ret)) {
4861 /* convert host ifc_len to target ifc_len */
4863 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4864 target_ifc_len = nb_ifreq * target_ifreq_size;
4865 host_ifconf->ifc_len = target_ifc_len;
4867 /* restore target ifc_buf */
4869 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4871 /* copy struct ifconf to target user */
4873 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4874 if (!argptr)
4875 return -TARGET_EFAULT;
4876 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4877 unlock_user(argptr, arg, target_size);
4879 /* copy ifreq[] to target user */
4881 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4882 for (i = 0; i < nb_ifreq ; i++) {
4883 thunk_convert(argptr + i * target_ifreq_size,
4884 host_ifc_buf + i * sizeof(struct ifreq),
4885 ifreq_arg_type, THUNK_TARGET);
4887 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4890 if (free_buf) {
4891 free(host_ifconf);
4894 return ret;
/* Device-mapper ioctls: the argument is a struct dm_ioctl header followed
 * by a command-specific variable-length payload at offset data_start.
 * buf_temp is too small for the payload, so everything is staged in a
 * heap buffer sized from the guest's data_size.  The payload is converted
 * by hand per command, both on the way in and on the way out. */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    /* Convert the fixed dm_ioctl header from the guest. */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    /* Convert the inbound payload according to the command. */
    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        /* leading 64-bit sector number needs byte-swapping */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* Payload is a chain of dm_target_spec structs, each followed by
         * a parameter string; 'next' links to the following spec. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        /* Convert the outbound payload back to the guest layout. */
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* Chain of dm_name_list entries; recompute 'next' offsets for
             * the target layout and stop when the guest buffer is full. */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* Chain of dm_target_spec entries plus status strings. */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* A count followed by an array of 64-bit device numbers. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* Chain of dm_target_versions entries, same walk as above. */
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Write the (possibly flag-updated) header back to the guest. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
/* BLKPG: the argument struct carries a 'data' pointer to a
 * struct blkpg_partition.  Convert the outer struct, then the pointed-to
 * partition, point 'data' at our host-side copy and issue the ioctl.
 * Neither supported opcode returns data, so no copy-back is needed. */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5175 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5176 int fd, int cmd, abi_long arg)
5178 const argtype *arg_type = ie->arg_type;
5179 const StructEntry *se;
5180 const argtype *field_types;
5181 const int *dst_offsets, *src_offsets;
5182 int target_size;
5183 void *argptr;
5184 abi_ulong *target_rt_dev_ptr;
5185 unsigned long *host_rt_dev_ptr;
5186 abi_long ret;
5187 int i;
5189 assert(ie->access == IOC_W);
5190 assert(*arg_type == TYPE_PTR);
5191 arg_type++;
5192 assert(*arg_type == TYPE_STRUCT);
5193 target_size = thunk_type_size(arg_type, 0);
5194 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5195 if (!argptr) {
5196 return -TARGET_EFAULT;
5198 arg_type++;
5199 assert(*arg_type == (int)STRUCT_rtentry);
5200 se = struct_entries + *arg_type++;
5201 assert(se->convert[0] == NULL);
5202 /* convert struct here to be able to catch rt_dev string */
5203 field_types = se->field_types;
5204 dst_offsets = se->field_offsets[THUNK_HOST];
5205 src_offsets = se->field_offsets[THUNK_TARGET];
5206 for (i = 0; i < se->nb_fields; i++) {
5207 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5208 assert(*field_types == TYPE_PTRVOID);
5209 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5210 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5211 if (*target_rt_dev_ptr != 0) {
5212 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5213 tswapal(*target_rt_dev_ptr));
5214 if (!*host_rt_dev_ptr) {
5215 unlock_user(argptr, arg, 0);
5216 return -TARGET_EFAULT;
5218 } else {
5219 *host_rt_dev_ptr = 0;
5221 field_types++;
5222 continue;
5224 field_types = thunk_convert(buf_temp + dst_offsets[i],
5225 argptr + src_offsets[i],
5226 field_types, THUNK_HOST);
5228 unlock_user(argptr, arg, 0);
5230 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5231 if (*host_rt_dev_ptr != 0) {
5232 unlock_user((void *)*host_rt_dev_ptr,
5233 *target_rt_dev_ptr, 0);
5235 return ret;
5238 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5239 int fd, int cmd, abi_long arg)
5241 int sig = target_to_host_signal(arg);
5242 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/* Translation table for all supported ioctls, generated from ioctls.h.
 * IOCTL() rows use the generic thunk conversion in do_ioctl();
 * IOCTL_SPECIAL() rows name a custom do_ioctl_fn handler.
 * The table is terminated by a zero target_cmd. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos.
 *
 * Generic ioctl dispatcher: look the command up in ioctl_entries[],
 * delegate to a custom handler if one is registered, otherwise convert
 * the argument according to its thunk description and access mode. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search; the table is terminated by target_cmd == 0. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* pass-through: value or opaque pointer, no conversion */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* kernel writes the struct: convert out after the call */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* kernel reads the struct: convert in before the call */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* convert in, call, convert back out */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* Bitmask translation tables for termios flags.  Each row is
 * { target_mask, target_bits, host_mask, host_bits }: the *_mask selects
 * the field, *_bits is the value it maps to.  Multi-bit fields (NLDLY,
 * CBAUD, CSIZE, ...) get one row per value.  Tables end with all zeros. */

/* termios input modes (c_iflag) */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};

/* termios output modes (c_oflag), including the delay-select fields */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};

/* termios control modes (c_cflag): baud rates, character size, etc. */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};

/* termios local modes (c_lflag) */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
/* Convert a guest struct target_termios (src) into a host struct
 * host_termios (dst): translate each flag word through its bitmask table
 * and remap the control-character array index by index. */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Host slots with no target equivalent stay zero. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/* Inverse of target_to_host_termios: convert a host struct host_termios
 * (src) into a guest struct target_termios (dst). */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Target slots with no host equivalent stay zero. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for termios: registers the two hand-written converters
 * above (indexed by conversion direction) plus sizes and alignment. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5517 static bitmask_transtbl mmap_flags_tbl[] = {
5518 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5519 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5520 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5521 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5522 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5523 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5524 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5525 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5526 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5527 MAP_NORESERVE },
5528 { 0, 0, 0, 0 }
5531 #if defined(TARGET_I386)
/* NOTE: there is really one LDT for all the threads */
/* Backing store for the emulated LDT, lazily allocated by write_ldt()
 * and mapped into guest memory at env->ldt.base. */
static uint8_t *ldt_table;
5536 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5538 int size;
5539 void *p;
5541 if (!ldt_table)
5542 return 0;
5543 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5544 if (size > bytecount)
5545 size = bytecount;
5546 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5547 if (!p)
5548 return -TARGET_EFAULT;
5549 /* ??? Should this by byteswapped? */
5550 memcpy(p, ldt_table, size);
5551 unlock_user(p, ptr, size);
5552 return size;
/* XXX: add locking support */
/* Implement modify_ldt(func == 1 or 0x11): install one LDT descriptor
 * from the guest's struct user_desc at 'ptr'. 'oldmode' selects the
 * legacy (func == 1) semantics. The descriptor encoding below mirrors
 * the Linux kernel's fill_ldt()/write_ldt(). */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the user_desc flag bits (same layout as the kernel's). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents == 3 (conforming code) is only valid for not-present
     * segments and never in oldmode. */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit words of an x86 segment descriptor:
     * entry_1 = base[15:0] | limit[15:0]; entry_2 holds the high base
     * bits, limit[19:16], type/DPL/present and the flag bits. 0x7000
     * sets DPL=3 and present. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5647 /* specific and weird i386 syscalls */
5648 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5649 unsigned long bytecount)
5651 abi_long ret;
5653 switch (func) {
5654 case 0:
5655 ret = read_ldt(ptr, bytecount);
5656 break;
5657 case 1:
5658 ret = write_ldt(env, ptr, bytecount, 1);
5659 break;
5660 case 0x11:
5661 ret = write_ldt(env, ptr, bytecount, 0);
5662 break;
5663 default:
5664 ret = -TARGET_ENOSYS;
5665 break;
5667 return ret;
5670 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Implement set_thread_area(2): install a TLS descriptor in the emulated
 * GDT. If the guest passes entry_number == -1, pick the first free TLS
 * slot and write the chosen index back into the guest struct. The
 * descriptor packing mirrors write_ldt() above / the Linux kernel. */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    /* VERIFY_WRITE: the slot search below may update entry_number. */
    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Find a free TLS slot and report it back to the guest. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the user_desc flag bits (same layout as the kernel's). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two descriptor words; 0x7000 sets DPL=3 and present. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* Implement get_thread_area(2): decode the GDT TLS descriptor selected
 * by the guest's entry_number back into user_desc form and copy it out.
 * This is the exact inverse of the packing in do_set_thread_area(). */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Extract the individual flag bits from the descriptor's high word;
     * note read_exec_only and seg_not_present are stored inverted. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Reassemble the user_desc flags word. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
5802 #endif /* TARGET_I386 && TARGET_ABI32 */
5804 #ifndef TARGET_ABI32
5805 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5807 abi_long ret = 0;
5808 abi_ulong val;
5809 int idx;
5811 switch(code) {
5812 case TARGET_ARCH_SET_GS:
5813 case TARGET_ARCH_SET_FS:
5814 if (code == TARGET_ARCH_SET_GS)
5815 idx = R_GS;
5816 else
5817 idx = R_FS;
5818 cpu_x86_load_seg(env, idx, 0);
5819 env->segs[idx].base = addr;
5820 break;
5821 case TARGET_ARCH_GET_GS:
5822 case TARGET_ARCH_GET_FS:
5823 if (code == TARGET_ARCH_GET_GS)
5824 idx = R_GS;
5825 else
5826 idx = R_FS;
5827 val = env->segs[idx].base;
5828 if (put_user(val, addr, abi_ulong))
5829 ret = -TARGET_EFAULT;
5830 break;
5831 default:
5832 ret = -TARGET_EINVAL;
5833 break;
5835 return ret;
5837 #endif
5839 #endif /* defined(TARGET_I386) */
/* Host stack size for threads created via clone(CLONE_VM). */
#define NEW_STACK_SIZE 0x40000

/* Held by the parent across thread creation so a new child cannot start
 * running guest code before the parent has finished its setup
 * (see clone_func / do_fork). */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Startup handshake data passed from do_fork() to clone_func(). */
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;      /* protects the startup handshake */
    pthread_cond_t cond;        /* signalled once the child is ready */
    pthread_t thread;
    uint32_t tid;               /* child's host TID, filled in by the child */
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;           /* parent's signal mask, restored in the child */
} new_thread_info;
/* Thread start routine for clone(CLONE_VM): publish the new TID where
 * requested, perform the ready handshake with the parent, then enter the
 * guest CPU loop. Never returns. */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    /* Report the TID to the guest addresses requested via
     * CLONE_CHILD_SETTID / CLONE_PARENT_SETTID. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals (the parent blocked them all before pthread_create). */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions).
   CLONE_VM requests become a host pthread sharing this process's guest
   memory; anything else (including vfork) is emulated with fork(). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent. */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;
        /* Keep the NPTL flags for our own bookkeeping but don't let them
         * influence anything else derived from 'flags'. */
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        /* Restore our own mask; the child restores its copy itself. */
        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize. */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process. */
            rcu_after_fork();
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here. The parent process could
               theoretically read the TID in the child process before the child
               tid is set. This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping. We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
/* Map a target fcntl(2) command constant to the host's. Lock commands
 * are routed to the 64-bit host variants so 'struct flock64' can be used
 * uniformly by do_fcntl(). Returns -TARGET_EINVAL for unknown commands. */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK64;
    case TARGET_F_SETLK:
        return F_SETLK64;
    case TARGET_F_SETLKW:
        return F_SETLKW64;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        return F_SETOWN_EX;
#endif
    default:
        return -TARGET_EINVAL;
    }
    /* Unreachable: the default case above already returned. */
    return -TARGET_EINVAL;
}
/* Build a one-bit translation entry mapping TARGET_x <-> x; the -1 masks
 * make target_to_host_bitmask() treat the whole value as significant. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
/* Translation table for flock l_type values, zero-terminated. */
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
6083 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6084 abi_ulong target_flock_addr)
6086 struct target_flock *target_fl;
6087 short l_type;
6089 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6090 return -TARGET_EFAULT;
6093 __get_user(l_type, &target_fl->l_type);
6094 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6095 __get_user(fl->l_whence, &target_fl->l_whence);
6096 __get_user(fl->l_start, &target_fl->l_start);
6097 __get_user(fl->l_len, &target_fl->l_len);
6098 __get_user(fl->l_pid, &target_fl->l_pid);
6099 unlock_user_struct(target_fl, target_flock_addr, 0);
6100 return 0;
6103 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6104 const struct flock64 *fl)
6106 struct target_flock *target_fl;
6107 short l_type;
6109 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6110 return -TARGET_EFAULT;
6113 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6114 __put_user(l_type, &target_fl->l_type);
6115 __put_user(fl->l_whence, &target_fl->l_whence);
6116 __put_user(fl->l_start, &target_fl->l_start);
6117 __put_user(fl->l_len, &target_fl->l_len);
6118 __put_user(fl->l_pid, &target_fl->l_pid);
6119 unlock_user_struct(target_fl, target_flock_addr, 1);
6120 return 0;
/* Function types for the per-ABI flock64 copy-in/copy-out helpers,
 * so do_fcntl64() can select the right pair at runtime. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6126 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* Read a guest EABI-layout struct flock64 (ARM EABI pads/aligns the
 * 64-bit fields differently) and convert it into a host struct flock64.
 * Returns 0 on success or -TARGET_EFAULT for a bad guest address. */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/* Convert a host struct flock64 back into the guest's EABI-layout
 * struct flock64 at target_flock_addr.
 * Returns 0 on success or -TARGET_EFAULT for a bad guest address. */
static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6166 #endif
/* Read a guest struct flock64 and convert it into host form, translating
 * l_type through flock_tbl.
 * Returns 0 on success or -TARGET_EFAULT for a bad guest address. */
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/* Convert a host struct flock64 back into the guest struct flock64 at
 * target_flock_addr, translating l_type through flock_tbl.
 * Returns 0 on success or -TARGET_EFAULT for a bad guest address. */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/* Implement fcntl(2): translate the command and its argument (flock
 * structures, flag bitmasks, f_owner_ex) between guest and host, invoke
 * the host fcntl, and translate results back.
 * Returns the host result (converted) or a negative target errno. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* Report the conflicting lock (if any) back to the guest. */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* Translate the returned O_* flag bits to target values. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Integer-argument commands need no translation beyond the command
     * mapping itself. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown command: pass it straight through. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6316 #ifdef USE_UID16
/* Narrow a 32-bit uid for a 16-bit-uid target: anything that does not
 * fit in 16 bits is reported as the overflow uid 65534. */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}
/* Narrow a 32-bit gid for a 16-bit-gid target: anything that does not
 * fit in 16 bits is reported as the overflow gid 65534. */
static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}
/* Widen a 16-bit uid from the guest: the 16-bit value -1 (0xffff) is the
 * "no change" sentinel and must widen to 32-bit -1; everything else is
 * passed through unchanged. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}
/* Widen a 16-bit gid from the guest: the 16-bit value -1 (0xffff) is the
 * "no change" sentinel and must widen to 32-bit -1; everything else is
 * passed through unchanged. */
static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}
/* With USE_UID16, uid/gid values cross the guest ABI as 16-bit words. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6356 #else /* !USE_UID16 */
/* Without USE_UID16 the target uses full 32-bit ids, so all the
 * narrowing/widening conversions collapse to the identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* ids cross the guest ABI as 32-bit words. */
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6380 #endif /* USE_UID16 */
6382 /* We must do direct syscalls for setting UID/GID, because we want to
6383 * implement the Linux system call semantics of "change only for this thread",
6384 * not the libc/POSIX semantics of "change for all threads in process".
6385 * (See http://ewontfix.com/17/ for more details.)
6386 * We use the 32-bit version of the syscalls if present; if it is not
6387 * then either the host architecture supports 32-bit UIDs natively with
6388 * the standard syscall, or the 16-bit UID is the best we can do.
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw per-thread syscall wrappers (deliberately not the libc functions,
 * which broadcast the credential change to every thread — see above). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time initialization for the syscall layer: register all thunk
 * struct descriptors, build the reverse errno table, and patch each
 * ioctl's encoded size field from its thunk argument type. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

/* Expanding syscall_types.h with these macros registers every struct
 * conversion descriptor with the thunk layer. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            /* Replace the all-ones placeholder with the real target size
             * of the pointed-to type. */
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit offset passed by a 32-bit guest as a register pair;
 * the word holding the high half depends on the target's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit guests pass the offset in a single register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
6484 #ifdef TARGET_NR_truncate64
6485 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6486 abi_long arg2,
6487 abi_long arg3,
6488 abi_long arg4)
6490 if (regpairs_aligned(cpu_env)) {
6491 arg2 = arg3;
6492 arg3 = arg4;
6494 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6496 #endif
6498 #ifdef TARGET_NR_ftruncate64
6499 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6500 abi_long arg2,
6501 abi_long arg3,
6502 abi_long arg4)
6504 if (regpairs_aligned(cpu_env)) {
6505 arg2 = arg3;
6506 arg3 = arg4;
6508 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6510 #endif
6512 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6513 abi_ulong target_addr)
6515 struct target_timespec *target_ts;
6517 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6518 return -TARGET_EFAULT;
6519 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6520 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6521 unlock_user_struct(target_ts, target_addr, 0);
6522 return 0;
6525 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6526 struct timespec *host_ts)
6528 struct target_timespec *target_ts;
6530 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6531 return -TARGET_EFAULT;
6532 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6533 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6534 unlock_user_struct(target_ts, target_addr, 1);
6535 return 0;
6538 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6539 abi_ulong target_addr)
6541 struct target_itimerspec *target_itspec;
6543 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6544 return -TARGET_EFAULT;
6547 host_itspec->it_interval.tv_sec =
6548 tswapal(target_itspec->it_interval.tv_sec);
6549 host_itspec->it_interval.tv_nsec =
6550 tswapal(target_itspec->it_interval.tv_nsec);
6551 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6552 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6554 unlock_user_struct(target_itspec, target_addr, 1);
6555 return 0;
6558 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6559 struct itimerspec *host_its)
6561 struct target_itimerspec *target_itspec;
6563 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6564 return -TARGET_EFAULT;
6567 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6568 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6570 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6571 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6573 unlock_user_struct(target_itspec, target_addr, 0);
6574 return 0;
6577 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6578 abi_ulong target_addr)
6580 struct target_sigevent *target_sevp;
6582 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6583 return -TARGET_EFAULT;
6586 /* This union is awkward on 64 bit systems because it has a 32 bit
6587 * integer and a pointer in it; we follow the conversion approach
6588 * used for handling sigval types in signal.c so the guest should get
6589 * the correct value back even if we did a 64 bit byteswap and it's
6590 * using the 32 bit integer.
6592 host_sevp->sigev_value.sival_ptr =
6593 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6594 host_sevp->sigev_signo =
6595 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6596 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6597 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6599 unlock_user_struct(target_sevp, target_addr, 1);
6600 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall() MCL_* flag bits into host bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_flags = (arg & TARGET_MLOCKALL_MCL_CURRENT) ? MCL_CURRENT : 0;

    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        host_flags |= MCL_FUTURE;
    }

    return host_flags;
}
#endif
/*
 * Copy a host struct stat into the guest's stat64-shaped buffer at
 * target_addr, byte-swapping every field with __put_user.
 * Returns 0 on success or -TARGET_EFAULT if the guest buffer cannot
 * be locked for writing.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    /* 32-bit ARM EABI guests use their own, differently packed
     * stat64 layout, selected at runtime from the CPU state. */
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Clear first so fields we don't fill read as zero. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Targets with a split inode field get it stored twice. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        /* Targets without a separate stat64 reuse the plain layout. */
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things. However implementing
   futexes locally would make futexes shared between multiple processes
   tricky. However they're probably useless because guest atomic
   operations won't work either. */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            /* NOTE(review): return value not checked; a bad guest
             * timeout pointer leaves ts unset — confirm intended. */
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        /* val is byteswapped: FUTEX_WAIT compares it against the
         * guest-endian word at uaddr. */
        return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, val3));
    case FUTEX_WAKE:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler. We do not need to tswap TIMEOUT
           since it's not compared to guest memory. */
        pts = (struct timespec *)(uintptr_t) timeout;
        /* val3 is the expected guest-memory value only for
         * FUTEX_CMP_REQUEUE, hence the conditional byteswap. */
        return get_errno(safe_futex(g2h(uaddr), op, val, pts,
                                    g2h(uaddr2),
                                    (base_op == FUTEX_CMP_REQUEUE
                                     ? tswap32(val3)
                                     : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): resolve the guest pathname into an
 * opaque file handle written back to the guest buffer at `handle`,
 * and store the mount id at `mount_id`.
 * Returns the host syscall result or -TARGET_EFAULT on bad guest
 * pointers.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of the guest file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): size is guest-controlled and unvalidated; a huge
     * value can wrap total_size — confirm upper bound is enforced
     * elsewhere or rely on the kernel rejecting it. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    /* Host-side scratch handle; handle_bytes tells the kernel how much
     * room follows the header. */
    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    /* Copy raw bytes, then byteswap only the two header fields. */
    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): read the opaque file handle from guest
 * memory at `handle` and open it against mount_fd with the translated
 * open flags.  Returns the new host fd or a -TARGET_* errno.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of the guest file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): size is guest-controlled; total_size can wrap for
     * huge values — confirm the kernel rejects oversized handles. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Duplicate the guest handle, then fix up the header fields for
     * the host; the payload bytes stay opaque. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
6820 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6822 /* signalfd siginfo conversion */
/*
 * Byteswap a signalfd_siginfo record for the guest.  The only caller
 * (host_to_target_data_signalfd) passes tinfo == info, so this is an
 * in-place conversion: the BUS_MCEERR check below deliberately reads
 * the still-host-order fields through tinfo before they are swapped.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        /* ssi_addr_lsb lives in the padding right after ssi_addr. */
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Reads tinfo (not info): equivalent here because the structs
     * alias, and ssi_errno has not been swapped yet. */
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6862 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
6864 int i;
6866 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
6867 host_to_target_signalfd_siginfo(buf + i, buf + i);
6870 return len;
/* fd translator hook: byteswap signalfd_siginfo records as the guest
 * reads them from a signalfd descriptor (registered in do_signalfd4). */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
/*
 * Emulate signalfd4(2): read the guest signal mask at `mask`, translate
 * mask and flags to host values, create the signalfd and register the
 * byteswapping fd translator on success.
 * Returns the new fd or a -TARGET_* errno.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only SFD_NONBLOCK/SFD_CLOEXEC equivalents are valid. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Reads from this fd must be byteswapped for the guest. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
6904 #endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    int sig;

    if (WIFSIGNALED(status)) {
        sig = host_to_target_signal(WTERMSIG(status));
        return sig | (status & ~0x7f);
    }

    if (WIFSTOPPED(status)) {
        sig = host_to_target_signal(WSTOPSIG(status));
        return (sig << 8) | (status & 0xff);
    }

    return status;
}
/*
 * Back the guest's /proc/self/cmdline: copy the real file into fd,
 * skipping the first NUL-terminated entry (the qemu binary path) so
 * the guest sees its own argv.  Returns 0 on success, -1 with errno
 * preserved on read/write failure.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            /* Preserve the read() errno across close(). */
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            /* NOTE(review): if the first entry is longer than one
             * 128-byte chunk, that chunk is dropped and the NUL is
             * searched for again in the next one — confirm entries
             * longer than the buffer are handled as intended. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
6970 static int open_self_maps(void *cpu_env, int fd)
6972 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6973 TaskState *ts = cpu->opaque;
6974 FILE *fp;
6975 char *line = NULL;
6976 size_t len = 0;
6977 ssize_t read;
6979 fp = fopen("/proc/self/maps", "r");
6980 if (fp == NULL) {
6981 return -1;
6984 while ((read = getline(&line, &len, fp)) != -1) {
6985 int fields, dev_maj, dev_min, inode;
6986 uint64_t min, max, offset;
6987 char flag_r, flag_w, flag_x, flag_p;
6988 char path[512] = "";
6989 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6990 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6991 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6993 if ((fields < 10) || (fields > 11)) {
6994 continue;
6996 if (h2g_valid(min)) {
6997 int flags = page_get_flags(h2g(min));
6998 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
6999 if (page_check_range(h2g(min), max - min, flags) == -1) {
7000 continue;
7002 if (h2g(min) == ts->info->stack_limit) {
7003 pstrcpy(path, sizeof(path), " [stack]");
7005 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7006 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7007 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7008 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7009 path[0] ? " " : "", path);
7013 free(line);
7014 fclose(fp);
7016 return 0;
/*
 * Back the guest's /proc/self/stat: emit the 44 space-separated fields,
 * filling in only pid (0), comm (1) and startstack (27) with real
 * values; every other field reads as 0.  Returns 0 on success, -1 on a
 * short write.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
/*
 * Back the guest's /proc/self/auxv: copy the auxiliary vector saved at
 * exec time from guest memory into fd.  Always returns 0, even if the
 * guest auxv region cannot be locked or the write is short.
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr and len have been advanced/decremented by
         * the loop, so this unlock does not describe the original
         * region — confirm this matches lock_user's expectations. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/*
 * Return 1 if filename names the given /proc entry of the current
 * process — either "/proc/self/<entry>" or "/proc/<pid>/<entry>" with
 * our own pid — and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(rest, myself, strlen(myself)) != 0) {
            return 0;
        }
        rest += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
7110 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-pathname match helper for the cross-endian fake /proc files. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
/*
 * Back the guest's /proc/net/route on cross-endian configurations:
 * copy the host file into fd, byteswapping the hex address columns
 * (dest, gateway, mask) for the guest.  Returns 0 on success, -1 if
 * the host file cannot be opened.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */
    /* NOTE(review): if this getline fails, line may still be NULL when
     * passed to dprintf "%s" — confirm an empty file is impossible. */
    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */
    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
               metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
/*
 * openat(2) replacement that intercepts the /proc paths whose contents
 * must be faked for the guest (maps/stat/auxv/cmdline for this process,
 * plus /proc/net/route on cross-endian builds).  Faked entries are
 * generated into an unlinked temp file whose fd is returned; /proc/self/exe
 * maps to the original exec fd or path; everything else falls through
 * to safe_openat().
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);      /* writes fake contents */
        int (*cmp)(const char *s1, const char *s2); /* path matcher */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* Prefer the fd the loader gave us over re-opening exec_path. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            /* Preserve the fill errno across close(). */
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads from the start. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
7214 #define TIMER_MAGIC 0x0caf0000
7215 #define TIMER_MAGIC_MASK 0xffff0000
/* Convert QEMU provided timer ID back to internal 16bit index format.
 * Returns the g_posix_timers index, or -TARGET_EINVAL if the magic tag
 * or the index is invalid. */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* Valid guest timer IDs carry TIMER_MAGIC in the upper 16 bits. */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    /* The low 16 bits index the g_posix_timers table. */
    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
7235 /* do_syscall() should always have a single exit point at the end so
7236 that actions, such as logging of syscall results, can be performed.
7237 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7238 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7239 abi_long arg2, abi_long arg3, abi_long arg4,
7240 abi_long arg5, abi_long arg6, abi_long arg7,
7241 abi_long arg8)
7243 CPUState *cpu = ENV_GET_CPU(cpu_env);
7244 abi_long ret;
7245 struct stat st;
7246 struct statfs stfs;
7247 void *p;
7249 #if defined(DEBUG_ERESTARTSYS)
7250 /* Debug-only code for exercising the syscall-restart code paths
7251 * in the per-architecture cpu main loops: restart every syscall
7252 * the guest makes once before letting it through.
7255 static int flag;
7257 flag = !flag;
7258 if (flag) {
7259 return -TARGET_ERESTARTSYS;
7262 #endif
7264 #ifdef DEBUG
7265 gemu_log("syscall %d", num);
7266 #endif
7267 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7268 if(do_strace)
7269 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7271 switch(num) {
7272 case TARGET_NR_exit:
7273 /* In old applications this may be used to implement _exit(2).
7274 However in threaded applictions it is used for thread termination,
7275 and _exit_group is used for application termination.
7276 Do thread termination if we have more then one thread. */
7278 if (block_signals()) {
7279 ret = -TARGET_ERESTARTSYS;
7280 break;
7283 if (CPU_NEXT(first_cpu)) {
7284 TaskState *ts;
7286 cpu_list_lock();
7287 /* Remove the CPU from the list. */
7288 QTAILQ_REMOVE(&cpus, cpu, node);
7289 cpu_list_unlock();
7290 ts = cpu->opaque;
7291 if (ts->child_tidptr) {
7292 put_user_u32(0, ts->child_tidptr);
7293 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7294 NULL, NULL, 0);
7296 thread_cpu = NULL;
7297 object_unref(OBJECT(cpu));
7298 g_free(ts);
7299 rcu_unregister_thread();
7300 pthread_exit(NULL);
7302 #ifdef TARGET_GPROF
7303 _mcleanup();
7304 #endif
7305 gdb_exit(cpu_env, arg1);
7306 _exit(arg1);
7307 ret = 0; /* avoid warning */
7308 break;
7309 case TARGET_NR_read:
7310 if (arg3 == 0)
7311 ret = 0;
7312 else {
7313 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7314 goto efault;
7315 ret = get_errno(safe_read(arg1, p, arg3));
7316 if (ret >= 0 &&
7317 fd_trans_host_to_target_data(arg1)) {
7318 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7320 unlock_user(p, arg2, ret);
7322 break;
7323 case TARGET_NR_write:
7324 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7325 goto efault;
7326 ret = get_errno(safe_write(arg1, p, arg3));
7327 unlock_user(p, arg2, 0);
7328 break;
7329 #ifdef TARGET_NR_open
7330 case TARGET_NR_open:
7331 if (!(p = lock_user_string(arg1)))
7332 goto efault;
7333 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7334 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7335 arg3));
7336 fd_trans_unregister(ret);
7337 unlock_user(p, arg1, 0);
7338 break;
7339 #endif
7340 case TARGET_NR_openat:
7341 if (!(p = lock_user_string(arg2)))
7342 goto efault;
7343 ret = get_errno(do_openat(cpu_env, arg1, p,
7344 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7345 arg4));
7346 fd_trans_unregister(ret);
7347 unlock_user(p, arg2, 0);
7348 break;
7349 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7350 case TARGET_NR_name_to_handle_at:
7351 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7352 break;
7353 #endif
7354 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7355 case TARGET_NR_open_by_handle_at:
7356 ret = do_open_by_handle_at(arg1, arg2, arg3);
7357 fd_trans_unregister(ret);
7358 break;
7359 #endif
7360 case TARGET_NR_close:
7361 fd_trans_unregister(arg1);
7362 ret = get_errno(close(arg1));
7363 break;
7364 case TARGET_NR_brk:
7365 ret = do_brk(arg1);
7366 break;
7367 #ifdef TARGET_NR_fork
7368 case TARGET_NR_fork:
7369 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
7370 break;
7371 #endif
7372 #ifdef TARGET_NR_waitpid
7373 case TARGET_NR_waitpid:
7375 int status;
7376 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7377 if (!is_error(ret) && arg2 && ret
7378 && put_user_s32(host_to_target_waitstatus(status), arg2))
7379 goto efault;
7381 break;
7382 #endif
7383 #ifdef TARGET_NR_waitid
7384 case TARGET_NR_waitid:
7386 siginfo_t info;
7387 info.si_pid = 0;
7388 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7389 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7390 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7391 goto efault;
7392 host_to_target_siginfo(p, &info);
7393 unlock_user(p, arg3, sizeof(target_siginfo_t));
7396 break;
7397 #endif
7398 #ifdef TARGET_NR_creat /* not on alpha */
7399 case TARGET_NR_creat:
7400 if (!(p = lock_user_string(arg1)))
7401 goto efault;
7402 ret = get_errno(creat(p, arg2));
7403 fd_trans_unregister(ret);
7404 unlock_user(p, arg1, 0);
7405 break;
7406 #endif
7407 #ifdef TARGET_NR_link
7408 case TARGET_NR_link:
7410 void * p2;
7411 p = lock_user_string(arg1);
7412 p2 = lock_user_string(arg2);
7413 if (!p || !p2)
7414 ret = -TARGET_EFAULT;
7415 else
7416 ret = get_errno(link(p, p2));
7417 unlock_user(p2, arg2, 0);
7418 unlock_user(p, arg1, 0);
7420 break;
7421 #endif
7422 #if defined(TARGET_NR_linkat)
7423 case TARGET_NR_linkat:
7425 void * p2 = NULL;
7426 if (!arg2 || !arg4)
7427 goto efault;
7428 p = lock_user_string(arg2);
7429 p2 = lock_user_string(arg4);
7430 if (!p || !p2)
7431 ret = -TARGET_EFAULT;
7432 else
7433 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7434 unlock_user(p, arg2, 0);
7435 unlock_user(p2, arg4, 0);
7437 break;
7438 #endif
7439 #ifdef TARGET_NR_unlink
7440 case TARGET_NR_unlink:
7441 if (!(p = lock_user_string(arg1)))
7442 goto efault;
7443 ret = get_errno(unlink(p));
7444 unlock_user(p, arg1, 0);
7445 break;
7446 #endif
7447 #if defined(TARGET_NR_unlinkat)
7448 case TARGET_NR_unlinkat:
7449 if (!(p = lock_user_string(arg2)))
7450 goto efault;
7451 ret = get_errno(unlinkat(arg1, p, arg3));
7452 unlock_user(p, arg2, 0);
7453 break;
7454 #endif
7455 case TARGET_NR_execve:
7457 char **argp, **envp;
7458 int argc, envc;
7459 abi_ulong gp;
7460 abi_ulong guest_argp;
7461 abi_ulong guest_envp;
7462 abi_ulong addr;
7463 char **q;
7464 int total_size = 0;
7466 argc = 0;
7467 guest_argp = arg2;
7468 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7469 if (get_user_ual(addr, gp))
7470 goto efault;
7471 if (!addr)
7472 break;
7473 argc++;
7475 envc = 0;
7476 guest_envp = arg3;
7477 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7478 if (get_user_ual(addr, gp))
7479 goto efault;
7480 if (!addr)
7481 break;
7482 envc++;
7485 argp = alloca((argc + 1) * sizeof(void *));
7486 envp = alloca((envc + 1) * sizeof(void *));
7488 for (gp = guest_argp, q = argp; gp;
7489 gp += sizeof(abi_ulong), q++) {
7490 if (get_user_ual(addr, gp))
7491 goto execve_efault;
7492 if (!addr)
7493 break;
7494 if (!(*q = lock_user_string(addr)))
7495 goto execve_efault;
7496 total_size += strlen(*q) + 1;
7498 *q = NULL;
7500 for (gp = guest_envp, q = envp; gp;
7501 gp += sizeof(abi_ulong), q++) {
7502 if (get_user_ual(addr, gp))
7503 goto execve_efault;
7504 if (!addr)
7505 break;
7506 if (!(*q = lock_user_string(addr)))
7507 goto execve_efault;
7508 total_size += strlen(*q) + 1;
7510 *q = NULL;
7512 if (!(p = lock_user_string(arg1)))
7513 goto execve_efault;
7514 /* Although execve() is not an interruptible syscall it is
7515 * a special case where we must use the safe_syscall wrapper:
7516 * if we allow a signal to happen before we make the host
7517 * syscall then we will 'lose' it, because at the point of
7518 * execve the process leaves QEMU's control. So we use the
7519 * safe syscall wrapper to ensure that we either take the
7520 * signal as a guest signal, or else it does not happen
7521 * before the execve completes and makes it the other
7522 * program's problem.
7524 ret = get_errno(safe_execve(p, argp, envp));
7525 unlock_user(p, arg1, 0);
7527 goto execve_end;
7529 execve_efault:
7530 ret = -TARGET_EFAULT;
7532 execve_end:
7533 for (gp = guest_argp, q = argp; *q;
7534 gp += sizeof(abi_ulong), q++) {
7535 if (get_user_ual(addr, gp)
7536 || !addr)
7537 break;
7538 unlock_user(*q, addr, 0);
7540 for (gp = guest_envp, q = envp; *q;
7541 gp += sizeof(abi_ulong), q++) {
7542 if (get_user_ual(addr, gp)
7543 || !addr)
7544 break;
7545 unlock_user(*q, addr, 0);
7548 break;
7549 case TARGET_NR_chdir:
7550 if (!(p = lock_user_string(arg1)))
7551 goto efault;
7552 ret = get_errno(chdir(p));
7553 unlock_user(p, arg1, 0);
7554 break;
7555 #ifdef TARGET_NR_time
7556 case TARGET_NR_time:
7558 time_t host_time;
7559 ret = get_errno(time(&host_time));
7560 if (!is_error(ret)
7561 && arg1
7562 && put_user_sal(host_time, arg1))
7563 goto efault;
7565 break;
7566 #endif
7567 #ifdef TARGET_NR_mknod
7568 case TARGET_NR_mknod:
7569 if (!(p = lock_user_string(arg1)))
7570 goto efault;
7571 ret = get_errno(mknod(p, arg2, arg3));
7572 unlock_user(p, arg1, 0);
7573 break;
7574 #endif
7575 #if defined(TARGET_NR_mknodat)
7576 case TARGET_NR_mknodat:
7577 if (!(p = lock_user_string(arg2)))
7578 goto efault;
7579 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7580 unlock_user(p, arg2, 0);
7581 break;
7582 #endif
7583 #ifdef TARGET_NR_chmod
7584 case TARGET_NR_chmod:
7585 if (!(p = lock_user_string(arg1)))
7586 goto efault;
7587 ret = get_errno(chmod(p, arg2));
7588 unlock_user(p, arg1, 0);
7589 break;
7590 #endif
7591 #ifdef TARGET_NR_break
7592 case TARGET_NR_break:
7593 goto unimplemented;
7594 #endif
7595 #ifdef TARGET_NR_oldstat
7596 case TARGET_NR_oldstat:
7597 goto unimplemented;
7598 #endif
7599 case TARGET_NR_lseek:
7600 ret = get_errno(lseek(arg1, arg2, arg3));
7601 break;
7602 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7603 /* Alpha specific */
7604 case TARGET_NR_getxpid:
7605 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7606 ret = get_errno(getpid());
7607 break;
7608 #endif
7609 #ifdef TARGET_NR_getpid
7610 case TARGET_NR_getpid:
7611 ret = get_errno(getpid());
7612 break;
7613 #endif
7614 case TARGET_NR_mount:
7616 /* need to look at the data field */
7617 void *p2, *p3;
7619 if (arg1) {
7620 p = lock_user_string(arg1);
7621 if (!p) {
7622 goto efault;
7624 } else {
7625 p = NULL;
7628 p2 = lock_user_string(arg2);
7629 if (!p2) {
7630 if (arg1) {
7631 unlock_user(p, arg1, 0);
7633 goto efault;
7636 if (arg3) {
7637 p3 = lock_user_string(arg3);
7638 if (!p3) {
7639 if (arg1) {
7640 unlock_user(p, arg1, 0);
7642 unlock_user(p2, arg2, 0);
7643 goto efault;
7645 } else {
7646 p3 = NULL;
7649 /* FIXME - arg5 should be locked, but it isn't clear how to
7650 * do that since it's not guaranteed to be a NULL-terminated
7651 * string.
7653 if (!arg5) {
7654 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7655 } else {
7656 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7658 ret = get_errno(ret);
7660 if (arg1) {
7661 unlock_user(p, arg1, 0);
7663 unlock_user(p2, arg2, 0);
7664 if (arg3) {
7665 unlock_user(p3, arg3, 0);
7668 break;
7669 #ifdef TARGET_NR_umount
7670 case TARGET_NR_umount:
7671 if (!(p = lock_user_string(arg1)))
7672 goto efault;
7673 ret = get_errno(umount(p));
7674 unlock_user(p, arg1, 0);
7675 break;
7676 #endif
7677 #ifdef TARGET_NR_stime /* not on alpha */
7678 case TARGET_NR_stime:
7680 time_t host_time;
7681 if (get_user_sal(host_time, arg1))
7682 goto efault;
7683 ret = get_errno(stime(&host_time));
7685 break;
7686 #endif
7687 case TARGET_NR_ptrace:
7688 goto unimplemented;
7689 #ifdef TARGET_NR_alarm /* not on alpha */
7690 case TARGET_NR_alarm:
7691 ret = alarm(arg1);
7692 break;
7693 #endif
7694 #ifdef TARGET_NR_oldfstat
7695 case TARGET_NR_oldfstat:
7696 goto unimplemented;
7697 #endif
7698 #ifdef TARGET_NR_pause /* not on alpha */
7699 case TARGET_NR_pause:
7700 if (!block_signals()) {
7701 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7703 ret = -TARGET_EINTR;
7704 break;
7705 #endif
7706 #ifdef TARGET_NR_utime
7707 case TARGET_NR_utime:
7709 struct utimbuf tbuf, *host_tbuf;
7710 struct target_utimbuf *target_tbuf;
7711 if (arg2) {
7712 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7713 goto efault;
7714 tbuf.actime = tswapal(target_tbuf->actime);
7715 tbuf.modtime = tswapal(target_tbuf->modtime);
7716 unlock_user_struct(target_tbuf, arg2, 0);
7717 host_tbuf = &tbuf;
7718 } else {
7719 host_tbuf = NULL;
7721 if (!(p = lock_user_string(arg1)))
7722 goto efault;
7723 ret = get_errno(utime(p, host_tbuf));
7724 unlock_user(p, arg1, 0);
7726 break;
7727 #endif
7728 #ifdef TARGET_NR_utimes
7729 case TARGET_NR_utimes:
7731 struct timeval *tvp, tv[2];
7732 if (arg2) {
7733 if (copy_from_user_timeval(&tv[0], arg2)
7734 || copy_from_user_timeval(&tv[1],
7735 arg2 + sizeof(struct target_timeval)))
7736 goto efault;
7737 tvp = tv;
7738 } else {
7739 tvp = NULL;
7741 if (!(p = lock_user_string(arg1)))
7742 goto efault;
7743 ret = get_errno(utimes(p, tvp));
7744 unlock_user(p, arg1, 0);
7746 break;
7747 #endif
7748 #if defined(TARGET_NR_futimesat)
7749 case TARGET_NR_futimesat:
7751 struct timeval *tvp, tv[2];
7752 if (arg3) {
7753 if (copy_from_user_timeval(&tv[0], arg3)
7754 || copy_from_user_timeval(&tv[1],
7755 arg3 + sizeof(struct target_timeval)))
7756 goto efault;
7757 tvp = tv;
7758 } else {
7759 tvp = NULL;
7761 if (!(p = lock_user_string(arg2)))
7762 goto efault;
7763 ret = get_errno(futimesat(arg1, path(p), tvp));
7764 unlock_user(p, arg2, 0);
7766 break;
7767 #endif
7768 #ifdef TARGET_NR_stty
7769 case TARGET_NR_stty:
7770 goto unimplemented;
7771 #endif
7772 #ifdef TARGET_NR_gtty
7773 case TARGET_NR_gtty:
7774 goto unimplemented;
7775 #endif
7776 #ifdef TARGET_NR_access
7777 case TARGET_NR_access:
7778 if (!(p = lock_user_string(arg1)))
7779 goto efault;
7780 ret = get_errno(access(path(p), arg2));
7781 unlock_user(p, arg1, 0);
7782 break;
7783 #endif
7784 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7785 case TARGET_NR_faccessat:
7786 if (!(p = lock_user_string(arg2)))
7787 goto efault;
7788 ret = get_errno(faccessat(arg1, p, arg3, 0));
7789 unlock_user(p, arg2, 0);
7790 break;
7791 #endif
7792 #ifdef TARGET_NR_nice /* not on alpha */
7793 case TARGET_NR_nice:
7794 ret = get_errno(nice(arg1));
7795 break;
7796 #endif
7797 #ifdef TARGET_NR_ftime
7798 case TARGET_NR_ftime:
7799 goto unimplemented;
7800 #endif
7801 case TARGET_NR_sync:
7802 sync();
7803 ret = 0;
7804 break;
7805 case TARGET_NR_kill:
7806 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7807 break;
7808 #ifdef TARGET_NR_rename
7809 case TARGET_NR_rename:
7811 void *p2;
7812 p = lock_user_string(arg1);
7813 p2 = lock_user_string(arg2);
7814 if (!p || !p2)
7815 ret = -TARGET_EFAULT;
7816 else
7817 ret = get_errno(rename(p, p2));
7818 unlock_user(p2, arg2, 0);
7819 unlock_user(p, arg1, 0);
7821 break;
7822 #endif
7823 #if defined(TARGET_NR_renameat)
7824 case TARGET_NR_renameat:
7826 void *p2;
7827 p = lock_user_string(arg2);
7828 p2 = lock_user_string(arg4);
7829 if (!p || !p2)
7830 ret = -TARGET_EFAULT;
7831 else
7832 ret = get_errno(renameat(arg1, p, arg3, p2));
7833 unlock_user(p2, arg4, 0);
7834 unlock_user(p, arg2, 0);
7836 break;
7837 #endif
7838 #ifdef TARGET_NR_mkdir
7839 case TARGET_NR_mkdir:
7840 if (!(p = lock_user_string(arg1)))
7841 goto efault;
7842 ret = get_errno(mkdir(p, arg2));
7843 unlock_user(p, arg1, 0);
7844 break;
7845 #endif
7846 #if defined(TARGET_NR_mkdirat)
7847 case TARGET_NR_mkdirat:
7848 if (!(p = lock_user_string(arg2)))
7849 goto efault;
7850 ret = get_errno(mkdirat(arg1, p, arg3));
7851 unlock_user(p, arg2, 0);
7852 break;
7853 #endif
7854 #ifdef TARGET_NR_rmdir
7855 case TARGET_NR_rmdir:
7856 if (!(p = lock_user_string(arg1)))
7857 goto efault;
7858 ret = get_errno(rmdir(p));
7859 unlock_user(p, arg1, 0);
7860 break;
7861 #endif
7862 case TARGET_NR_dup:
7863 ret = get_errno(dup(arg1));
7864 if (ret >= 0) {
7865 fd_trans_dup(arg1, ret);
7867 break;
7868 #ifdef TARGET_NR_pipe
7869 case TARGET_NR_pipe:
7870 ret = do_pipe(cpu_env, arg1, 0, 0);
7871 break;
7872 #endif
7873 #ifdef TARGET_NR_pipe2
7874 case TARGET_NR_pipe2:
7875 ret = do_pipe(cpu_env, arg1,
7876 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7877 break;
7878 #endif
7879 case TARGET_NR_times:
7881 struct target_tms *tmsp;
7882 struct tms tms;
7883 ret = get_errno(times(&tms));
7884 if (arg1) {
7885 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7886 if (!tmsp)
7887 goto efault;
7888 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7889 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7890 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7891 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7893 if (!is_error(ret))
7894 ret = host_to_target_clock_t(ret);
7896 break;
7897 #ifdef TARGET_NR_prof
7898 case TARGET_NR_prof:
7899 goto unimplemented;
7900 #endif
7901 #ifdef TARGET_NR_signal
7902 case TARGET_NR_signal:
7903 goto unimplemented;
7904 #endif
7905 case TARGET_NR_acct:
7906 if (arg1 == 0) {
7907 ret = get_errno(acct(NULL));
7908 } else {
7909 if (!(p = lock_user_string(arg1)))
7910 goto efault;
7911 ret = get_errno(acct(path(p)));
7912 unlock_user(p, arg1, 0);
7914 break;
7915 #ifdef TARGET_NR_umount2
7916 case TARGET_NR_umount2:
7917 if (!(p = lock_user_string(arg1)))
7918 goto efault;
7919 ret = get_errno(umount2(p, arg2));
7920 unlock_user(p, arg1, 0);
7921 break;
7922 #endif
7923 #ifdef TARGET_NR_lock
7924 case TARGET_NR_lock:
7925 goto unimplemented;
7926 #endif
7927 case TARGET_NR_ioctl:
7928 ret = do_ioctl(arg1, arg2, arg3);
7929 break;
7930 case TARGET_NR_fcntl:
7931 ret = do_fcntl(arg1, arg2, arg3);
7932 break;
7933 #ifdef TARGET_NR_mpx
7934 case TARGET_NR_mpx:
7935 goto unimplemented;
7936 #endif
7937 case TARGET_NR_setpgid:
7938 ret = get_errno(setpgid(arg1, arg2));
7939 break;
7940 #ifdef TARGET_NR_ulimit
7941 case TARGET_NR_ulimit:
7942 goto unimplemented;
7943 #endif
7944 #ifdef TARGET_NR_oldolduname
7945 case TARGET_NR_oldolduname:
7946 goto unimplemented;
7947 #endif
7948 case TARGET_NR_umask:
7949 ret = get_errno(umask(arg1));
7950 break;
7951 case TARGET_NR_chroot:
7952 if (!(p = lock_user_string(arg1)))
7953 goto efault;
7954 ret = get_errno(chroot(p));
7955 unlock_user(p, arg1, 0);
7956 break;
7957 #ifdef TARGET_NR_ustat
7958 case TARGET_NR_ustat:
7959 goto unimplemented;
7960 #endif
7961 #ifdef TARGET_NR_dup2
7962 case TARGET_NR_dup2:
7963 ret = get_errno(dup2(arg1, arg2));
7964 if (ret >= 0) {
7965 fd_trans_dup(arg1, arg2);
7967 break;
7968 #endif
7969 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7970 case TARGET_NR_dup3:
7971 ret = get_errno(dup3(arg1, arg2, arg3));
7972 if (ret >= 0) {
7973 fd_trans_dup(arg1, arg2);
7975 break;
7976 #endif
7977 #ifdef TARGET_NR_getppid /* not on alpha */
7978 case TARGET_NR_getppid:
7979 ret = get_errno(getppid());
7980 break;
7981 #endif
7982 #ifdef TARGET_NR_getpgrp
7983 case TARGET_NR_getpgrp:
7984 ret = get_errno(getpgrp());
7985 break;
7986 #endif
7987 case TARGET_NR_setsid:
7988 ret = get_errno(setsid());
7989 break;
7990 #ifdef TARGET_NR_sigaction
7991 case TARGET_NR_sigaction:
7993 #if defined(TARGET_ALPHA)
7994 struct target_sigaction act, oact, *pact = 0;
7995 struct target_old_sigaction *old_act;
7996 if (arg2) {
7997 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7998 goto efault;
7999 act._sa_handler = old_act->_sa_handler;
8000 target_siginitset(&act.sa_mask, old_act->sa_mask);
8001 act.sa_flags = old_act->sa_flags;
8002 act.sa_restorer = 0;
8003 unlock_user_struct(old_act, arg2, 0);
8004 pact = &act;
8006 ret = get_errno(do_sigaction(arg1, pact, &oact));
8007 if (!is_error(ret) && arg3) {
8008 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8009 goto efault;
8010 old_act->_sa_handler = oact._sa_handler;
8011 old_act->sa_mask = oact.sa_mask.sig[0];
8012 old_act->sa_flags = oact.sa_flags;
8013 unlock_user_struct(old_act, arg3, 1);
8015 #elif defined(TARGET_MIPS)
8016 struct target_sigaction act, oact, *pact, *old_act;
8018 if (arg2) {
8019 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8020 goto efault;
8021 act._sa_handler = old_act->_sa_handler;
8022 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8023 act.sa_flags = old_act->sa_flags;
8024 unlock_user_struct(old_act, arg2, 0);
8025 pact = &act;
8026 } else {
8027 pact = NULL;
8030 ret = get_errno(do_sigaction(arg1, pact, &oact));
8032 if (!is_error(ret) && arg3) {
8033 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8034 goto efault;
8035 old_act->_sa_handler = oact._sa_handler;
8036 old_act->sa_flags = oact.sa_flags;
8037 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8038 old_act->sa_mask.sig[1] = 0;
8039 old_act->sa_mask.sig[2] = 0;
8040 old_act->sa_mask.sig[3] = 0;
8041 unlock_user_struct(old_act, arg3, 1);
8043 #else
8044 struct target_old_sigaction *old_act;
8045 struct target_sigaction act, oact, *pact;
8046 if (arg2) {
8047 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8048 goto efault;
8049 act._sa_handler = old_act->_sa_handler;
8050 target_siginitset(&act.sa_mask, old_act->sa_mask);
8051 act.sa_flags = old_act->sa_flags;
8052 act.sa_restorer = old_act->sa_restorer;
8053 unlock_user_struct(old_act, arg2, 0);
8054 pact = &act;
8055 } else {
8056 pact = NULL;
8058 ret = get_errno(do_sigaction(arg1, pact, &oact));
8059 if (!is_error(ret) && arg3) {
8060 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8061 goto efault;
8062 old_act->_sa_handler = oact._sa_handler;
8063 old_act->sa_mask = oact.sa_mask.sig[0];
8064 old_act->sa_flags = oact.sa_flags;
8065 old_act->sa_restorer = oact.sa_restorer;
8066 unlock_user_struct(old_act, arg3, 1);
8068 #endif
8070 break;
8071 #endif
8072 case TARGET_NR_rt_sigaction:
8074 #if defined(TARGET_ALPHA)
8075 struct target_sigaction act, oact, *pact = 0;
8076 struct target_rt_sigaction *rt_act;
8078 if (arg4 != sizeof(target_sigset_t)) {
8079 ret = -TARGET_EINVAL;
8080 break;
8082 if (arg2) {
8083 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8084 goto efault;
8085 act._sa_handler = rt_act->_sa_handler;
8086 act.sa_mask = rt_act->sa_mask;
8087 act.sa_flags = rt_act->sa_flags;
8088 act.sa_restorer = arg5;
8089 unlock_user_struct(rt_act, arg2, 0);
8090 pact = &act;
8092 ret = get_errno(do_sigaction(arg1, pact, &oact));
8093 if (!is_error(ret) && arg3) {
8094 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8095 goto efault;
8096 rt_act->_sa_handler = oact._sa_handler;
8097 rt_act->sa_mask = oact.sa_mask;
8098 rt_act->sa_flags = oact.sa_flags;
8099 unlock_user_struct(rt_act, arg3, 1);
8101 #else
8102 struct target_sigaction *act;
8103 struct target_sigaction *oact;
8105 if (arg4 != sizeof(target_sigset_t)) {
8106 ret = -TARGET_EINVAL;
8107 break;
8109 if (arg2) {
8110 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
8111 goto efault;
8112 } else
8113 act = NULL;
8114 if (arg3) {
8115 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8116 ret = -TARGET_EFAULT;
8117 goto rt_sigaction_fail;
8119 } else
8120 oact = NULL;
8121 ret = get_errno(do_sigaction(arg1, act, oact));
8122 rt_sigaction_fail:
8123 if (act)
8124 unlock_user_struct(act, arg2, 0);
8125 if (oact)
8126 unlock_user_struct(oact, arg3, 1);
8127 #endif
8129 break;
8130 #ifdef TARGET_NR_sgetmask /* not on alpha */
8131 case TARGET_NR_sgetmask:
8133 sigset_t cur_set;
8134 abi_ulong target_set;
8135 ret = do_sigprocmask(0, NULL, &cur_set);
8136 if (!ret) {
8137 host_to_target_old_sigset(&target_set, &cur_set);
8138 ret = target_set;
8141 break;
8142 #endif
8143 #ifdef TARGET_NR_ssetmask /* not on alpha */
8144 case TARGET_NR_ssetmask:
8146 sigset_t set, oset, cur_set;
8147 abi_ulong target_set = arg1;
8148 /* We only have one word of the new mask so we must read
8149 * the rest of it with do_sigprocmask() and OR in this word.
8150 * We are guaranteed that a do_sigprocmask() that only queries
8151 * the signal mask will not fail.
8153 ret = do_sigprocmask(0, NULL, &cur_set);
8154 assert(!ret);
8155 target_to_host_old_sigset(&set, &target_set);
8156 sigorset(&set, &set, &cur_set);
8157 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8158 if (!ret) {
8159 host_to_target_old_sigset(&target_set, &oset);
8160 ret = target_set;
8163 break;
8164 #endif
8165 #ifdef TARGET_NR_sigprocmask
8166 case TARGET_NR_sigprocmask:
8168 #if defined(TARGET_ALPHA)
8169 sigset_t set, oldset;
8170 abi_ulong mask;
8171 int how;
8173 switch (arg1) {
8174 case TARGET_SIG_BLOCK:
8175 how = SIG_BLOCK;
8176 break;
8177 case TARGET_SIG_UNBLOCK:
8178 how = SIG_UNBLOCK;
8179 break;
8180 case TARGET_SIG_SETMASK:
8181 how = SIG_SETMASK;
8182 break;
8183 default:
8184 ret = -TARGET_EINVAL;
8185 goto fail;
8187 mask = arg2;
8188 target_to_host_old_sigset(&set, &mask);
8190 ret = do_sigprocmask(how, &set, &oldset);
8191 if (!is_error(ret)) {
8192 host_to_target_old_sigset(&mask, &oldset);
8193 ret = mask;
8194 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8196 #else
8197 sigset_t set, oldset, *set_ptr;
8198 int how;
8200 if (arg2) {
8201 switch (arg1) {
8202 case TARGET_SIG_BLOCK:
8203 how = SIG_BLOCK;
8204 break;
8205 case TARGET_SIG_UNBLOCK:
8206 how = SIG_UNBLOCK;
8207 break;
8208 case TARGET_SIG_SETMASK:
8209 how = SIG_SETMASK;
8210 break;
8211 default:
8212 ret = -TARGET_EINVAL;
8213 goto fail;
8215 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8216 goto efault;
8217 target_to_host_old_sigset(&set, p);
8218 unlock_user(p, arg2, 0);
8219 set_ptr = &set;
8220 } else {
8221 how = 0;
8222 set_ptr = NULL;
8224 ret = do_sigprocmask(how, set_ptr, &oldset);
8225 if (!is_error(ret) && arg3) {
8226 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8227 goto efault;
8228 host_to_target_old_sigset(p, &oldset);
8229 unlock_user(p, arg3, sizeof(target_sigset_t));
8231 #endif
8233 break;
8234 #endif
8235 case TARGET_NR_rt_sigprocmask:
8237 int how = arg1;
8238 sigset_t set, oldset, *set_ptr;
8240 if (arg4 != sizeof(target_sigset_t)) {
8241 ret = -TARGET_EINVAL;
8242 break;
8245 if (arg2) {
8246 switch(how) {
8247 case TARGET_SIG_BLOCK:
8248 how = SIG_BLOCK;
8249 break;
8250 case TARGET_SIG_UNBLOCK:
8251 how = SIG_UNBLOCK;
8252 break;
8253 case TARGET_SIG_SETMASK:
8254 how = SIG_SETMASK;
8255 break;
8256 default:
8257 ret = -TARGET_EINVAL;
8258 goto fail;
8260 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8261 goto efault;
8262 target_to_host_sigset(&set, p);
8263 unlock_user(p, arg2, 0);
8264 set_ptr = &set;
8265 } else {
8266 how = 0;
8267 set_ptr = NULL;
8269 ret = do_sigprocmask(how, set_ptr, &oldset);
8270 if (!is_error(ret) && arg3) {
8271 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8272 goto efault;
8273 host_to_target_sigset(p, &oldset);
8274 unlock_user(p, arg3, sizeof(target_sigset_t));
8277 break;
8278 #ifdef TARGET_NR_sigpending
8279 case TARGET_NR_sigpending:
8281 sigset_t set;
8282 ret = get_errno(sigpending(&set));
8283 if (!is_error(ret)) {
8284 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8285 goto efault;
8286 host_to_target_old_sigset(p, &set);
8287 unlock_user(p, arg1, sizeof(target_sigset_t));
8290 break;
8291 #endif
8292 case TARGET_NR_rt_sigpending:
8294 sigset_t set;
8296 /* Yes, this check is >, not != like most. We follow the kernel's
8297 * logic and it does it like this because it implements
8298 * NR_sigpending through the same code path, and in that case
8299 * the old_sigset_t is smaller in size.
8301 if (arg2 > sizeof(target_sigset_t)) {
8302 ret = -TARGET_EINVAL;
8303 break;
8306 ret = get_errno(sigpending(&set));
8307 if (!is_error(ret)) {
8308 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8309 goto efault;
8310 host_to_target_sigset(p, &set);
8311 unlock_user(p, arg1, sizeof(target_sigset_t));
8314 break;
8315 #ifdef TARGET_NR_sigsuspend
8316 case TARGET_NR_sigsuspend:
8318 TaskState *ts = cpu->opaque;
8319 #if defined(TARGET_ALPHA)
8320 abi_ulong mask = arg1;
8321 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8322 #else
8323 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8324 goto efault;
8325 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8326 unlock_user(p, arg1, 0);
8327 #endif
8328 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8329 SIGSET_T_SIZE));
8330 if (ret != -TARGET_ERESTARTSYS) {
8331 ts->in_sigsuspend = 1;
8334 break;
8335 #endif
8336 case TARGET_NR_rt_sigsuspend:
8338 TaskState *ts = cpu->opaque;
8340 if (arg2 != sizeof(target_sigset_t)) {
8341 ret = -TARGET_EINVAL;
8342 break;
8344 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8345 goto efault;
8346 target_to_host_sigset(&ts->sigsuspend_mask, p);
8347 unlock_user(p, arg1, 0);
8348 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8349 SIGSET_T_SIZE));
8350 if (ret != -TARGET_ERESTARTSYS) {
8351 ts->in_sigsuspend = 1;
8354 break;
8355 case TARGET_NR_rt_sigtimedwait:
8357 sigset_t set;
8358 struct timespec uts, *puts;
8359 siginfo_t uinfo;
8361 if (arg4 != sizeof(target_sigset_t)) {
8362 ret = -TARGET_EINVAL;
8363 break;
8366 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8367 goto efault;
8368 target_to_host_sigset(&set, p);
8369 unlock_user(p, arg1, 0);
8370 if (arg3) {
8371 puts = &uts;
8372 target_to_host_timespec(puts, arg3);
8373 } else {
8374 puts = NULL;
8376 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8377 SIGSET_T_SIZE));
8378 if (!is_error(ret)) {
8379 if (arg2) {
8380 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8382 if (!p) {
8383 goto efault;
8385 host_to_target_siginfo(p, &uinfo);
8386 unlock_user(p, arg2, sizeof(target_siginfo_t));
8388 ret = host_to_target_signal(ret);
8391 break;
8392 case TARGET_NR_rt_sigqueueinfo:
8394 siginfo_t uinfo;
8396 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8397 if (!p) {
8398 goto efault;
8400 target_to_host_siginfo(&uinfo, p);
8401 unlock_user(p, arg1, 0);
8402 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8404 break;
8405 #ifdef TARGET_NR_sigreturn
8406 case TARGET_NR_sigreturn:
8407 if (block_signals()) {
8408 ret = -TARGET_ERESTARTSYS;
8409 } else {
8410 ret = do_sigreturn(cpu_env);
8412 break;
8413 #endif
8414 case TARGET_NR_rt_sigreturn:
8415 if (block_signals()) {
8416 ret = -TARGET_ERESTARTSYS;
8417 } else {
8418 ret = do_rt_sigreturn(cpu_env);
8420 break;
8421 case TARGET_NR_sethostname:
8422 if (!(p = lock_user_string(arg1)))
8423 goto efault;
8424 ret = get_errno(sethostname(p, arg2));
8425 unlock_user(p, arg1, 0);
8426 break;
8427 case TARGET_NR_setrlimit:
8429 int resource = target_to_host_resource(arg1);
8430 struct target_rlimit *target_rlim;
8431 struct rlimit rlim;
8432 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8433 goto efault;
8434 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8435 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8436 unlock_user_struct(target_rlim, arg2, 0);
8437 ret = get_errno(setrlimit(resource, &rlim));
8439 break;
8440 case TARGET_NR_getrlimit:
8442 int resource = target_to_host_resource(arg1);
8443 struct target_rlimit *target_rlim;
8444 struct rlimit rlim;
8446 ret = get_errno(getrlimit(resource, &rlim));
8447 if (!is_error(ret)) {
8448 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8449 goto efault;
8450 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8451 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8452 unlock_user_struct(target_rlim, arg2, 1);
8455 break;
8456 case TARGET_NR_getrusage:
8458 struct rusage rusage;
8459 ret = get_errno(getrusage(arg1, &rusage));
8460 if (!is_error(ret)) {
8461 ret = host_to_target_rusage(arg2, &rusage);
8464 break;
8465 case TARGET_NR_gettimeofday:
8467 struct timeval tv;
8468 ret = get_errno(gettimeofday(&tv, NULL));
8469 if (!is_error(ret)) {
8470 if (copy_to_user_timeval(arg1, &tv))
8471 goto efault;
8474 break;
8475 case TARGET_NR_settimeofday:
8477 struct timeval tv, *ptv = NULL;
8478 struct timezone tz, *ptz = NULL;
8480 if (arg1) {
8481 if (copy_from_user_timeval(&tv, arg1)) {
8482 goto efault;
8484 ptv = &tv;
8487 if (arg2) {
8488 if (copy_from_user_timezone(&tz, arg2)) {
8489 goto efault;
8491 ptz = &tz;
8494 ret = get_errno(settimeofday(ptv, ptz));
8496 break;
8497 #if defined(TARGET_NR_select)
8498 case TARGET_NR_select:
8499 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
8500 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8501 #else
8503 struct target_sel_arg_struct *sel;
8504 abi_ulong inp, outp, exp, tvp;
8505 long nsel;
8507 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
8508 goto efault;
8509 nsel = tswapal(sel->n);
8510 inp = tswapal(sel->inp);
8511 outp = tswapal(sel->outp);
8512 exp = tswapal(sel->exp);
8513 tvp = tswapal(sel->tvp);
8514 unlock_user_struct(sel, arg1, 0);
8515 ret = do_select(nsel, inp, outp, exp, tvp);
8517 #endif
8518 break;
8519 #endif
8520 #ifdef TARGET_NR_pselect6
8521 case TARGET_NR_pselect6:
8523 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8524 fd_set rfds, wfds, efds;
8525 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8526 struct timespec ts, *ts_ptr;
8529 * The 6th arg is actually two args smashed together,
8530 * so we cannot use the C library.
8532 sigset_t set;
8533 struct {
8534 sigset_t *set;
8535 size_t size;
8536 } sig, *sig_ptr;
8538 abi_ulong arg_sigset, arg_sigsize, *arg7;
8539 target_sigset_t *target_sigset;
8541 n = arg1;
8542 rfd_addr = arg2;
8543 wfd_addr = arg3;
8544 efd_addr = arg4;
8545 ts_addr = arg5;
8547 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8548 if (ret) {
8549 goto fail;
8551 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8552 if (ret) {
8553 goto fail;
8555 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8556 if (ret) {
8557 goto fail;
8561 * This takes a timespec, and not a timeval, so we cannot
8562 * use the do_select() helper ...
8564 if (ts_addr) {
8565 if (target_to_host_timespec(&ts, ts_addr)) {
8566 goto efault;
8568 ts_ptr = &ts;
8569 } else {
8570 ts_ptr = NULL;
8573 /* Extract the two packed args for the sigset */
8574 if (arg6) {
8575 sig_ptr = &sig;
8576 sig.size = SIGSET_T_SIZE;
8578 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8579 if (!arg7) {
8580 goto efault;
8582 arg_sigset = tswapal(arg7[0]);
8583 arg_sigsize = tswapal(arg7[1]);
8584 unlock_user(arg7, arg6, 0);
8586 if (arg_sigset) {
8587 sig.set = &set;
8588 if (arg_sigsize != sizeof(*target_sigset)) {
8589 /* Like the kernel, we enforce correct size sigsets */
8590 ret = -TARGET_EINVAL;
8591 goto fail;
8593 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8594 sizeof(*target_sigset), 1);
8595 if (!target_sigset) {
8596 goto efault;
8598 target_to_host_sigset(&set, target_sigset);
8599 unlock_user(target_sigset, arg_sigset, 0);
8600 } else {
8601 sig.set = NULL;
8603 } else {
8604 sig_ptr = NULL;
8607 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8608 ts_ptr, sig_ptr));
8610 if (!is_error(ret)) {
8611 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8612 goto efault;
8613 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8614 goto efault;
8615 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8616 goto efault;
8618 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8619 goto efault;
8622 break;
8623 #endif
8624 #ifdef TARGET_NR_symlink
8625 case TARGET_NR_symlink:
8627 void *p2;
8628 p = lock_user_string(arg1);
8629 p2 = lock_user_string(arg2);
8630 if (!p || !p2)
8631 ret = -TARGET_EFAULT;
8632 else
8633 ret = get_errno(symlink(p, p2));
8634 unlock_user(p2, arg2, 0);
8635 unlock_user(p, arg1, 0);
8637 break;
8638 #endif
8639 #if defined(TARGET_NR_symlinkat)
8640 case TARGET_NR_symlinkat:
8642 void *p2;
8643 p = lock_user_string(arg1);
8644 p2 = lock_user_string(arg3);
8645 if (!p || !p2)
8646 ret = -TARGET_EFAULT;
8647 else
8648 ret = get_errno(symlinkat(p, arg2, p2));
8649 unlock_user(p2, arg3, 0);
8650 unlock_user(p, arg1, 0);
8652 break;
8653 #endif
8654 #ifdef TARGET_NR_oldlstat
8655 case TARGET_NR_oldlstat:
8656 goto unimplemented;
8657 #endif
8658 #ifdef TARGET_NR_readlink
8659 case TARGET_NR_readlink:
8661 void *p2;
8662 p = lock_user_string(arg1);
8663 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8664 if (!p || !p2) {
8665 ret = -TARGET_EFAULT;
8666 } else if (!arg3) {
8667 /* Short circuit this for the magic exe check. */
8668 ret = -TARGET_EINVAL;
8669 } else if (is_proc_myself((const char *)p, "exe")) {
8670 char real[PATH_MAX], *temp;
8671 temp = realpath(exec_path, real);
8672 /* Return value is # of bytes that we wrote to the buffer. */
8673 if (temp == NULL) {
8674 ret = get_errno(-1);
8675 } else {
8676 /* Don't worry about sign mismatch as earlier mapping
8677 * logic would have thrown a bad address error. */
8678 ret = MIN(strlen(real), arg3);
8679 /* We cannot NUL terminate the string. */
8680 memcpy(p2, real, ret);
8682 } else {
8683 ret = get_errno(readlink(path(p), p2, arg3));
8685 unlock_user(p2, arg2, ret);
8686 unlock_user(p, arg1, 0);
8688 break;
8689 #endif
8690 #if defined(TARGET_NR_readlinkat)
8691 case TARGET_NR_readlinkat:
8693 void *p2;
8694 p = lock_user_string(arg2);
8695 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8696 if (!p || !p2) {
8697 ret = -TARGET_EFAULT;
8698 } else if (is_proc_myself((const char *)p, "exe")) {
8699 char real[PATH_MAX], *temp;
8700 temp = realpath(exec_path, real);
8701 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8702 snprintf((char *)p2, arg4, "%s", real);
8703 } else {
8704 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8706 unlock_user(p2, arg3, ret);
8707 unlock_user(p, arg2, 0);
8709 break;
8710 #endif
8711 #ifdef TARGET_NR_uselib
8712 case TARGET_NR_uselib:
8713 goto unimplemented;
8714 #endif
8715 #ifdef TARGET_NR_swapon
8716 case TARGET_NR_swapon:
8717 if (!(p = lock_user_string(arg1)))
8718 goto efault;
8719 ret = get_errno(swapon(p, arg2));
8720 unlock_user(p, arg1, 0);
8721 break;
8722 #endif
8723 case TARGET_NR_reboot:
8724 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8725 /* arg4 must be ignored in all other cases */
8726 p = lock_user_string(arg4);
8727 if (!p) {
8728 goto efault;
8730 ret = get_errno(reboot(arg1, arg2, arg3, p));
8731 unlock_user(p, arg4, 0);
8732 } else {
8733 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8735 break;
8736 #ifdef TARGET_NR_readdir
8737 case TARGET_NR_readdir:
8738 goto unimplemented;
8739 #endif
8740 #ifdef TARGET_NR_mmap
8741 case TARGET_NR_mmap:
8742 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8743 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8744 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8745 || defined(TARGET_S390X)
8747 abi_ulong *v;
8748 abi_ulong v1, v2, v3, v4, v5, v6;
8749 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8750 goto efault;
8751 v1 = tswapal(v[0]);
8752 v2 = tswapal(v[1]);
8753 v3 = tswapal(v[2]);
8754 v4 = tswapal(v[3]);
8755 v5 = tswapal(v[4]);
8756 v6 = tswapal(v[5]);
8757 unlock_user(v, arg1, 0);
8758 ret = get_errno(target_mmap(v1, v2, v3,
8759 target_to_host_bitmask(v4, mmap_flags_tbl),
8760 v5, v6));
8762 #else
8763 ret = get_errno(target_mmap(arg1, arg2, arg3,
8764 target_to_host_bitmask(arg4, mmap_flags_tbl),
8765 arg5,
8766 arg6));
8767 #endif
8768 break;
8769 #endif
8770 #ifdef TARGET_NR_mmap2
8771 case TARGET_NR_mmap2:
8772 #ifndef MMAP_SHIFT
8773 #define MMAP_SHIFT 12
8774 #endif
8775 ret = get_errno(target_mmap(arg1, arg2, arg3,
8776 target_to_host_bitmask(arg4, mmap_flags_tbl),
8777 arg5,
8778 arg6 << MMAP_SHIFT));
8779 break;
8780 #endif
8781 case TARGET_NR_munmap:
8782 ret = get_errno(target_munmap(arg1, arg2));
8783 break;
8784 case TARGET_NR_mprotect:
8786 TaskState *ts = cpu->opaque;
8787 /* Special hack to detect libc making the stack executable. */
8788 if ((arg3 & PROT_GROWSDOWN)
8789 && arg1 >= ts->info->stack_limit
8790 && arg1 <= ts->info->start_stack) {
8791 arg3 &= ~PROT_GROWSDOWN;
8792 arg2 = arg2 + arg1 - ts->info->stack_limit;
8793 arg1 = ts->info->stack_limit;
8796 ret = get_errno(target_mprotect(arg1, arg2, arg3));
8797 break;
8798 #ifdef TARGET_NR_mremap
8799 case TARGET_NR_mremap:
8800 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8801 break;
8802 #endif
8803 /* ??? msync/mlock/munlock are broken for softmmu. */
8804 #ifdef TARGET_NR_msync
8805 case TARGET_NR_msync:
8806 ret = get_errno(msync(g2h(arg1), arg2, arg3));
8807 break;
8808 #endif
8809 #ifdef TARGET_NR_mlock
8810 case TARGET_NR_mlock:
8811 ret = get_errno(mlock(g2h(arg1), arg2));
8812 break;
8813 #endif
8814 #ifdef TARGET_NR_munlock
8815 case TARGET_NR_munlock:
8816 ret = get_errno(munlock(g2h(arg1), arg2));
8817 break;
8818 #endif
8819 #ifdef TARGET_NR_mlockall
8820 case TARGET_NR_mlockall:
8821 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8822 break;
8823 #endif
8824 #ifdef TARGET_NR_munlockall
8825 case TARGET_NR_munlockall:
8826 ret = get_errno(munlockall());
8827 break;
8828 #endif
8829 case TARGET_NR_truncate:
8830 if (!(p = lock_user_string(arg1)))
8831 goto efault;
8832 ret = get_errno(truncate(p, arg2));
8833 unlock_user(p, arg1, 0);
8834 break;
8835 case TARGET_NR_ftruncate:
8836 ret = get_errno(ftruncate(arg1, arg2));
8837 break;
8838 case TARGET_NR_fchmod:
8839 ret = get_errno(fchmod(arg1, arg2));
8840 break;
8841 #if defined(TARGET_NR_fchmodat)
8842 case TARGET_NR_fchmodat:
8843 if (!(p = lock_user_string(arg2)))
8844 goto efault;
8845 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8846 unlock_user(p, arg2, 0);
8847 break;
8848 #endif
8849 case TARGET_NR_getpriority:
8850 /* Note that negative values are valid for getpriority, so we must
8851 differentiate based on errno settings. */
8852 errno = 0;
8853 ret = getpriority(arg1, arg2);
8854 if (ret == -1 && errno != 0) {
8855 ret = -host_to_target_errno(errno);
8856 break;
8858 #ifdef TARGET_ALPHA
8859 /* Return value is the unbiased priority. Signal no error. */
8860 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8861 #else
8862 /* Return value is a biased priority to avoid negative numbers. */
8863 ret = 20 - ret;
8864 #endif
8865 break;
8866 case TARGET_NR_setpriority:
8867 ret = get_errno(setpriority(arg1, arg2, arg3));
8868 break;
8869 #ifdef TARGET_NR_profil
8870 case TARGET_NR_profil:
8871 goto unimplemented;
8872 #endif
8873 case TARGET_NR_statfs:
8874 if (!(p = lock_user_string(arg1)))
8875 goto efault;
8876 ret = get_errno(statfs(path(p), &stfs));
8877 unlock_user(p, arg1, 0);
8878 convert_statfs:
8879 if (!is_error(ret)) {
8880 struct target_statfs *target_stfs;
8882 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8883 goto efault;
8884 __put_user(stfs.f_type, &target_stfs->f_type);
8885 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8886 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8887 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8888 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8889 __put_user(stfs.f_files, &target_stfs->f_files);
8890 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8891 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8892 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8893 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8894 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8895 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8896 unlock_user_struct(target_stfs, arg2, 1);
8898 break;
8899 case TARGET_NR_fstatfs:
8900 ret = get_errno(fstatfs(arg1, &stfs));
8901 goto convert_statfs;
8902 #ifdef TARGET_NR_statfs64
8903 case TARGET_NR_statfs64:
8904 if (!(p = lock_user_string(arg1)))
8905 goto efault;
8906 ret = get_errno(statfs(path(p), &stfs));
8907 unlock_user(p, arg1, 0);
8908 convert_statfs64:
8909 if (!is_error(ret)) {
8910 struct target_statfs64 *target_stfs;
8912 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8913 goto efault;
8914 __put_user(stfs.f_type, &target_stfs->f_type);
8915 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8916 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8917 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8918 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8919 __put_user(stfs.f_files, &target_stfs->f_files);
8920 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8921 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8922 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8923 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8924 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8925 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8926 unlock_user_struct(target_stfs, arg3, 1);
8928 break;
8929 case TARGET_NR_fstatfs64:
8930 ret = get_errno(fstatfs(arg1, &stfs));
8931 goto convert_statfs64;
8932 #endif
8933 #ifdef TARGET_NR_ioperm
8934 case TARGET_NR_ioperm:
8935 goto unimplemented;
8936 #endif
8937 #ifdef TARGET_NR_socketcall
8938 case TARGET_NR_socketcall:
8939 ret = do_socketcall(arg1, arg2);
8940 break;
8941 #endif
8942 #ifdef TARGET_NR_accept
8943 case TARGET_NR_accept:
8944 ret = do_accept4(arg1, arg2, arg3, 0);
8945 break;
8946 #endif
8947 #ifdef TARGET_NR_accept4
8948 case TARGET_NR_accept4:
8949 ret = do_accept4(arg1, arg2, arg3, arg4);
8950 break;
8951 #endif
8952 #ifdef TARGET_NR_bind
8953 case TARGET_NR_bind:
8954 ret = do_bind(arg1, arg2, arg3);
8955 break;
8956 #endif
8957 #ifdef TARGET_NR_connect
8958 case TARGET_NR_connect:
8959 ret = do_connect(arg1, arg2, arg3);
8960 break;
8961 #endif
8962 #ifdef TARGET_NR_getpeername
8963 case TARGET_NR_getpeername:
8964 ret = do_getpeername(arg1, arg2, arg3);
8965 break;
8966 #endif
8967 #ifdef TARGET_NR_getsockname
8968 case TARGET_NR_getsockname:
8969 ret = do_getsockname(arg1, arg2, arg3);
8970 break;
8971 #endif
8972 #ifdef TARGET_NR_getsockopt
8973 case TARGET_NR_getsockopt:
8974 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8975 break;
8976 #endif
8977 #ifdef TARGET_NR_listen
8978 case TARGET_NR_listen:
8979 ret = get_errno(listen(arg1, arg2));
8980 break;
8981 #endif
8982 #ifdef TARGET_NR_recv
8983 case TARGET_NR_recv:
8984 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8985 break;
8986 #endif
8987 #ifdef TARGET_NR_recvfrom
8988 case TARGET_NR_recvfrom:
8989 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8990 break;
8991 #endif
8992 #ifdef TARGET_NR_recvmsg
8993 case TARGET_NR_recvmsg:
8994 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
8995 break;
8996 #endif
8997 #ifdef TARGET_NR_send
8998 case TARGET_NR_send:
8999 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9000 break;
9001 #endif
9002 #ifdef TARGET_NR_sendmsg
9003 case TARGET_NR_sendmsg:
9004 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9005 break;
9006 #endif
9007 #ifdef TARGET_NR_sendmmsg
9008 case TARGET_NR_sendmmsg:
9009 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9010 break;
9011 case TARGET_NR_recvmmsg:
9012 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9013 break;
9014 #endif
9015 #ifdef TARGET_NR_sendto
9016 case TARGET_NR_sendto:
9017 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9018 break;
9019 #endif
9020 #ifdef TARGET_NR_shutdown
9021 case TARGET_NR_shutdown:
9022 ret = get_errno(shutdown(arg1, arg2));
9023 break;
9024 #endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    /* The guard on __NR_getrandom keeps the build working against older
     * linux-headers that do not define the getrandom syscall number. */
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            goto efault;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        /* On success ret is the number of bytes written; pass it to
         * unlock_user so only the filled portion is copied back. */
        unlock_user(p, arg1, ret);
        break;
#endif
9035 #ifdef TARGET_NR_socket
9036 case TARGET_NR_socket:
9037 ret = do_socket(arg1, arg2, arg3);
9038 fd_trans_unregister(ret);
9039 break;
9040 #endif
9041 #ifdef TARGET_NR_socketpair
9042 case TARGET_NR_socketpair:
9043 ret = do_socketpair(arg1, arg2, arg3, arg4);
9044 break;
9045 #endif
9046 #ifdef TARGET_NR_setsockopt
9047 case TARGET_NR_setsockopt:
9048 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9049 break;
9050 #endif
9052 case TARGET_NR_syslog:
9053 if (!(p = lock_user_string(arg2)))
9054 goto efault;
9055 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9056 unlock_user(p, arg2, 0);
9057 break;
9059 case TARGET_NR_setitimer:
9061 struct itimerval value, ovalue, *pvalue;
9063 if (arg2) {
9064 pvalue = &value;
9065 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9066 || copy_from_user_timeval(&pvalue->it_value,
9067 arg2 + sizeof(struct target_timeval)))
9068 goto efault;
9069 } else {
9070 pvalue = NULL;
9072 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9073 if (!is_error(ret) && arg3) {
9074 if (copy_to_user_timeval(arg3,
9075 &ovalue.it_interval)
9076 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9077 &ovalue.it_value))
9078 goto efault;
9081 break;
9082 case TARGET_NR_getitimer:
9084 struct itimerval value;
9086 ret = get_errno(getitimer(arg1, &value));
9087 if (!is_error(ret) && arg2) {
9088 if (copy_to_user_timeval(arg2,
9089 &value.it_interval)
9090 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9091 &value.it_value))
9092 goto efault;
9095 break;
9096 #ifdef TARGET_NR_stat
9097 case TARGET_NR_stat:
9098 if (!(p = lock_user_string(arg1)))
9099 goto efault;
9100 ret = get_errno(stat(path(p), &st));
9101 unlock_user(p, arg1, 0);
9102 goto do_stat;
9103 #endif
9104 #ifdef TARGET_NR_lstat
9105 case TARGET_NR_lstat:
9106 if (!(p = lock_user_string(arg1)))
9107 goto efault;
9108 ret = get_errno(lstat(path(p), &st));
9109 unlock_user(p, arg1, 0);
9110 goto do_stat;
9111 #endif
9112 case TARGET_NR_fstat:
9114 ret = get_errno(fstat(arg1, &st));
9115 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9116 do_stat:
9117 #endif
9118 if (!is_error(ret)) {
9119 struct target_stat *target_st;
9121 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9122 goto efault;
9123 memset(target_st, 0, sizeof(*target_st));
9124 __put_user(st.st_dev, &target_st->st_dev);
9125 __put_user(st.st_ino, &target_st->st_ino);
9126 __put_user(st.st_mode, &target_st->st_mode);
9127 __put_user(st.st_uid, &target_st->st_uid);
9128 __put_user(st.st_gid, &target_st->st_gid);
9129 __put_user(st.st_nlink, &target_st->st_nlink);
9130 __put_user(st.st_rdev, &target_st->st_rdev);
9131 __put_user(st.st_size, &target_st->st_size);
9132 __put_user(st.st_blksize, &target_st->st_blksize);
9133 __put_user(st.st_blocks, &target_st->st_blocks);
9134 __put_user(st.st_atime, &target_st->target_st_atime);
9135 __put_user(st.st_mtime, &target_st->target_st_mtime);
9136 __put_user(st.st_ctime, &target_st->target_st_ctime);
9137 unlock_user_struct(target_st, arg2, 1);
9140 break;
9141 #ifdef TARGET_NR_olduname
9142 case TARGET_NR_olduname:
9143 goto unimplemented;
9144 #endif
9145 #ifdef TARGET_NR_iopl
9146 case TARGET_NR_iopl:
9147 goto unimplemented;
9148 #endif
9149 case TARGET_NR_vhangup:
9150 ret = get_errno(vhangup());
9151 break;
9152 #ifdef TARGET_NR_idle
9153 case TARGET_NR_idle:
9154 goto unimplemented;
9155 #endif
9156 #ifdef TARGET_NR_syscall
9157 case TARGET_NR_syscall:
9158 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9159 arg6, arg7, arg8, 0);
9160 break;
9161 #endif
9162 case TARGET_NR_wait4:
9164 int status;
9165 abi_long status_ptr = arg2;
9166 struct rusage rusage, *rusage_ptr;
9167 abi_ulong target_rusage = arg4;
9168 abi_long rusage_err;
9169 if (target_rusage)
9170 rusage_ptr = &rusage;
9171 else
9172 rusage_ptr = NULL;
9173 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9174 if (!is_error(ret)) {
9175 if (status_ptr && ret) {
9176 status = host_to_target_waitstatus(status);
9177 if (put_user_s32(status, status_ptr))
9178 goto efault;
9180 if (target_rusage) {
9181 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9182 if (rusage_err) {
9183 ret = rusage_err;
9188 break;
9189 #ifdef TARGET_NR_swapoff
9190 case TARGET_NR_swapoff:
9191 if (!(p = lock_user_string(arg1)))
9192 goto efault;
9193 ret = get_errno(swapoff(p));
9194 unlock_user(p, arg1, 0);
9195 break;
9196 #endif
9197 case TARGET_NR_sysinfo:
9199 struct target_sysinfo *target_value;
9200 struct sysinfo value;
9201 ret = get_errno(sysinfo(&value));
9202 if (!is_error(ret) && arg1)
9204 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9205 goto efault;
9206 __put_user(value.uptime, &target_value->uptime);
9207 __put_user(value.loads[0], &target_value->loads[0]);
9208 __put_user(value.loads[1], &target_value->loads[1]);
9209 __put_user(value.loads[2], &target_value->loads[2]);
9210 __put_user(value.totalram, &target_value->totalram);
9211 __put_user(value.freeram, &target_value->freeram);
9212 __put_user(value.sharedram, &target_value->sharedram);
9213 __put_user(value.bufferram, &target_value->bufferram);
9214 __put_user(value.totalswap, &target_value->totalswap);
9215 __put_user(value.freeswap, &target_value->freeswap);
9216 __put_user(value.procs, &target_value->procs);
9217 __put_user(value.totalhigh, &target_value->totalhigh);
9218 __put_user(value.freehigh, &target_value->freehigh);
9219 __put_user(value.mem_unit, &target_value->mem_unit);
9220 unlock_user_struct(target_value, arg1, 1);
9223 break;
9224 #ifdef TARGET_NR_ipc
9225 case TARGET_NR_ipc:
9226 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
9227 break;
9228 #endif
9229 #ifdef TARGET_NR_semget
9230 case TARGET_NR_semget:
9231 ret = get_errno(semget(arg1, arg2, arg3));
9232 break;
9233 #endif
9234 #ifdef TARGET_NR_semop
9235 case TARGET_NR_semop:
9236 ret = do_semop(arg1, arg2, arg3);
9237 break;
9238 #endif
9239 #ifdef TARGET_NR_semctl
9240 case TARGET_NR_semctl:
9241 ret = do_semctl(arg1, arg2, arg3, arg4);
9242 break;
9243 #endif
9244 #ifdef TARGET_NR_msgctl
9245 case TARGET_NR_msgctl:
9246 ret = do_msgctl(arg1, arg2, arg3);
9247 break;
9248 #endif
9249 #ifdef TARGET_NR_msgget
9250 case TARGET_NR_msgget:
9251 ret = get_errno(msgget(arg1, arg2));
9252 break;
9253 #endif
9254 #ifdef TARGET_NR_msgrcv
9255 case TARGET_NR_msgrcv:
9256 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9257 break;
9258 #endif
9259 #ifdef TARGET_NR_msgsnd
9260 case TARGET_NR_msgsnd:
9261 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9262 break;
9263 #endif
9264 #ifdef TARGET_NR_shmget
9265 case TARGET_NR_shmget:
9266 ret = get_errno(shmget(arg1, arg2, arg3));
9267 break;
9268 #endif
9269 #ifdef TARGET_NR_shmctl
9270 case TARGET_NR_shmctl:
9271 ret = do_shmctl(arg1, arg2, arg3);
9272 break;
9273 #endif
9274 #ifdef TARGET_NR_shmat
9275 case TARGET_NR_shmat:
9276 ret = do_shmat(arg1, arg2, arg3);
9277 break;
9278 #endif
9279 #ifdef TARGET_NR_shmdt
9280 case TARGET_NR_shmdt:
9281 ret = do_shmdt(arg1);
9282 break;
9283 #endif
9284 case TARGET_NR_fsync:
9285 ret = get_errno(fsync(arg1));
9286 break;
9287 case TARGET_NR_clone:
9288 /* Linux manages to have three different orderings for its
9289 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9290 * match the kernel's CONFIG_CLONE_* settings.
9291 * Microblaze is further special in that it uses a sixth
9292 * implicit argument to clone for the TLS pointer.
9294 #if defined(TARGET_MICROBLAZE)
9295 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9296 #elif defined(TARGET_CLONE_BACKWARDS)
9297 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9298 #elif defined(TARGET_CLONE_BACKWARDS2)
9299 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9300 #else
9301 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9302 #endif
9303 break;
9304 #ifdef __NR_exit_group
9305 /* new thread calls */
9306 case TARGET_NR_exit_group:
9307 #ifdef TARGET_GPROF
9308 _mcleanup();
9309 #endif
9310 gdb_exit(cpu_env, arg1);
9311 ret = get_errno(exit_group(arg1));
9312 break;
9313 #endif
9314 case TARGET_NR_setdomainname:
9315 if (!(p = lock_user_string(arg1)))
9316 goto efault;
9317 ret = get_errno(setdomainname(p, arg2));
9318 unlock_user(p, arg1, 0);
9319 break;
9320 case TARGET_NR_uname:
9321 /* no need to transcode because we use the linux syscall */
9323 struct new_utsname * buf;
9325 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9326 goto efault;
9327 ret = get_errno(sys_uname(buf));
9328 if (!is_error(ret)) {
9329 /* Overwrite the native machine name with whatever is being
9330 emulated. */
9331 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
9332 /* Allow the user to override the reported release. */
9333 if (qemu_uname_release && *qemu_uname_release) {
9334 g_strlcpy(buf->release, qemu_uname_release,
9335 sizeof(buf->release));
9338 unlock_user_struct(buf, arg1, 1);
9340 break;
9341 #ifdef TARGET_I386
9342 case TARGET_NR_modify_ldt:
9343 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
9344 break;
9345 #if !defined(TARGET_X86_64)
9346 case TARGET_NR_vm86old:
9347 goto unimplemented;
9348 case TARGET_NR_vm86:
9349 ret = do_vm86(cpu_env, arg1, arg2);
9350 break;
9351 #endif
9352 #endif
9353 case TARGET_NR_adjtimex:
9354 goto unimplemented;
9355 #ifdef TARGET_NR_create_module
9356 case TARGET_NR_create_module:
9357 #endif
9358 case TARGET_NR_init_module:
9359 case TARGET_NR_delete_module:
9360 #ifdef TARGET_NR_get_kernel_syms
9361 case TARGET_NR_get_kernel_syms:
9362 #endif
9363 goto unimplemented;
9364 case TARGET_NR_quotactl:
9365 goto unimplemented;
9366 case TARGET_NR_getpgid:
9367 ret = get_errno(getpgid(arg1));
9368 break;
9369 case TARGET_NR_fchdir:
9370 ret = get_errno(fchdir(arg1));
9371 break;
9372 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9373 case TARGET_NR_bdflush:
9374 goto unimplemented;
9375 #endif
9376 #ifdef TARGET_NR_sysfs
9377 case TARGET_NR_sysfs:
9378 goto unimplemented;
9379 #endif
9380 case TARGET_NR_personality:
9381 ret = get_errno(personality(arg1));
9382 break;
9383 #ifdef TARGET_NR_afs_syscall
9384 case TARGET_NR_afs_syscall:
9385 goto unimplemented;
9386 #endif
9387 #ifdef TARGET_NR__llseek /* Not on alpha */
9388 case TARGET_NR__llseek:
9390 int64_t res;
9391 #if !defined(__NR_llseek)
9392 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
9393 if (res == -1) {
9394 ret = get_errno(res);
9395 } else {
9396 ret = 0;
9398 #else
9399 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9400 #endif
9401 if ((ret == 0) && put_user_s64(res, arg4)) {
9402 goto efault;
9405 break;
9406 #endif
9407 #ifdef TARGET_NR_getdents
9408 case TARGET_NR_getdents:
9409 #ifdef __NR_getdents
9410 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9412 struct target_dirent *target_dirp;
9413 struct linux_dirent *dirp;
9414 abi_long count = arg3;
9416 dirp = g_try_malloc(count);
9417 if (!dirp) {
9418 ret = -TARGET_ENOMEM;
9419 goto fail;
9422 ret = get_errno(sys_getdents(arg1, dirp, count));
9423 if (!is_error(ret)) {
9424 struct linux_dirent *de;
9425 struct target_dirent *tde;
9426 int len = ret;
9427 int reclen, treclen;
9428 int count1, tnamelen;
9430 count1 = 0;
9431 de = dirp;
9432 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9433 goto efault;
9434 tde = target_dirp;
9435 while (len > 0) {
9436 reclen = de->d_reclen;
9437 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9438 assert(tnamelen >= 0);
9439 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9440 assert(count1 + treclen <= count);
9441 tde->d_reclen = tswap16(treclen);
9442 tde->d_ino = tswapal(de->d_ino);
9443 tde->d_off = tswapal(de->d_off);
9444 memcpy(tde->d_name, de->d_name, tnamelen);
9445 de = (struct linux_dirent *)((char *)de + reclen);
9446 len -= reclen;
9447 tde = (struct target_dirent *)((char *)tde + treclen);
9448 count1 += treclen;
9450 ret = count1;
9451 unlock_user(target_dirp, arg2, ret);
9453 g_free(dirp);
9455 #else
9457 struct linux_dirent *dirp;
9458 abi_long count = arg3;
9460 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9461 goto efault;
9462 ret = get_errno(sys_getdents(arg1, dirp, count));
9463 if (!is_error(ret)) {
9464 struct linux_dirent *de;
9465 int len = ret;
9466 int reclen;
9467 de = dirp;
9468 while (len > 0) {
9469 reclen = de->d_reclen;
9470 if (reclen > len)
9471 break;
9472 de->d_reclen = tswap16(reclen);
9473 tswapls(&de->d_ino);
9474 tswapls(&de->d_off);
9475 de = (struct linux_dirent *)((char *)de + reclen);
9476 len -= reclen;
9479 unlock_user(dirp, arg2, ret);
9481 #endif
9482 #else
9483 /* Implement getdents in terms of getdents64 */
9485 struct linux_dirent64 *dirp;
9486 abi_long count = arg3;
9488 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9489 if (!dirp) {
9490 goto efault;
9492 ret = get_errno(sys_getdents64(arg1, dirp, count));
9493 if (!is_error(ret)) {
9494 /* Convert the dirent64 structs to target dirent. We do this
9495 * in-place, since we can guarantee that a target_dirent is no
9496 * larger than a dirent64; however this means we have to be
9497 * careful to read everything before writing in the new format.
9499 struct linux_dirent64 *de;
9500 struct target_dirent *tde;
9501 int len = ret;
9502 int tlen = 0;
9504 de = dirp;
9505 tde = (struct target_dirent *)dirp;
9506 while (len > 0) {
9507 int namelen, treclen;
9508 int reclen = de->d_reclen;
9509 uint64_t ino = de->d_ino;
9510 int64_t off = de->d_off;
9511 uint8_t type = de->d_type;
9513 namelen = strlen(de->d_name);
9514 treclen = offsetof(struct target_dirent, d_name)
9515 + namelen + 2;
9516 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9518 memmove(tde->d_name, de->d_name, namelen + 1);
9519 tde->d_ino = tswapal(ino);
9520 tde->d_off = tswapal(off);
9521 tde->d_reclen = tswap16(treclen);
9522 /* The target_dirent type is in what was formerly a padding
9523 * byte at the end of the structure:
9525 *(((char *)tde) + treclen - 1) = type;
9527 de = (struct linux_dirent64 *)((char *)de + reclen);
9528 tde = (struct target_dirent *)((char *)tde + treclen);
9529 len -= reclen;
9530 tlen += treclen;
9532 ret = tlen;
9534 unlock_user(dirp, arg2, ret);
9536 #endif
9537 break;
9538 #endif /* TARGET_NR_getdents */
9539 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9540 case TARGET_NR_getdents64:
9542 struct linux_dirent64 *dirp;
9543 abi_long count = arg3;
9544 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9545 goto efault;
9546 ret = get_errno(sys_getdents64(arg1, dirp, count));
9547 if (!is_error(ret)) {
9548 struct linux_dirent64 *de;
9549 int len = ret;
9550 int reclen;
9551 de = dirp;
9552 while (len > 0) {
9553 reclen = de->d_reclen;
9554 if (reclen > len)
9555 break;
9556 de->d_reclen = tswap16(reclen);
9557 tswap64s((uint64_t *)&de->d_ino);
9558 tswap64s((uint64_t *)&de->d_off);
9559 de = (struct linux_dirent64 *)((char *)de + reclen);
9560 len -= reclen;
9563 unlock_user(dirp, arg2, ret);
9565 break;
9566 #endif /* TARGET_NR_getdents64 */
9567 #if defined(TARGET_NR__newselect)
9568 case TARGET_NR__newselect:
9569 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9570 break;
9571 #endif
9572 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9573 # ifdef TARGET_NR_poll
9574 case TARGET_NR_poll:
9575 # endif
9576 # ifdef TARGET_NR_ppoll
9577 case TARGET_NR_ppoll:
9578 # endif
9580 struct target_pollfd *target_pfd;
9581 unsigned int nfds = arg2;
9582 struct pollfd *pfd;
9583 unsigned int i;
9585 pfd = NULL;
9586 target_pfd = NULL;
9587 if (nfds) {
9588 target_pfd = lock_user(VERIFY_WRITE, arg1,
9589 sizeof(struct target_pollfd) * nfds, 1);
9590 if (!target_pfd) {
9591 goto efault;
9594 pfd = alloca(sizeof(struct pollfd) * nfds);
9595 for (i = 0; i < nfds; i++) {
9596 pfd[i].fd = tswap32(target_pfd[i].fd);
9597 pfd[i].events = tswap16(target_pfd[i].events);
9601 switch (num) {
9602 # ifdef TARGET_NR_ppoll
9603 case TARGET_NR_ppoll:
9605 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9606 target_sigset_t *target_set;
9607 sigset_t _set, *set = &_set;
9609 if (arg3) {
9610 if (target_to_host_timespec(timeout_ts, arg3)) {
9611 unlock_user(target_pfd, arg1, 0);
9612 goto efault;
9614 } else {
9615 timeout_ts = NULL;
9618 if (arg4) {
9619 if (arg5 != sizeof(target_sigset_t)) {
9620 unlock_user(target_pfd, arg1, 0);
9621 ret = -TARGET_EINVAL;
9622 break;
9625 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9626 if (!target_set) {
9627 unlock_user(target_pfd, arg1, 0);
9628 goto efault;
9630 target_to_host_sigset(set, target_set);
9631 } else {
9632 set = NULL;
9635 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9636 set, SIGSET_T_SIZE));
9638 if (!is_error(ret) && arg3) {
9639 host_to_target_timespec(arg3, timeout_ts);
9641 if (arg4) {
9642 unlock_user(target_set, arg4, 0);
9644 break;
9646 # endif
9647 # ifdef TARGET_NR_poll
9648 case TARGET_NR_poll:
9650 struct timespec ts, *pts;
9652 if (arg3 >= 0) {
9653 /* Convert ms to secs, ns */
9654 ts.tv_sec = arg3 / 1000;
9655 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9656 pts = &ts;
9657 } else {
9658 /* -ve poll() timeout means "infinite" */
9659 pts = NULL;
9661 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9662 break;
9664 # endif
9665 default:
9666 g_assert_not_reached();
9669 if (!is_error(ret)) {
9670 for(i = 0; i < nfds; i++) {
9671 target_pfd[i].revents = tswap16(pfd[i].revents);
9674 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9676 break;
9677 #endif
9678 case TARGET_NR_flock:
9679 /* NOTE: the flock constant seems to be the same for every
9680 Linux platform */
9681 ret = get_errno(safe_flock(arg1, arg2));
9682 break;
9683 case TARGET_NR_readv:
9685 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9686 if (vec != NULL) {
9687 ret = get_errno(safe_readv(arg1, vec, arg3));
9688 unlock_iovec(vec, arg2, arg3, 1);
9689 } else {
9690 ret = -host_to_target_errno(errno);
9693 break;
9694 case TARGET_NR_writev:
9696 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9697 if (vec != NULL) {
9698 ret = get_errno(safe_writev(arg1, vec, arg3));
9699 unlock_iovec(vec, arg2, arg3, 0);
9700 } else {
9701 ret = -host_to_target_errno(errno);
9704 break;
9705 case TARGET_NR_getsid:
9706 ret = get_errno(getsid(arg1));
9707 break;
9708 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9709 case TARGET_NR_fdatasync:
9710 ret = get_errno(fdatasync(arg1));
9711 break;
9712 #endif
9713 #ifdef TARGET_NR__sysctl
9714 case TARGET_NR__sysctl:
9715 /* We don't implement this, but ENOTDIR is always a safe
9716 return value. */
9717 ret = -TARGET_ENOTDIR;
9718 break;
9719 #endif
9720 case TARGET_NR_sched_getaffinity:
9722 unsigned int mask_size;
9723 unsigned long *mask;
9726 * sched_getaffinity needs multiples of ulong, so need to take
9727 * care of mismatches between target ulong and host ulong sizes.
9729 if (arg2 & (sizeof(abi_ulong) - 1)) {
9730 ret = -TARGET_EINVAL;
9731 break;
9733 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9735 mask = alloca(mask_size);
9736 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9738 if (!is_error(ret)) {
9739 if (ret > arg2) {
9740 /* More data returned than the caller's buffer will fit.
9741 * This only happens if sizeof(abi_long) < sizeof(long)
9742 * and the caller passed us a buffer holding an odd number
9743 * of abi_longs. If the host kernel is actually using the
9744 * extra 4 bytes then fail EINVAL; otherwise we can just
9745 * ignore them and only copy the interesting part.
9747 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9748 if (numcpus > arg2 * 8) {
9749 ret = -TARGET_EINVAL;
9750 break;
9752 ret = arg2;
9755 if (copy_to_user(arg3, mask, ret)) {
9756 goto efault;
9760 break;
9761 case TARGET_NR_sched_setaffinity:
9763 unsigned int mask_size;
9764 unsigned long *mask;
9767 * sched_setaffinity needs multiples of ulong, so need to take
9768 * care of mismatches between target ulong and host ulong sizes.
9770 if (arg2 & (sizeof(abi_ulong) - 1)) {
9771 ret = -TARGET_EINVAL;
9772 break;
9774 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9776 mask = alloca(mask_size);
9777 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9778 goto efault;
9780 memcpy(mask, p, arg2);
9781 unlock_user_struct(p, arg2, 0);
9783 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9785 break;
9786 case TARGET_NR_sched_setparam:
9788 struct sched_param *target_schp;
9789 struct sched_param schp;
9791 if (arg2 == 0) {
9792 return -TARGET_EINVAL;
9794 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9795 goto efault;
9796 schp.sched_priority = tswap32(target_schp->sched_priority);
9797 unlock_user_struct(target_schp, arg2, 0);
9798 ret = get_errno(sched_setparam(arg1, &schp));
9800 break;
9801 case TARGET_NR_sched_getparam:
9803 struct sched_param *target_schp;
9804 struct sched_param schp;
9806 if (arg2 == 0) {
9807 return -TARGET_EINVAL;
9809 ret = get_errno(sched_getparam(arg1, &schp));
9810 if (!is_error(ret)) {
9811 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9812 goto efault;
9813 target_schp->sched_priority = tswap32(schp.sched_priority);
9814 unlock_user_struct(target_schp, arg2, 1);
9817 break;
9818 case TARGET_NR_sched_setscheduler:
9820 struct sched_param *target_schp;
9821 struct sched_param schp;
9822 if (arg3 == 0) {
9823 return -TARGET_EINVAL;
9825 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9826 goto efault;
9827 schp.sched_priority = tswap32(target_schp->sched_priority);
9828 unlock_user_struct(target_schp, arg3, 0);
9829 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
9831 break;
9832 case TARGET_NR_sched_getscheduler:
9833 ret = get_errno(sched_getscheduler(arg1));
9834 break;
9835 case TARGET_NR_sched_yield:
9836 ret = get_errno(sched_yield());
9837 break;
9838 case TARGET_NR_sched_get_priority_max:
9839 ret = get_errno(sched_get_priority_max(arg1));
9840 break;
9841 case TARGET_NR_sched_get_priority_min:
9842 ret = get_errno(sched_get_priority_min(arg1));
9843 break;
9844 case TARGET_NR_sched_rr_get_interval:
9846 struct timespec ts;
9847 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9848 if (!is_error(ret)) {
9849 ret = host_to_target_timespec(arg2, &ts);
9852 break;
9853 case TARGET_NR_nanosleep:
9855 struct timespec req, rem;
9856 target_to_host_timespec(&req, arg1);
9857 ret = get_errno(safe_nanosleep(&req, &rem));
9858 if (is_error(ret) && arg2) {
9859 host_to_target_timespec(arg2, &rem);
9862 break;
9863 #ifdef TARGET_NR_query_module
9864 case TARGET_NR_query_module:
9865 goto unimplemented;
9866 #endif
9867 #ifdef TARGET_NR_nfsservctl
9868 case TARGET_NR_nfsservctl:
9869 goto unimplemented;
9870 #endif
9871 case TARGET_NR_prctl:
9872 switch (arg1) {
9873 case PR_GET_PDEATHSIG:
9875 int deathsig;
9876 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9877 if (!is_error(ret) && arg2
9878 && put_user_ual(deathsig, arg2)) {
9879 goto efault;
9881 break;
9883 #ifdef PR_GET_NAME
9884 case PR_GET_NAME:
9886 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9887 if (!name) {
9888 goto efault;
9890 ret = get_errno(prctl(arg1, (unsigned long)name,
9891 arg3, arg4, arg5));
9892 unlock_user(name, arg2, 16);
9893 break;
9895 case PR_SET_NAME:
9897 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9898 if (!name) {
9899 goto efault;
9901 ret = get_errno(prctl(arg1, (unsigned long)name,
9902 arg3, arg4, arg5));
9903 unlock_user(name, arg2, 0);
9904 break;
9906 #endif
9907 default:
9908 /* Most prctl options have no pointer arguments */
9909 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9910 break;
9912 break;
9913 #ifdef TARGET_NR_arch_prctl
9914 case TARGET_NR_arch_prctl:
9915 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9916 ret = do_arch_prctl(cpu_env, arg1, arg2);
9917 break;
9918 #else
9919 goto unimplemented;
9920 #endif
9921 #endif
9922 #ifdef TARGET_NR_pread64
9923 case TARGET_NR_pread64:
9924 if (regpairs_aligned(cpu_env)) {
9925 arg4 = arg5;
9926 arg5 = arg6;
9928 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9929 goto efault;
9930 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9931 unlock_user(p, arg2, ret);
9932 break;
9933 case TARGET_NR_pwrite64:
9934 if (regpairs_aligned(cpu_env)) {
9935 arg4 = arg5;
9936 arg5 = arg6;
9938 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9939 goto efault;
9940 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9941 unlock_user(p, arg2, 0);
9942 break;
9943 #endif
9944 case TARGET_NR_getcwd:
9945 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9946 goto efault;
9947 ret = get_errno(sys_getcwd1(p, arg2));
9948 unlock_user(p, arg1, ret);
9949 break;
9950 case TARGET_NR_capget:
9951 case TARGET_NR_capset:
9953 struct target_user_cap_header *target_header;
9954 struct target_user_cap_data *target_data = NULL;
9955 struct __user_cap_header_struct header;
9956 struct __user_cap_data_struct data[2];
9957 struct __user_cap_data_struct *dataptr = NULL;
9958 int i, target_datalen;
9959 int data_items = 1;
9961 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9962 goto efault;
9964 header.version = tswap32(target_header->version);
9965 header.pid = tswap32(target_header->pid);
9967 if (header.version != _LINUX_CAPABILITY_VERSION) {
9968 /* Version 2 and up takes pointer to two user_data structs */
9969 data_items = 2;
9972 target_datalen = sizeof(*target_data) * data_items;
9974 if (arg2) {
9975 if (num == TARGET_NR_capget) {
9976 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9977 } else {
9978 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9980 if (!target_data) {
9981 unlock_user_struct(target_header, arg1, 0);
9982 goto efault;
9985 if (num == TARGET_NR_capset) {
9986 for (i = 0; i < data_items; i++) {
9987 data[i].effective = tswap32(target_data[i].effective);
9988 data[i].permitted = tswap32(target_data[i].permitted);
9989 data[i].inheritable = tswap32(target_data[i].inheritable);
9993 dataptr = data;
9996 if (num == TARGET_NR_capget) {
9997 ret = get_errno(capget(&header, dataptr));
9998 } else {
9999 ret = get_errno(capset(&header, dataptr));
10002 /* The kernel always updates version for both capget and capset */
10003 target_header->version = tswap32(header.version);
10004 unlock_user_struct(target_header, arg1, 1);
10006 if (arg2) {
10007 if (num == TARGET_NR_capget) {
10008 for (i = 0; i < data_items; i++) {
10009 target_data[i].effective = tswap32(data[i].effective);
10010 target_data[i].permitted = tswap32(data[i].permitted);
10011 target_data[i].inheritable = tswap32(data[i].inheritable);
10013 unlock_user(target_data, arg2, target_datalen);
10014 } else {
10015 unlock_user(target_data, arg2, 0);
10018 break;
10020 case TARGET_NR_sigaltstack:
10021 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10022 break;
10024 #ifdef CONFIG_SENDFILE
10025 case TARGET_NR_sendfile:
10027 off_t *offp = NULL;
10028 off_t off;
10029 if (arg3) {
10030 ret = get_user_sal(off, arg3);
10031 if (is_error(ret)) {
10032 break;
10034 offp = &off;
10036 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10037 if (!is_error(ret) && arg3) {
10038 abi_long ret2 = put_user_sal(off, arg3);
10039 if (is_error(ret2)) {
10040 ret = ret2;
10043 break;
10045 #ifdef TARGET_NR_sendfile64
10046 case TARGET_NR_sendfile64:
10048 off_t *offp = NULL;
10049 off_t off;
10050 if (arg3) {
10051 ret = get_user_s64(off, arg3);
10052 if (is_error(ret)) {
10053 break;
10055 offp = &off;
10057 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10058 if (!is_error(ret) && arg3) {
10059 abi_long ret2 = put_user_s64(off, arg3);
10060 if (is_error(ret2)) {
10061 ret = ret2;
10064 break;
10066 #endif
10067 #else
10068 case TARGET_NR_sendfile:
10069 #ifdef TARGET_NR_sendfile64
10070 case TARGET_NR_sendfile64:
10071 #endif
10072 goto unimplemented;
10073 #endif
10075 #ifdef TARGET_NR_getpmsg
10076 case TARGET_NR_getpmsg:
10077 goto unimplemented;
10078 #endif
10079 #ifdef TARGET_NR_putpmsg
10080 case TARGET_NR_putpmsg:
10081 goto unimplemented;
10082 #endif
10083 #ifdef TARGET_NR_vfork
10084 case TARGET_NR_vfork:
10085 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
10086 0, 0, 0, 0));
10087 break;
10088 #endif
10089 #ifdef TARGET_NR_ugetrlimit
10090 case TARGET_NR_ugetrlimit:
10092 struct rlimit rlim;
10093 int resource = target_to_host_resource(arg1);
10094 ret = get_errno(getrlimit(resource, &rlim));
10095 if (!is_error(ret)) {
10096 struct target_rlimit *target_rlim;
10097 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10098 goto efault;
10099 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10100 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10101 unlock_user_struct(target_rlim, arg2, 1);
10103 break;
10105 #endif
10106 #ifdef TARGET_NR_truncate64
10107 case TARGET_NR_truncate64:
10108 if (!(p = lock_user_string(arg1)))
10109 goto efault;
10110 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10111 unlock_user(p, arg1, 0);
10112 break;
10113 #endif
10114 #ifdef TARGET_NR_ftruncate64
10115 case TARGET_NR_ftruncate64:
10116 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10117 break;
10118 #endif
10119 #ifdef TARGET_NR_stat64
10120 case TARGET_NR_stat64:
10121 if (!(p = lock_user_string(arg1)))
10122 goto efault;
10123 ret = get_errno(stat(path(p), &st));
10124 unlock_user(p, arg1, 0);
10125 if (!is_error(ret))
10126 ret = host_to_target_stat64(cpu_env, arg2, &st);
10127 break;
10128 #endif
10129 #ifdef TARGET_NR_lstat64
10130 case TARGET_NR_lstat64:
10131 if (!(p = lock_user_string(arg1)))
10132 goto efault;
10133 ret = get_errno(lstat(path(p), &st));
10134 unlock_user(p, arg1, 0);
10135 if (!is_error(ret))
10136 ret = host_to_target_stat64(cpu_env, arg2, &st);
10137 break;
10138 #endif
10139 #ifdef TARGET_NR_fstat64
10140 case TARGET_NR_fstat64:
10141 ret = get_errno(fstat(arg1, &st));
10142 if (!is_error(ret))
10143 ret = host_to_target_stat64(cpu_env, arg2, &st);
10144 break;
10145 #endif
10146 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10147 #ifdef TARGET_NR_fstatat64
10148 case TARGET_NR_fstatat64:
10149 #endif
10150 #ifdef TARGET_NR_newfstatat
10151 case TARGET_NR_newfstatat:
10152 #endif
10153 if (!(p = lock_user_string(arg2)))
10154 goto efault;
10155 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10156 if (!is_error(ret))
10157 ret = host_to_target_stat64(cpu_env, arg3, &st);
10158 break;
10159 #endif
10160 #ifdef TARGET_NR_lchown
10161 case TARGET_NR_lchown:
10162 if (!(p = lock_user_string(arg1)))
10163 goto efault;
10164 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10165 unlock_user(p, arg1, 0);
10166 break;
10167 #endif
10168 #ifdef TARGET_NR_getuid
10169 case TARGET_NR_getuid:
10170 ret = get_errno(high2lowuid(getuid()));
10171 break;
10172 #endif
10173 #ifdef TARGET_NR_getgid
10174 case TARGET_NR_getgid:
10175 ret = get_errno(high2lowgid(getgid()));
10176 break;
10177 #endif
10178 #ifdef TARGET_NR_geteuid
10179 case TARGET_NR_geteuid:
10180 ret = get_errno(high2lowuid(geteuid()));
10181 break;
10182 #endif
10183 #ifdef TARGET_NR_getegid
10184 case TARGET_NR_getegid:
10185 ret = get_errno(high2lowgid(getegid()));
10186 break;
10187 #endif
10188 case TARGET_NR_setreuid:
10189 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10190 break;
10191 case TARGET_NR_setregid:
10192 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10193 break;
10194 case TARGET_NR_getgroups:
10196 int gidsetsize = arg1;
10197 target_id *target_grouplist;
10198 gid_t *grouplist;
10199 int i;
10201 grouplist = alloca(gidsetsize * sizeof(gid_t));
10202 ret = get_errno(getgroups(gidsetsize, grouplist));
10203 if (gidsetsize == 0)
10204 break;
10205 if (!is_error(ret)) {
10206 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10207 if (!target_grouplist)
10208 goto efault;
10209 for(i = 0;i < ret; i++)
10210 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10211 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10214 break;
10215 case TARGET_NR_setgroups:
10217 int gidsetsize = arg1;
10218 target_id *target_grouplist;
10219 gid_t *grouplist = NULL;
10220 int i;
10221 if (gidsetsize) {
10222 grouplist = alloca(gidsetsize * sizeof(gid_t));
10223 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10224 if (!target_grouplist) {
10225 ret = -TARGET_EFAULT;
10226 goto fail;
10228 for (i = 0; i < gidsetsize; i++) {
10229 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10231 unlock_user(target_grouplist, arg2, 0);
10233 ret = get_errno(setgroups(gidsetsize, grouplist));
10235 break;
10236 case TARGET_NR_fchown:
10237 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10238 break;
10239 #if defined(TARGET_NR_fchownat)
10240 case TARGET_NR_fchownat:
10241 if (!(p = lock_user_string(arg2)))
10242 goto efault;
10243 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10244 low2highgid(arg4), arg5));
10245 unlock_user(p, arg2, 0);
10246 break;
10247 #endif
10248 #ifdef TARGET_NR_setresuid
10249 case TARGET_NR_setresuid:
10250 ret = get_errno(sys_setresuid(low2highuid(arg1),
10251 low2highuid(arg2),
10252 low2highuid(arg3)));
10253 break;
10254 #endif
10255 #ifdef TARGET_NR_getresuid
10256 case TARGET_NR_getresuid:
10258 uid_t ruid, euid, suid;
10259 ret = get_errno(getresuid(&ruid, &euid, &suid));
10260 if (!is_error(ret)) {
10261 if (put_user_id(high2lowuid(ruid), arg1)
10262 || put_user_id(high2lowuid(euid), arg2)
10263 || put_user_id(high2lowuid(suid), arg3))
10264 goto efault;
10267 break;
10268 #endif
10269 #ifdef TARGET_NR_getresgid
10270 case TARGET_NR_setresgid:
10271 ret = get_errno(sys_setresgid(low2highgid(arg1),
10272 low2highgid(arg2),
10273 low2highgid(arg3)));
10274 break;
10275 #endif
10276 #ifdef TARGET_NR_getresgid
10277 case TARGET_NR_getresgid:
10279 gid_t rgid, egid, sgid;
10280 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10281 if (!is_error(ret)) {
10282 if (put_user_id(high2lowgid(rgid), arg1)
10283 || put_user_id(high2lowgid(egid), arg2)
10284 || put_user_id(high2lowgid(sgid), arg3))
10285 goto efault;
10288 break;
10289 #endif
10290 #ifdef TARGET_NR_chown
10291 case TARGET_NR_chown:
10292 if (!(p = lock_user_string(arg1)))
10293 goto efault;
10294 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10295 unlock_user(p, arg1, 0);
10296 break;
10297 #endif
10298 case TARGET_NR_setuid:
10299 ret = get_errno(sys_setuid(low2highuid(arg1)));
10300 break;
10301 case TARGET_NR_setgid:
10302 ret = get_errno(sys_setgid(low2highgid(arg1)));
10303 break;
10304 case TARGET_NR_setfsuid:
10305 ret = get_errno(setfsuid(arg1));
10306 break;
10307 case TARGET_NR_setfsgid:
10308 ret = get_errno(setfsgid(arg1));
10309 break;
10311 #ifdef TARGET_NR_lchown32
10312 case TARGET_NR_lchown32:
10313 if (!(p = lock_user_string(arg1)))
10314 goto efault;
10315 ret = get_errno(lchown(p, arg2, arg3));
10316 unlock_user(p, arg1, 0);
10317 break;
10318 #endif
10319 #ifdef TARGET_NR_getuid32
10320 case TARGET_NR_getuid32:
10321 ret = get_errno(getuid());
10322 break;
10323 #endif
10325 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10326 /* Alpha specific */
10327 case TARGET_NR_getxuid:
10329 uid_t euid;
10330 euid=geteuid();
10331 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10333 ret = get_errno(getuid());
10334 break;
10335 #endif
10336 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10337 /* Alpha specific */
10338 case TARGET_NR_getxgid:
10340 uid_t egid;
10341 egid=getegid();
10342 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10344 ret = get_errno(getgid());
10345 break;
10346 #endif
10347 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10348 /* Alpha specific */
10349 case TARGET_NR_osf_getsysinfo:
10350 ret = -TARGET_EOPNOTSUPP;
10351 switch (arg1) {
10352 case TARGET_GSI_IEEE_FP_CONTROL:
10354 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10356 /* Copied from linux ieee_fpcr_to_swcr. */
10357 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10358 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10359 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10360 | SWCR_TRAP_ENABLE_DZE
10361 | SWCR_TRAP_ENABLE_OVF);
10362 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10363 | SWCR_TRAP_ENABLE_INE);
10364 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10365 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10367 if (put_user_u64 (swcr, arg2))
10368 goto efault;
10369 ret = 0;
10371 break;
10373 /* case GSI_IEEE_STATE_AT_SIGNAL:
10374 -- Not implemented in linux kernel.
10375 case GSI_UACPROC:
10376 -- Retrieves current unaligned access state; not much used.
10377 case GSI_PROC_TYPE:
10378 -- Retrieves implver information; surely not used.
10379 case GSI_GET_HWRPB:
10380 -- Grabs a copy of the HWRPB; surely not used.
10383 break;
10384 #endif
10385 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10386 /* Alpha specific */
10387 case TARGET_NR_osf_setsysinfo:
10388 ret = -TARGET_EOPNOTSUPP;
10389 switch (arg1) {
10390 case TARGET_SSI_IEEE_FP_CONTROL:
10392 uint64_t swcr, fpcr, orig_fpcr;
10394 if (get_user_u64 (swcr, arg2)) {
10395 goto efault;
10397 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10398 fpcr = orig_fpcr & FPCR_DYN_MASK;
10400 /* Copied from linux ieee_swcr_to_fpcr. */
10401 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10402 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10403 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10404 | SWCR_TRAP_ENABLE_DZE
10405 | SWCR_TRAP_ENABLE_OVF)) << 48;
10406 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10407 | SWCR_TRAP_ENABLE_INE)) << 57;
10408 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10409 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10411 cpu_alpha_store_fpcr(cpu_env, fpcr);
10412 ret = 0;
10414 break;
10416 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10418 uint64_t exc, fpcr, orig_fpcr;
10419 int si_code;
10421 if (get_user_u64(exc, arg2)) {
10422 goto efault;
10425 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10427 /* We only add to the exception status here. */
10428 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10430 cpu_alpha_store_fpcr(cpu_env, fpcr);
10431 ret = 0;
10433 /* Old exceptions are not signaled. */
10434 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10436 /* If any exceptions set by this call,
10437 and are unmasked, send a signal. */
10438 si_code = 0;
10439 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10440 si_code = TARGET_FPE_FLTRES;
10442 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10443 si_code = TARGET_FPE_FLTUND;
10445 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10446 si_code = TARGET_FPE_FLTOVF;
10448 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10449 si_code = TARGET_FPE_FLTDIV;
10451 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10452 si_code = TARGET_FPE_FLTINV;
10454 if (si_code != 0) {
10455 target_siginfo_t info;
10456 info.si_signo = SIGFPE;
10457 info.si_errno = 0;
10458 info.si_code = si_code;
10459 info._sifields._sigfault._addr
10460 = ((CPUArchState *)cpu_env)->pc;
10461 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10464 break;
10466 /* case SSI_NVPAIRS:
10467 -- Used with SSIN_UACPROC to enable unaligned accesses.
10468 case SSI_IEEE_STATE_AT_SIGNAL:
10469 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10470 -- Not implemented in linux kernel
10473 break;
10474 #endif
10475 #ifdef TARGET_NR_osf_sigprocmask
10476 /* Alpha specific. */
10477 case TARGET_NR_osf_sigprocmask:
10479 abi_ulong mask;
10480 int how;
10481 sigset_t set, oldset;
10483 switch(arg1) {
10484 case TARGET_SIG_BLOCK:
10485 how = SIG_BLOCK;
10486 break;
10487 case TARGET_SIG_UNBLOCK:
10488 how = SIG_UNBLOCK;
10489 break;
10490 case TARGET_SIG_SETMASK:
10491 how = SIG_SETMASK;
10492 break;
10493 default:
10494 ret = -TARGET_EINVAL;
10495 goto fail;
10497 mask = arg2;
10498 target_to_host_old_sigset(&set, &mask);
10499 ret = do_sigprocmask(how, &set, &oldset);
10500 if (!ret) {
10501 host_to_target_old_sigset(&mask, &oldset);
10502 ret = mask;
10505 break;
10506 #endif
10508 #ifdef TARGET_NR_getgid32
10509 case TARGET_NR_getgid32:
10510 ret = get_errno(getgid());
10511 break;
10512 #endif
10513 #ifdef TARGET_NR_geteuid32
10514 case TARGET_NR_geteuid32:
10515 ret = get_errno(geteuid());
10516 break;
10517 #endif
10518 #ifdef TARGET_NR_getegid32
10519 case TARGET_NR_getegid32:
10520 ret = get_errno(getegid());
10521 break;
10522 #endif
10523 #ifdef TARGET_NR_setreuid32
10524 case TARGET_NR_setreuid32:
10525 ret = get_errno(setreuid(arg1, arg2));
10526 break;
10527 #endif
10528 #ifdef TARGET_NR_setregid32
10529 case TARGET_NR_setregid32:
10530 ret = get_errno(setregid(arg1, arg2));
10531 break;
10532 #endif
10533 #ifdef TARGET_NR_getgroups32
10534 case TARGET_NR_getgroups32:
10536 int gidsetsize = arg1;
10537 uint32_t *target_grouplist;
10538 gid_t *grouplist;
10539 int i;
10541 grouplist = alloca(gidsetsize * sizeof(gid_t));
10542 ret = get_errno(getgroups(gidsetsize, grouplist));
10543 if (gidsetsize == 0)
10544 break;
10545 if (!is_error(ret)) {
10546 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10547 if (!target_grouplist) {
10548 ret = -TARGET_EFAULT;
10549 goto fail;
10551 for(i = 0;i < ret; i++)
10552 target_grouplist[i] = tswap32(grouplist[i]);
10553 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10556 break;
10557 #endif
10558 #ifdef TARGET_NR_setgroups32
10559 case TARGET_NR_setgroups32:
10561 int gidsetsize = arg1;
10562 uint32_t *target_grouplist;
10563 gid_t *grouplist;
10564 int i;
10566 grouplist = alloca(gidsetsize * sizeof(gid_t));
10567 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10568 if (!target_grouplist) {
10569 ret = -TARGET_EFAULT;
10570 goto fail;
10572 for(i = 0;i < gidsetsize; i++)
10573 grouplist[i] = tswap32(target_grouplist[i]);
10574 unlock_user(target_grouplist, arg2, 0);
10575 ret = get_errno(setgroups(gidsetsize, grouplist));
10577 break;
10578 #endif
10579 #ifdef TARGET_NR_fchown32
10580 case TARGET_NR_fchown32:
10581 ret = get_errno(fchown(arg1, arg2, arg3));
10582 break;
10583 #endif
10584 #ifdef TARGET_NR_setresuid32
10585 case TARGET_NR_setresuid32:
10586 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
10587 break;
10588 #endif
10589 #ifdef TARGET_NR_getresuid32
10590 case TARGET_NR_getresuid32:
10592 uid_t ruid, euid, suid;
10593 ret = get_errno(getresuid(&ruid, &euid, &suid));
10594 if (!is_error(ret)) {
10595 if (put_user_u32(ruid, arg1)
10596 || put_user_u32(euid, arg2)
10597 || put_user_u32(suid, arg3))
10598 goto efault;
10601 break;
10602 #endif
10603 #ifdef TARGET_NR_setresgid32
10604 case TARGET_NR_setresgid32:
10605 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
10606 break;
10607 #endif
10608 #ifdef TARGET_NR_getresgid32
10609 case TARGET_NR_getresgid32:
10611 gid_t rgid, egid, sgid;
10612 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10613 if (!is_error(ret)) {
10614 if (put_user_u32(rgid, arg1)
10615 || put_user_u32(egid, arg2)
10616 || put_user_u32(sgid, arg3))
10617 goto efault;
10620 break;
10621 #endif
10622 #ifdef TARGET_NR_chown32
10623 case TARGET_NR_chown32:
10624 if (!(p = lock_user_string(arg1)))
10625 goto efault;
10626 ret = get_errno(chown(p, arg2, arg3));
10627 unlock_user(p, arg1, 0);
10628 break;
10629 #endif
10630 #ifdef TARGET_NR_setuid32
10631 case TARGET_NR_setuid32:
10632 ret = get_errno(sys_setuid(arg1));
10633 break;
10634 #endif
10635 #ifdef TARGET_NR_setgid32
10636 case TARGET_NR_setgid32:
10637 ret = get_errno(sys_setgid(arg1));
10638 break;
10639 #endif
10640 #ifdef TARGET_NR_setfsuid32
10641 case TARGET_NR_setfsuid32:
10642 ret = get_errno(setfsuid(arg1));
10643 break;
10644 #endif
10645 #ifdef TARGET_NR_setfsgid32
10646 case TARGET_NR_setfsgid32:
10647 ret = get_errno(setfsgid(arg1));
10648 break;
10649 #endif
10651 case TARGET_NR_pivot_root:
10652 goto unimplemented;
10653 #ifdef TARGET_NR_mincore
10654 case TARGET_NR_mincore:
10656 void *a;
10657 ret = -TARGET_EFAULT;
10658 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
10659 goto efault;
10660 if (!(p = lock_user_string(arg3)))
10661 goto mincore_fail;
10662 ret = get_errno(mincore(a, arg2, p));
10663 unlock_user(p, arg3, ret);
10664 mincore_fail:
10665 unlock_user(a, arg1, 0);
10667 break;
10668 #endif
10669 #ifdef TARGET_NR_arm_fadvise64_64
10670 case TARGET_NR_arm_fadvise64_64:
10671 /* arm_fadvise64_64 looks like fadvise64_64 but
10672 * with different argument order: fd, advice, offset, len
10673 * rather than the usual fd, offset, len, advice.
10674 * Note that offset and len are both 64-bit so appear as
10675 * pairs of 32-bit registers.
10677 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10678 target_offset64(arg5, arg6), arg2);
10679 ret = -host_to_target_errno(ret);
10680 break;
10681 #endif
10683 #if TARGET_ABI_BITS == 32
10685 #ifdef TARGET_NR_fadvise64_64
10686 case TARGET_NR_fadvise64_64:
10687 /* 6 args: fd, offset (high, low), len (high, low), advice */
10688 if (regpairs_aligned(cpu_env)) {
10689 /* offset is in (3,4), len in (5,6) and advice in 7 */
10690 arg2 = arg3;
10691 arg3 = arg4;
10692 arg4 = arg5;
10693 arg5 = arg6;
10694 arg6 = arg7;
10696 ret = -host_to_target_errno(posix_fadvise(arg1,
10697 target_offset64(arg2, arg3),
10698 target_offset64(arg4, arg5),
10699 arg6));
10700 break;
10701 #endif
10703 #ifdef TARGET_NR_fadvise64
10704 case TARGET_NR_fadvise64:
10705 /* 5 args: fd, offset (high, low), len, advice */
10706 if (regpairs_aligned(cpu_env)) {
10707 /* offset is in (3,4), len in 5 and advice in 6 */
10708 arg2 = arg3;
10709 arg3 = arg4;
10710 arg4 = arg5;
10711 arg5 = arg6;
10713 ret = -host_to_target_errno(posix_fadvise(arg1,
10714 target_offset64(arg2, arg3),
10715 arg4, arg5));
10716 break;
10717 #endif
10719 #else /* not a 32-bit ABI */
10720 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10721 #ifdef TARGET_NR_fadvise64_64
10722 case TARGET_NR_fadvise64_64:
10723 #endif
10724 #ifdef TARGET_NR_fadvise64
10725 case TARGET_NR_fadvise64:
10726 #endif
10727 #ifdef TARGET_S390X
10728 switch (arg4) {
10729 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10730 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10731 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10732 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10733 default: break;
10735 #endif
10736 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10737 break;
10738 #endif
10739 #endif /* end of 64-bit ABI fadvise handling */
10741 #ifdef TARGET_NR_madvise
10742 case TARGET_NR_madvise:
10743 /* A straight passthrough may not be safe because qemu sometimes
10744 turns private file-backed mappings into anonymous mappings.
10745 This will break MADV_DONTNEED.
10746 This is a hint, so ignoring and returning success is ok. */
10747 ret = get_errno(0);
10748 break;
10749 #endif
10750 #if TARGET_ABI_BITS == 32
10751 case TARGET_NR_fcntl64:
10753 int cmd;
10754 struct flock64 fl;
10755 from_flock64_fn *copyfrom = copy_from_user_flock64;
10756 to_flock64_fn *copyto = copy_to_user_flock64;
10758 #ifdef TARGET_ARM
10759 if (((CPUARMState *)cpu_env)->eabi) {
10760 copyfrom = copy_from_user_eabi_flock64;
10761 copyto = copy_to_user_eabi_flock64;
10763 #endif
10765 cmd = target_to_host_fcntl_cmd(arg2);
10766 if (cmd == -TARGET_EINVAL) {
10767 ret = cmd;
10768 break;
10771 switch(arg2) {
10772 case TARGET_F_GETLK64:
10773 ret = copyfrom(&fl, arg3);
10774 if (ret) {
10775 break;
10777 ret = get_errno(fcntl(arg1, cmd, &fl));
10778 if (ret == 0) {
10779 ret = copyto(arg3, &fl);
10781 break;
10783 case TARGET_F_SETLK64:
10784 case TARGET_F_SETLKW64:
10785 ret = copyfrom(&fl, arg3);
10786 if (ret) {
10787 break;
10789 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10790 break;
10791 default:
10792 ret = do_fcntl(arg1, arg2, arg3);
10793 break;
10795 break;
10797 #endif
10798 #ifdef TARGET_NR_cacheflush
10799 case TARGET_NR_cacheflush:
10800 /* self-modifying code is handled automatically, so nothing needed */
10801 ret = 0;
10802 break;
10803 #endif
10804 #ifdef TARGET_NR_security
10805 case TARGET_NR_security:
10806 goto unimplemented;
10807 #endif
10808 #ifdef TARGET_NR_getpagesize
10809 case TARGET_NR_getpagesize:
10810 ret = TARGET_PAGE_SIZE;
10811 break;
10812 #endif
10813 case TARGET_NR_gettid:
10814 ret = get_errno(gettid());
10815 break;
10816 #ifdef TARGET_NR_readahead
10817 case TARGET_NR_readahead:
10818 #if TARGET_ABI_BITS == 32
10819 if (regpairs_aligned(cpu_env)) {
10820 arg2 = arg3;
10821 arg3 = arg4;
10822 arg4 = arg5;
10824 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
10825 #else
10826 ret = get_errno(readahead(arg1, arg2, arg3));
10827 #endif
10828 break;
10829 #endif
10830 #ifdef CONFIG_ATTR
10831 #ifdef TARGET_NR_setxattr
10832 case TARGET_NR_listxattr:
10833 case TARGET_NR_llistxattr:
10835 void *p, *b = 0;
10836 if (arg2) {
10837 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10838 if (!b) {
10839 ret = -TARGET_EFAULT;
10840 break;
10843 p = lock_user_string(arg1);
10844 if (p) {
10845 if (num == TARGET_NR_listxattr) {
10846 ret = get_errno(listxattr(p, b, arg3));
10847 } else {
10848 ret = get_errno(llistxattr(p, b, arg3));
10850 } else {
10851 ret = -TARGET_EFAULT;
10853 unlock_user(p, arg1, 0);
10854 unlock_user(b, arg2, arg3);
10855 break;
10857 case TARGET_NR_flistxattr:
10859 void *b = 0;
10860 if (arg2) {
10861 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10862 if (!b) {
10863 ret = -TARGET_EFAULT;
10864 break;
10867 ret = get_errno(flistxattr(arg1, b, arg3));
10868 unlock_user(b, arg2, arg3);
10869 break;
10871 case TARGET_NR_setxattr:
10872 case TARGET_NR_lsetxattr:
10874 void *p, *n, *v = 0;
10875 if (arg3) {
10876 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10877 if (!v) {
10878 ret = -TARGET_EFAULT;
10879 break;
10882 p = lock_user_string(arg1);
10883 n = lock_user_string(arg2);
10884 if (p && n) {
10885 if (num == TARGET_NR_setxattr) {
10886 ret = get_errno(setxattr(p, n, v, arg4, arg5));
10887 } else {
10888 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10890 } else {
10891 ret = -TARGET_EFAULT;
10893 unlock_user(p, arg1, 0);
10894 unlock_user(n, arg2, 0);
10895 unlock_user(v, arg3, 0);
10897 break;
10898 case TARGET_NR_fsetxattr:
10900 void *n, *v = 0;
10901 if (arg3) {
10902 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10903 if (!v) {
10904 ret = -TARGET_EFAULT;
10905 break;
10908 n = lock_user_string(arg2);
10909 if (n) {
10910 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10911 } else {
10912 ret = -TARGET_EFAULT;
10914 unlock_user(n, arg2, 0);
10915 unlock_user(v, arg3, 0);
10917 break;
10918 case TARGET_NR_getxattr:
10919 case TARGET_NR_lgetxattr:
10921 void *p, *n, *v = 0;
10922 if (arg3) {
10923 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10924 if (!v) {
10925 ret = -TARGET_EFAULT;
10926 break;
10929 p = lock_user_string(arg1);
10930 n = lock_user_string(arg2);
10931 if (p && n) {
10932 if (num == TARGET_NR_getxattr) {
10933 ret = get_errno(getxattr(p, n, v, arg4));
10934 } else {
10935 ret = get_errno(lgetxattr(p, n, v, arg4));
10937 } else {
10938 ret = -TARGET_EFAULT;
10940 unlock_user(p, arg1, 0);
10941 unlock_user(n, arg2, 0);
10942 unlock_user(v, arg3, arg4);
10944 break;
10945 case TARGET_NR_fgetxattr:
10947 void *n, *v = 0;
10948 if (arg3) {
10949 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10950 if (!v) {
10951 ret = -TARGET_EFAULT;
10952 break;
10955 n = lock_user_string(arg2);
10956 if (n) {
10957 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10958 } else {
10959 ret = -TARGET_EFAULT;
10961 unlock_user(n, arg2, 0);
10962 unlock_user(v, arg3, arg4);
10964 break;
10965 case TARGET_NR_removexattr:
10966 case TARGET_NR_lremovexattr:
10968 void *p, *n;
10969 p = lock_user_string(arg1);
10970 n = lock_user_string(arg2);
10971 if (p && n) {
10972 if (num == TARGET_NR_removexattr) {
10973 ret = get_errno(removexattr(p, n));
10974 } else {
10975 ret = get_errno(lremovexattr(p, n));
10977 } else {
10978 ret = -TARGET_EFAULT;
10980 unlock_user(p, arg1, 0);
10981 unlock_user(n, arg2, 0);
10983 break;
10984 case TARGET_NR_fremovexattr:
10986 void *n;
10987 n = lock_user_string(arg2);
10988 if (n) {
10989 ret = get_errno(fremovexattr(arg1, n));
10990 } else {
10991 ret = -TARGET_EFAULT;
10993 unlock_user(n, arg2, 0);
10995 break;
10996 #endif
10997 #endif /* CONFIG_ATTR */
10998 #ifdef TARGET_NR_set_thread_area
10999 case TARGET_NR_set_thread_area:
11000 #if defined(TARGET_MIPS)
11001 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11002 ret = 0;
11003 break;
11004 #elif defined(TARGET_CRIS)
11005 if (arg1 & 0xff)
11006 ret = -TARGET_EINVAL;
11007 else {
11008 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11009 ret = 0;
11011 break;
11012 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11013 ret = do_set_thread_area(cpu_env, arg1);
11014 break;
11015 #elif defined(TARGET_M68K)
11017 TaskState *ts = cpu->opaque;
11018 ts->tp_value = arg1;
11019 ret = 0;
11020 break;
11022 #else
11023 goto unimplemented_nowarn;
11024 #endif
11025 #endif
11026 #ifdef TARGET_NR_get_thread_area
11027 case TARGET_NR_get_thread_area:
11028 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11029 ret = do_get_thread_area(cpu_env, arg1);
11030 break;
11031 #elif defined(TARGET_M68K)
11033 TaskState *ts = cpu->opaque;
11034 ret = ts->tp_value;
11035 break;
11037 #else
11038 goto unimplemented_nowarn;
11039 #endif
11040 #endif
11041 #ifdef TARGET_NR_getdomainname
11042 case TARGET_NR_getdomainname:
11043 goto unimplemented_nowarn;
11044 #endif
11046 #ifdef TARGET_NR_clock_gettime
11047 case TARGET_NR_clock_gettime:
11049 struct timespec ts;
11050 ret = get_errno(clock_gettime(arg1, &ts));
11051 if (!is_error(ret)) {
11052 host_to_target_timespec(arg2, &ts);
11054 break;
11056 #endif
11057 #ifdef TARGET_NR_clock_getres
11058 case TARGET_NR_clock_getres:
11060 struct timespec ts;
11061 ret = get_errno(clock_getres(arg1, &ts));
11062 if (!is_error(ret)) {
11063 host_to_target_timespec(arg2, &ts);
11065 break;
11067 #endif
11068 #ifdef TARGET_NR_clock_nanosleep
11069 case TARGET_NR_clock_nanosleep:
11071 struct timespec ts;
11072 target_to_host_timespec(&ts, arg3);
11073 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11074 &ts, arg4 ? &ts : NULL));
11075 if (arg4)
11076 host_to_target_timespec(arg4, &ts);
11078 #if defined(TARGET_PPC)
11079 /* clock_nanosleep is odd in that it returns positive errno values.
11080 * On PPC, CR0 bit 3 should be set in such a situation. */
11081 if (ret && ret != -TARGET_ERESTARTSYS) {
11082 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11084 #endif
11085 break;
11087 #endif
11089 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11090 case TARGET_NR_set_tid_address:
11091 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11092 break;
11093 #endif
11095 case TARGET_NR_tkill:
11096 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11097 break;
11099 case TARGET_NR_tgkill:
11100 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11101 target_to_host_signal(arg3)));
11102 break;
11104 #ifdef TARGET_NR_set_robust_list
11105 case TARGET_NR_set_robust_list:
11106 case TARGET_NR_get_robust_list:
11107 /* The ABI for supporting robust futexes has userspace pass
11108 * the kernel a pointer to a linked list which is updated by
11109 * userspace after the syscall; the list is walked by the kernel
11110 * when the thread exits. Since the linked list in QEMU guest
11111 * memory isn't a valid linked list for the host and we have
11112 * no way to reliably intercept the thread-death event, we can't
11113 * support these. Silently return ENOSYS so that guest userspace
11114 * falls back to a non-robust futex implementation (which should
11115 * be OK except in the corner case of the guest crashing while
11116 * holding a mutex that is shared with another process via
11117 * shared memory).
11119 goto unimplemented_nowarn;
11120 #endif
11122 #if defined(TARGET_NR_utimensat)
11123 case TARGET_NR_utimensat:
11125 struct timespec *tsp, ts[2];
11126 if (!arg3) {
11127 tsp = NULL;
11128 } else {
11129 target_to_host_timespec(ts, arg3);
11130 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11131 tsp = ts;
11133 if (!arg2)
11134 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11135 else {
11136 if (!(p = lock_user_string(arg2))) {
11137 ret = -TARGET_EFAULT;
11138 goto fail;
11140 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11141 unlock_user(p, arg2, 0);
11144 break;
11145 #endif
11146 case TARGET_NR_futex:
11147 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11148 break;
11149 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11150 case TARGET_NR_inotify_init:
11151 ret = get_errno(sys_inotify_init());
11152 break;
11153 #endif
11154 #ifdef CONFIG_INOTIFY1
11155 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11156 case TARGET_NR_inotify_init1:
11157 ret = get_errno(sys_inotify_init1(arg1));
11158 break;
11159 #endif
11160 #endif
11161 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11162 case TARGET_NR_inotify_add_watch:
11163 p = lock_user_string(arg2);
11164 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11165 unlock_user(p, arg2, 0);
11166 break;
11167 #endif
11168 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11169 case TARGET_NR_inotify_rm_watch:
11170 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11171 break;
11172 #endif
11174 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11175 case TARGET_NR_mq_open:
11177 struct mq_attr posix_mq_attr, *attrp;
11179 p = lock_user_string(arg1 - 1);
11180 if (arg4 != 0) {
11181 copy_from_user_mq_attr (&posix_mq_attr, arg4);
11182 attrp = &posix_mq_attr;
11183 } else {
11184 attrp = 0;
11186 ret = get_errno(mq_open(p, arg2, arg3, attrp));
11187 unlock_user (p, arg1, 0);
11189 break;
11191 case TARGET_NR_mq_unlink:
11192 p = lock_user_string(arg1 - 1);
11193 ret = get_errno(mq_unlink(p));
11194 unlock_user (p, arg1, 0);
11195 break;
11197 case TARGET_NR_mq_timedsend:
11199 struct timespec ts;
11201 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11202 if (arg5 != 0) {
11203 target_to_host_timespec(&ts, arg5);
11204 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11205 host_to_target_timespec(arg5, &ts);
11206 } else {
11207 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11209 unlock_user (p, arg2, arg3);
11211 break;
11213 case TARGET_NR_mq_timedreceive:
11215 struct timespec ts;
11216 unsigned int prio;
11218 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11219 if (arg5 != 0) {
11220 target_to_host_timespec(&ts, arg5);
11221 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11222 &prio, &ts));
11223 host_to_target_timespec(arg5, &ts);
11224 } else {
11225 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11226 &prio, NULL));
11228 unlock_user (p, arg2, arg3);
11229 if (arg4 != 0)
11230 put_user_u32(prio, arg4);
11232 break;
11234 /* Not implemented for now... */
11235 /* case TARGET_NR_mq_notify: */
11236 /* break; */
11238 case TARGET_NR_mq_getsetattr:
11240 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11241 ret = 0;
11242 if (arg3 != 0) {
11243 ret = mq_getattr(arg1, &posix_mq_attr_out);
11244 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11246 if (arg2 != 0) {
11247 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11248 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
11252 break;
11253 #endif
11255 #ifdef CONFIG_SPLICE
11256 #ifdef TARGET_NR_tee
11257 case TARGET_NR_tee:
11259 ret = get_errno(tee(arg1,arg2,arg3,arg4));
11261 break;
11262 #endif
11263 #ifdef TARGET_NR_splice
11264 case TARGET_NR_splice:
11266 loff_t loff_in, loff_out;
11267 loff_t *ploff_in = NULL, *ploff_out = NULL;
11268 if (arg2) {
11269 if (get_user_u64(loff_in, arg2)) {
11270 goto efault;
11272 ploff_in = &loff_in;
11274 if (arg4) {
11275 if (get_user_u64(loff_out, arg4)) {
11276 goto efault;
11278 ploff_out = &loff_out;
11280 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11281 if (arg2) {
11282 if (put_user_u64(loff_in, arg2)) {
11283 goto efault;
11286 if (arg4) {
11287 if (put_user_u64(loff_out, arg4)) {
11288 goto efault;
11292 break;
11293 #endif
11294 #ifdef TARGET_NR_vmsplice
11295 case TARGET_NR_vmsplice:
11297 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11298 if (vec != NULL) {
11299 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11300 unlock_iovec(vec, arg2, arg3, 0);
11301 } else {
11302 ret = -host_to_target_errno(errno);
11305 break;
11306 #endif
11307 #endif /* CONFIG_SPLICE */
11308 #ifdef CONFIG_EVENTFD
11309 #if defined(TARGET_NR_eventfd)
11310 case TARGET_NR_eventfd:
11311 ret = get_errno(eventfd(arg1, 0));
11312 fd_trans_unregister(ret);
11313 break;
11314 #endif
11315 #if defined(TARGET_NR_eventfd2)
11316 case TARGET_NR_eventfd2:
11318 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11319 if (arg2 & TARGET_O_NONBLOCK) {
11320 host_flags |= O_NONBLOCK;
11322 if (arg2 & TARGET_O_CLOEXEC) {
11323 host_flags |= O_CLOEXEC;
11325 ret = get_errno(eventfd(arg1, host_flags));
11326 fd_trans_unregister(ret);
11327 break;
11329 #endif
11330 #endif /* CONFIG_EVENTFD */
11331 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11332 case TARGET_NR_fallocate:
11333 #if TARGET_ABI_BITS == 32
11334 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11335 target_offset64(arg5, arg6)));
11336 #else
11337 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11338 #endif
11339 break;
11340 #endif
11341 #if defined(CONFIG_SYNC_FILE_RANGE)
11342 #if defined(TARGET_NR_sync_file_range)
11343 case TARGET_NR_sync_file_range:
11344 #if TARGET_ABI_BITS == 32
11345 #if defined(TARGET_MIPS)
11346 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11347 target_offset64(arg5, arg6), arg7));
11348 #else
11349 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11350 target_offset64(arg4, arg5), arg6));
11351 #endif /* !TARGET_MIPS */
11352 #else
11353 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11354 #endif
11355 break;
11356 #endif
11357 #if defined(TARGET_NR_sync_file_range2)
11358 case TARGET_NR_sync_file_range2:
11359 /* This is like sync_file_range but the arguments are reordered */
11360 #if TARGET_ABI_BITS == 32
11361 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11362 target_offset64(arg5, arg6), arg2));
11363 #else
11364 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11365 #endif
11366 break;
11367 #endif
11368 #endif
11369 #if defined(TARGET_NR_signalfd4)
11370 case TARGET_NR_signalfd4:
11371 ret = do_signalfd4(arg1, arg2, arg4);
11372 break;
11373 #endif
11374 #if defined(TARGET_NR_signalfd)
11375 case TARGET_NR_signalfd:
11376 ret = do_signalfd4(arg1, arg2, 0);
11377 break;
11378 #endif
11379 #if defined(CONFIG_EPOLL)
11380 #if defined(TARGET_NR_epoll_create)
11381 case TARGET_NR_epoll_create:
11382 ret = get_errno(epoll_create(arg1));
11383 break;
11384 #endif
11385 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11386 case TARGET_NR_epoll_create1:
11387 ret = get_errno(epoll_create1(arg1));
11388 break;
11389 #endif
11390 #if defined(TARGET_NR_epoll_ctl)
/* epoll_ctl(2): convert the guest epoll_event (if any) to host layout,
 * then forward. NOTE(review): the extraction dropped several lines here
 * (blob numbering jumps over 11392, 11399, 11404, 11408, 11411) -- in the
 * real file those are the case block's braces and comment closers; the
 * code as shown will not compile verbatim. */
11391 case TARGET_NR_epoll_ctl:
11393 struct epoll_event ep;
11394 struct epoll_event *epp = 0;
/* arg4 (the event pointer) may legitimately be NULL for EPOLL_CTL_DEL. */
11395 if (arg4) {
11396 struct target_epoll_event *target_ep;
11397 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11398 goto efault;
11400 ep.events = tswap32(target_ep->events);
11401 /* The epoll_data_t union is just opaque data to the kernel,
11402 * so we transfer all 64 bits across and need not worry what
11403 * actual data type it is.
11405 ep.data.u64 = tswap64(target_ep->data.u64);
11406 unlock_user_struct(target_ep, arg4, 0);
11407 epp = &ep;
11409 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11410 break;
11412 #endif
11414 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
/* epoll_wait/epoll_pwait: shared implementation -- both funnel into
 * safe_epoll_pwait(), with epoll_wait passing a NULL sigmask. Events are
 * gathered into a host-side buffer then byteswapped back to the guest.
 * NOTE(review): the extraction dropped brace/blank lines throughout this
 * case (blob numbering jumps, e.g. 11421, 11433, 11448); the real file's
 * block structure is not fully visible here. */
11415 #if defined(TARGET_NR_epoll_wait)
11416 case TARGET_NR_epoll_wait:
11417 #endif
11418 #if defined(TARGET_NR_epoll_pwait)
11419 case TARGET_NR_epoll_pwait:
11420 #endif
11422 struct target_epoll_event *target_ep;
11423 struct epoll_event *ep;
11424 int epfd = arg1;
11425 int maxevents = arg3;
11426 int timeout = arg4;
/* Lock the whole guest output array up front so partial results can be
 * written back after the wait. NOTE(review): maxevents comes from the
 * guest unvalidated; the multiplication and the alloca below could
 * overflow/blow the stack for hostile values -- confirm whether a bound
 * check exists upstream. */
11428 target_ep = lock_user(VERIFY_WRITE, arg2,
11429 maxevents * sizeof(struct target_epoll_event), 1);
11430 if (!target_ep) {
11431 goto efault;
11434 ep = alloca(maxevents * sizeof(struct epoll_event));
/* Dispatch on the actual syscall number to pick plain wait vs pwait. */
11436 switch (num) {
11437 #if defined(TARGET_NR_epoll_pwait)
11438 case TARGET_NR_epoll_pwait:
11440 target_sigset_t *target_set;
11441 sigset_t _set, *set = &_set;
/* arg5 = guest sigmask pointer (may be NULL), arg6 = sigsetsize which
 * must match the target sigset size exactly. */
11443 if (arg5) {
11444 if (arg6 != sizeof(target_sigset_t)) {
11445 ret = -TARGET_EINVAL;
11446 break;
11449 target_set = lock_user(VERIFY_READ, arg5,
11450 sizeof(target_sigset_t), 1);
11451 if (!target_set) {
/* Release the output array before bailing to avoid a lock leak. */
11452 unlock_user(target_ep, arg2, 0);
11453 goto efault;
11455 target_to_host_sigset(set, target_set);
11456 unlock_user(target_set, arg5, 0);
11457 } else {
11458 set = NULL;
11461 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11462 set, SIGSET_T_SIZE));
11463 break;
11465 #endif
11466 #if defined(TARGET_NR_epoll_wait)
11467 case TARGET_NR_epoll_wait:
11468 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11469 NULL, 0));
11470 break;
11471 #endif
11472 default:
11473 ret = -TARGET_ENOSYS;
/* On success ret is the number of ready events: swap each one back into
 * guest byte order. epoll_data is opaque, so the full u64 is copied. */
11475 if (!is_error(ret)) {
11476 int i;
11477 for (i = 0; i < ret; i++) {
11478 target_ep[i].events = tswap32(ep[i].events);
11479 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11482 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
11483 break;
11485 #endif
11486 #endif
11487 #ifdef TARGET_NR_prlimit64
/* prlimit64(2): get and/or set a resource limit for another process.
 * Both the new-limit input (arg3) and old-limit output (arg4) pointers
 * are optional. NOTE(review): the extraction dropped the case's brace
 * lines (blob numbering jumps over 11489, 11497, 11502, 11514). */
11488 case TARGET_NR_prlimit64:
11490 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11491 struct target_rlimit64 *target_rnew, *target_rold;
11492 struct host_rlimit64 rnew, rold, *rnewp = 0;
/* Resource numbers differ between guest and host ABIs; translate first. */
11493 int resource = target_to_host_resource(arg2);
11494 if (arg3) {
11495 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11496 goto efault;
11498 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11499 rnew.rlim_max = tswap64(target_rnew->rlim_max);
11500 unlock_user_struct(target_rnew, arg3, 0);
11501 rnewp = &rnew;
/* Pass NULL for whichever of new/old the guest did not supply. */
11504 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11505 if (!is_error(ret) && arg4) {
11506 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11507 goto efault;
11509 target_rold->rlim_cur = tswap64(rold.rlim_cur);
11510 target_rold->rlim_max = tswap64(rold.rlim_max);
11511 unlock_user_struct(target_rold, arg4, 1);
11513 break;
11515 #endif
11516 #ifdef TARGET_NR_gethostname
/* gethostname(2): fill the guest buffer (arg1, length arg2) with the
 * host's hostname. The guest buffer is locked for write, filled by the
 * host call, then unlocked to copy back. */
11517 case TARGET_NR_gethostname:
11519 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11520 if (name) {
11521 ret = get_errno(gethostname(name, arg2));
11522 unlock_user(name, arg1, arg2);
11523 } else {
11524 ret = -TARGET_EFAULT;
11526 break;
11528 #endif
11529 #ifdef TARGET_NR_atomic_cmpxchg_32
/* atomic_cmpxchg_32 (arch-specific kernel helper): compare the 32-bit
 * word at guest address arg6 with arg2 and, if equal, store arg1 there.
 * Returns the value previously in memory. */
11530 case TARGET_NR_atomic_cmpxchg_32:
11532 /* should use start_exclusive from main.c */
11533 abi_ulong mem_value;
/* If the guest address is unreadable, queue a SIGSEGV at that address. */
11534 if (get_user_u32(mem_value, arg6)) {
11535 target_siginfo_t info;
11536 info.si_signo = SIGSEGV;
11537 info.si_errno = 0;
11538 info.si_code = TARGET_SEGV_MAPERR;
11539 info._sifields._sigfault._addr = arg6;
11540 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
/* NOTE(review): this sentinel is immediately clobbered below by
 * 'ret = mem_value', and on this fault path mem_value is never
 * assigned -- ret ends up uninitialized. Looks like a latent bug;
 * the fault path should probably break out here. */
11541 ret = 0xdeadbeef;
11544 if (mem_value == arg2)
11545 put_user_u32(arg1, arg6);
11546 ret = mem_value;
11547 break;
11549 #endif
11550 #ifdef TARGET_NR_atomic_barrier
11551 case TARGET_NR_atomic_barrier:
11553 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11554 ret = 0;
11555 break;
11557 #endif
11559 #ifdef TARGET_NR_timer_create
/* timer_create(2): allocate a slot in the g_posix_timers table, create a
 * host POSIX timer there, and hand the guest an encoded timer id
 * (TIMER_MAGIC | index) via *arg3. NOTE(review): blob numbering jumps in
 * this case (11561, 11573, 11588-11590) indicate brace lines dropped by
 * the extraction. */
11560 case TARGET_NR_timer_create:
11562 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11564 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11566 int clkid = arg1;
11567 int timer_index = next_free_host_timer();
/* Table full: report EAGAIN rather than creating an untracked timer. */
11569 if (timer_index < 0) {
11570 ret = -TARGET_EAGAIN;
11571 } else {
11572 timer_t *phtimer = g_posix_timers + timer_index;
/* sevp is optional; NULL means the kernel's default notification. */
11574 if (arg2) {
11575 phost_sevp = &host_sevp;
11576 ret = target_to_host_sigevent(phost_sevp, arg2);
11577 if (ret != 0) {
11578 break;
11582 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11583 if (ret) {
11584 phtimer = NULL;
11585 } else {
/* Success: publish the encoded id to the guest's timer_t pointer. */
11586 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11587 goto efault;
11591 break;
11593 #endif
11595 #ifdef TARGET_NR_timer_settime
/* timer_settime(2): arm/disarm the host timer behind the guest timer id. */
11596 case TARGET_NR_timer_settime:
11598 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11599 * struct itimerspec * old_value */
11600 target_timer_t timerid = get_timer_id(arg1);
/* get_timer_id() validates the TIMER_MAGIC encoding; negative means a
 * ready-made target errno to return as-is. */
11602 if (timerid < 0) {
11603 ret = timerid;
11604 } else if (arg3 == 0) {
11605 ret = -TARGET_EINVAL;
11606 } else {
11607 timer_t htimer = g_posix_timers[timerid];
11608 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
/* NOTE(review): the return value of target_to_host_itimerspec is
 * ignored, so an EFAULT on a bad arg3 pointer is silently dropped. */
11610 target_to_host_itimerspec(&hspec_new, arg3);
11611 ret = get_errno(
11612 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
/* NOTE(review): BUG -- the old value is written back through arg2
 * (the flags argument) instead of arg4 (old_value), and it is written
 * unconditionally rather than only when the guest asked for it. */
11613 host_to_target_itimerspec(arg2, &hspec_old);
11615 break;
11617 #endif
11619 #ifdef TARGET_NR_timer_gettime
/* timer_gettime(2): fetch the remaining time of the host timer and copy
 * it back to the guest itimerspec at arg2. */
11620 case TARGET_NR_timer_gettime:
11622 /* args: timer_t timerid, struct itimerspec *curr_value */
11623 target_timer_t timerid = get_timer_id(arg1);
/* Negative timerid is already a target errno from get_timer_id(). */
11625 if (timerid < 0) {
11626 ret = timerid;
11627 } else if (!arg2) {
11628 ret = -TARGET_EFAULT;
11629 } else {
11630 timer_t htimer = g_posix_timers[timerid];
11631 struct itimerspec hspec;
11632 ret = get_errno(timer_gettime(htimer, &hspec));
/* Copy-back failure overrides a successful host call with EFAULT. */
11634 if (host_to_target_itimerspec(arg2, &hspec)) {
11635 ret = -TARGET_EFAULT;
11638 break;
11640 #endif
11642 #ifdef TARGET_NR_timer_getoverrun
/* timer_getoverrun(2): return the overrun count for the host timer. */
11643 case TARGET_NR_timer_getoverrun:
11645 /* args: timer_t timerid */
11646 target_timer_t timerid = get_timer_id(arg1);
11648 if (timerid < 0) {
11649 ret = timerid;
11650 } else {
11651 timer_t htimer = g_posix_timers[timerid];
11652 ret = get_errno(timer_getoverrun(htimer));
/* NOTE(review): ret here is an overrun count (or negative errno), not a
 * file descriptor -- calling fd_trans_unregister() on it looks wrong;
 * confirm intent against the fd_trans bookkeeping. */
11654 fd_trans_unregister(ret);
11655 break;
11657 #endif
11659 #ifdef TARGET_NR_timer_delete
/* timer_delete(2): destroy the host timer and free its table slot so
 * next_free_host_timer() can reuse it. */
11660 case TARGET_NR_timer_delete:
11662 /* args: timer_t timerid */
11663 target_timer_t timerid = get_timer_id(arg1);
11665 if (timerid < 0) {
11666 ret = timerid;
11667 } else {
11668 timer_t htimer = g_posix_timers[timerid];
11669 ret = get_errno(timer_delete(htimer));
/* Slot is cleared even if timer_delete failed -- presumably deliberate,
 * since a failed delete leaves the handle unusable anyway; confirm. */
11670 g_posix_timers[timerid] = 0;
11672 break;
11674 #endif
11676 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
/* timerfd_create(2): translate guest O_* style flags to host values via
 * the fcntl flag table before creating the fd. */
11677 case TARGET_NR_timerfd_create:
11678 ret = get_errno(timerfd_create(arg1,
11679 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11680 break;
11681 #endif
11683 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
/* timerfd_gettime(2): read current timer value, copy back if arg2 given. */
11684 case TARGET_NR_timerfd_gettime:
11686 struct itimerspec its_curr;
11688 ret = get_errno(timerfd_gettime(arg1, &its_curr));
11690 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11691 goto efault;
11694 break;
11695 #endif
11697 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
/* timerfd_settime(2): new value (arg3) is optional -- NULL disarms via a
 * NULL p_new; old value is copied back only when arg4 is non-NULL. */
11698 case TARGET_NR_timerfd_settime:
11700 struct itimerspec its_new, its_old, *p_new;
11702 if (arg3) {
11703 if (target_to_host_itimerspec(&its_new, arg3)) {
11704 goto efault;
11706 p_new = &its_new;
11707 } else {
11708 p_new = NULL;
11711 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11713 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11714 goto efault;
11717 break;
11718 #endif
11720 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
/* ioprio_get/ioprio_set: I/O scheduling priority. 'which'/'who'/'ioprio'
 * are plain integers with identical encoding on guest and host, so they
 * pass through untranslated. Guarded on __NR_ioprio_* so the build still
 * works with older linux-headers that lack these syscall numbers. */
11721 case TARGET_NR_ioprio_get:
11722 ret = get_errno(ioprio_get(arg1, arg2));
11723 break;
11724 #endif
11726 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11727 case TARGET_NR_ioprio_set:
11728 ret = get_errno(ioprio_set(arg1, arg2, arg3));
11729 break;
11730 #endif
11732 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
/* setns(2)/unshare(2): namespace operations; fd and flag values are
 * shared between guest and host ABIs, so both pass through directly.
 * Compiled only when configure detected setns (CONFIG_SETNS). */
11733 case TARGET_NR_setns:
11734 ret = get_errno(setns(arg1, arg2));
11735 break;
11736 #endif
11737 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11738 case TARGET_NR_unshare:
11739 ret = get_errno(unshare(arg1));
11740 break;
11741 #endif
/* Fallback for syscalls with no emulation: log once and return ENOSYS.
 * The 'unimplemented' label is jumped to from cases elsewhere in this
 * switch; 'unimplemented_nowarn' skips the log line for syscalls whose
 * absence is expected. */
11743 default:
11744 unimplemented:
11745 gemu_log("qemu: Unsupported syscall: %d\n", num);
11746 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11747 unimplemented_nowarn:
11748 #endif
11749 ret = -TARGET_ENOSYS;
11750 break;
/* Common exit path for the whole syscall switch: trace/strace the result
 * and return it to the guest. 'efault' is the shared target for guest
 * memory-access failures throughout the cases above. (The enclosing
 * function continues past this excerpt.) */
11752 fail:
11753 #ifdef DEBUG
11754 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
11755 #endif
11756 if(do_strace)
11757 print_syscall_ret(num, ret);
11758 trace_guest_user_syscall_ret(cpu, num, ret);
11759 return ret;
11760 efault:
11761 ret = -TARGET_EFAULT;
11762 goto fail;