linux-user: Use safe_syscall wrapper for send* and recv* syscalls
[qemu/kevin.git] / linux-user / syscall.c
blobbcae62d068565b837c9d1faf4603f48731d6411b
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/mman.h>
36 #include <sys/swap.h>
37 #include <linux/capability.h>
38 #include <sched.h>
39 #ifdef __ia64__
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
42 #endif
43 #include <sys/socket.h>
44 #include <sys/un.h>
45 #include <sys/uio.h>
46 #include <sys/poll.h>
47 #include <sys/times.h>
48 #include <sys/shm.h>
49 #include <sys/sem.h>
50 #include <sys/statfs.h>
51 #include <utime.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
60 #ifdef CONFIG_TIMERFD
61 #include <sys/timerfd.h>
62 #endif
63 #ifdef TARGET_GPROF
64 #include <sys/gmon.h>
65 #endif
66 #ifdef CONFIG_EVENTFD
67 #include <sys/eventfd.h>
68 #endif
69 #ifdef CONFIG_EPOLL
70 #include <sys/epoll.h>
71 #endif
72 #ifdef CONFIG_ATTR
73 #include "qemu/xattr.h"
74 #endif
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
77 #endif
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
91 #include <linux/kd.h>
92 #include <linux/mtio.h>
93 #include <linux/fs.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
96 #endif
97 #include <linux/fb.h>
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #endif
108 #include <linux/audit.h>
109 #include "linux_loop.h"
110 #include "uname.h"
112 #include "qemu.h"
114 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
115 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
//#define DEBUG

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS
123 //#include <linux/msdos_fs.h>
124 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
125 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* This is the size of the host kernel's sigset_t, needed where we make
 * direct system calls that take a sigset_t pointer and a size.
 */
#define SIGSET_T_SIZE (_NSIG / 8)
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

/* Generate a static wrapper function around the raw syscall(2)
 * interface, for syscalls taking 0..6 arguments.  The wrapper's
 * return value and errno behaviour are those of syscall(2).
 */
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)     \
static type name (type1 arg1)               \
{                                           \
    return syscall(__NR_##name, arg1);      \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)  \
static type name (type1 arg1,type2 arg2)            \
{                                                   \
    return syscall(__NR_##name, arg1, arg2);        \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)              \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                    \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5)                                               \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)   \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);              \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5,type6,arg6)                                    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,   \
                  type6 arg6)                                               \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);        \
}
187 #define __NR_sys_uname __NR_uname
188 #define __NR_sys_getcwd1 __NR_getcwd
189 #define __NR_sys_getdents __NR_getdents
190 #define __NR_sys_getdents64 __NR_getdents64
191 #define __NR_sys_getpriority __NR_getpriority
192 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
193 #define __NR_sys_syslog __NR_syslog
194 #define __NR_sys_futex __NR_futex
195 #define __NR_sys_inotify_init __NR_inotify_init
196 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
197 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
199 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
200 defined(__s390x__)
201 #define __NR__llseek __NR_lseek
202 #endif
204 /* Newer kernel ports have llseek() instead of _llseek() */
205 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
206 #define TARGET_NR__llseek TARGET_NR_llseek
207 #endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a
 * host errno (the caller converts it to a target errno).
 */
static int gettid(void) {
    return -ENOSYS;
}
#endif
218 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
219 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
220 #endif
221 #if !defined(__NR_getdents) || \
222 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
223 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
224 #endif
225 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
226 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
227 loff_t *, res, uint, wh);
228 #endif
229 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
230 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group,int,error_code)
233 #endif
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address,int *,tidptr)
236 #endif
237 #if defined(TARGET_NR_futex) && defined(__NR_futex)
238 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
239 const struct timespec *,timeout,int *,uaddr2,int,val3)
240 #endif
241 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
242 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
243 unsigned long *, user_mask_ptr);
244 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
245 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
246 unsigned long *, user_mask_ptr);
247 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
248 void *, arg);
249 _syscall2(int, capget, struct __user_cap_header_struct *, header,
250 struct __user_cap_data_struct *, data);
251 _syscall2(int, capset, struct __user_cap_header_struct *, header,
252 struct __user_cap_data_struct *, data);
253 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
254 _syscall2(int, ioprio_get, int, which, int, who)
255 #endif
256 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
257 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
258 #endif
259 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
260 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
261 #endif
263 static bitmask_transtbl fcntl_flags_tbl[] = {
264 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
265 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
266 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
267 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
268 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
269 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
270 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
271 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
272 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
273 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
274 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
275 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
276 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
277 #if defined(O_DIRECT)
278 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
279 #endif
280 #if defined(O_NOATIME)
281 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
282 #endif
283 #if defined(O_CLOEXEC)
284 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
285 #endif
286 #if defined(O_PATH)
287 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
288 #endif
289 /* Don't terminate the list prematurely on 64-bit host+guest. */
290 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
291 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
292 #endif
293 { 0, 0, 0, 0 }
296 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
297 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
298 typedef struct TargetFdTrans {
299 TargetFdDataFunc host_to_target_data;
300 TargetFdDataFunc target_to_host_data;
301 TargetFdAddrFunc target_to_host_addr;
302 } TargetFdTrans;
304 static TargetFdTrans **target_fd_trans;
306 static unsigned int target_fd_max;
308 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
310 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
311 return target_fd_trans[fd]->target_to_host_data;
313 return NULL;
316 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
318 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
319 return target_fd_trans[fd]->host_to_target_data;
321 return NULL;
324 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
326 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
327 return target_fd_trans[fd]->target_to_host_addr;
329 return NULL;
332 static void fd_trans_register(int fd, TargetFdTrans *trans)
334 unsigned int oldmax;
336 if (fd >= target_fd_max) {
337 oldmax = target_fd_max;
338 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
339 target_fd_trans = g_renew(TargetFdTrans *,
340 target_fd_trans, target_fd_max);
341 memset((void *)(target_fd_trans + oldmax), 0,
342 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
344 target_fd_trans[fd] = trans;
347 static void fd_trans_unregister(int fd)
349 if (fd >= 0 && fd < target_fd_max) {
350 target_fd_trans[fd] = NULL;
354 static void fd_trans_dup(int oldfd, int newfd)
356 fd_trans_unregister(newfd);
357 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
358 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* Emulate the getcwd syscall's return convention on top of libc
 * getcwd(3): on success return the string length including the
 * trailing NUL; on failure return -1 with errno set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Implement the utimensat syscall via the libc wrappers.  A NULL
 * pathname selects the futimens() form operating on dirfd itself.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL) {
        return futimens(dirfd, times);
    }
    return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support: fail with ENOSYS. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers over the libc inotify interface, named to match the
 * __NR_sys_inotify_* aliases above.
 */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return inotify_init();
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
432 #if defined(TARGET_NR_ppoll)
433 #ifndef __NR_ppoll
434 # define __NR_ppoll -1
435 #endif
436 #define __NR_sys_ppoll __NR_ppoll
437 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
438 struct timespec *, timeout, const sigset_t *, sigmask,
439 size_t, sigsetsize)
440 #endif
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying
 * syscall, so declare the kernel's fixed 64-bit layout explicitly.
 */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, };

/* Claim and return the index of a free slot in g_posix_timers,
 * or -1 when all slots are in use.  The slot is marked busy with a
 * dummy non-zero handle until timer_create() fills in the real one.
 */
static inline int next_free_host_timer(void)
{
    int idx;
    /* FIXME: Does finding the next free slot require a lock? */
    for (idx = 0; idx < ARRAY_SIZE(g_posix_timers); idx++) {
        if (g_posix_timers[idx] == 0) {
            g_posix_timers[idx] = (timer_t) 1;
            return idx;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    /* Only the EABI calling convention aligns register pairs. */
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
492 #define ERRNO_TABLE_SIZE 1200
494 /* target_to_host_errno_table[] is initialized from
495 * host_to_target_errno_table[] in syscall_init(). */
496 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
500 * This list is the union of errno values overridden in asm-<arch>/errno.h
501 * minus the errnos that are not actually generic to all archs.
503 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
504 [EAGAIN] = TARGET_EAGAIN,
505 [EIDRM] = TARGET_EIDRM,
506 [ECHRNG] = TARGET_ECHRNG,
507 [EL2NSYNC] = TARGET_EL2NSYNC,
508 [EL3HLT] = TARGET_EL3HLT,
509 [EL3RST] = TARGET_EL3RST,
510 [ELNRNG] = TARGET_ELNRNG,
511 [EUNATCH] = TARGET_EUNATCH,
512 [ENOCSI] = TARGET_ENOCSI,
513 [EL2HLT] = TARGET_EL2HLT,
514 [EDEADLK] = TARGET_EDEADLK,
515 [ENOLCK] = TARGET_ENOLCK,
516 [EBADE] = TARGET_EBADE,
517 [EBADR] = TARGET_EBADR,
518 [EXFULL] = TARGET_EXFULL,
519 [ENOANO] = TARGET_ENOANO,
520 [EBADRQC] = TARGET_EBADRQC,
521 [EBADSLT] = TARGET_EBADSLT,
522 [EBFONT] = TARGET_EBFONT,
523 [ENOSTR] = TARGET_ENOSTR,
524 [ENODATA] = TARGET_ENODATA,
525 [ETIME] = TARGET_ETIME,
526 [ENOSR] = TARGET_ENOSR,
527 [ENONET] = TARGET_ENONET,
528 [ENOPKG] = TARGET_ENOPKG,
529 [EREMOTE] = TARGET_EREMOTE,
530 [ENOLINK] = TARGET_ENOLINK,
531 [EADV] = TARGET_EADV,
532 [ESRMNT] = TARGET_ESRMNT,
533 [ECOMM] = TARGET_ECOMM,
534 [EPROTO] = TARGET_EPROTO,
535 [EDOTDOT] = TARGET_EDOTDOT,
536 [EMULTIHOP] = TARGET_EMULTIHOP,
537 [EBADMSG] = TARGET_EBADMSG,
538 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
539 [EOVERFLOW] = TARGET_EOVERFLOW,
540 [ENOTUNIQ] = TARGET_ENOTUNIQ,
541 [EBADFD] = TARGET_EBADFD,
542 [EREMCHG] = TARGET_EREMCHG,
543 [ELIBACC] = TARGET_ELIBACC,
544 [ELIBBAD] = TARGET_ELIBBAD,
545 [ELIBSCN] = TARGET_ELIBSCN,
546 [ELIBMAX] = TARGET_ELIBMAX,
547 [ELIBEXEC] = TARGET_ELIBEXEC,
548 [EILSEQ] = TARGET_EILSEQ,
549 [ENOSYS] = TARGET_ENOSYS,
550 [ELOOP] = TARGET_ELOOP,
551 [ERESTART] = TARGET_ERESTART,
552 [ESTRPIPE] = TARGET_ESTRPIPE,
553 [ENOTEMPTY] = TARGET_ENOTEMPTY,
554 [EUSERS] = TARGET_EUSERS,
555 [ENOTSOCK] = TARGET_ENOTSOCK,
556 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
557 [EMSGSIZE] = TARGET_EMSGSIZE,
558 [EPROTOTYPE] = TARGET_EPROTOTYPE,
559 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
560 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
561 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
562 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
563 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
564 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
565 [EADDRINUSE] = TARGET_EADDRINUSE,
566 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
567 [ENETDOWN] = TARGET_ENETDOWN,
568 [ENETUNREACH] = TARGET_ENETUNREACH,
569 [ENETRESET] = TARGET_ENETRESET,
570 [ECONNABORTED] = TARGET_ECONNABORTED,
571 [ECONNRESET] = TARGET_ECONNRESET,
572 [ENOBUFS] = TARGET_ENOBUFS,
573 [EISCONN] = TARGET_EISCONN,
574 [ENOTCONN] = TARGET_ENOTCONN,
575 [EUCLEAN] = TARGET_EUCLEAN,
576 [ENOTNAM] = TARGET_ENOTNAM,
577 [ENAVAIL] = TARGET_ENAVAIL,
578 [EISNAM] = TARGET_EISNAM,
579 [EREMOTEIO] = TARGET_EREMOTEIO,
580 [ESHUTDOWN] = TARGET_ESHUTDOWN,
581 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
582 [ETIMEDOUT] = TARGET_ETIMEDOUT,
583 [ECONNREFUSED] = TARGET_ECONNREFUSED,
584 [EHOSTDOWN] = TARGET_EHOSTDOWN,
585 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
586 [EALREADY] = TARGET_EALREADY,
587 [EINPROGRESS] = TARGET_EINPROGRESS,
588 [ESTALE] = TARGET_ESTALE,
589 [ECANCELED] = TARGET_ECANCELED,
590 [ENOMEDIUM] = TARGET_ENOMEDIUM,
591 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
592 #ifdef ENOKEY
593 [ENOKEY] = TARGET_ENOKEY,
594 #endif
595 #ifdef EKEYEXPIRED
596 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
597 #endif
598 #ifdef EKEYREVOKED
599 [EKEYREVOKED] = TARGET_EKEYREVOKED,
600 #endif
601 #ifdef EKEYREJECTED
602 [EKEYREJECTED] = TARGET_EKEYREJECTED,
603 #endif
604 #ifdef EOWNERDEAD
605 [EOWNERDEAD] = TARGET_EOWNERDEAD,
606 #endif
607 #ifdef ENOTRECOVERABLE
608 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
609 #endif
612 static inline int host_to_target_errno(int err)
614 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
615 host_to_target_errno_table[err]) {
616 return host_to_target_errno_table[err];
618 return err;
621 static inline int target_to_host_errno(int err)
623 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
624 target_to_host_errno_table[err]) {
625 return target_to_host_errno_table[err];
627 return err;
630 static inline abi_long get_errno(abi_long ret)
632 if (ret == -1)
633 return -host_to_target_errno(errno);
634 else
635 return ret;
638 static inline int is_error(abi_long ret)
640 return (abi_ulong)ret >= (abi_ulong)(-4096);
643 char *target_strerror(int err)
645 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
646 return NULL;
648 return strerror(target_to_host_errno(err));
/* Generate a static safe_<name>() wrapper around the safe_syscall()
 * primitive for 0..6 argument syscalls.  safe_syscall() guarantees the
 * syscall is correctly interrupted by guest signals (returning
 * -TARGET_ERESTARTSYS semantics via host errno).
 */
#define safe_syscall0(type, name)   \
static type safe_##name(void)       \
{                                   \
    return safe_syscall(__NR_##name);   \
}

#define safe_syscall1(type, name, type1, arg1)  \
static type safe_##name(type1 arg1)             \
{                                               \
    return safe_syscall(__NR_##name, arg1);     \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2)             \
{                                                           \
    return safe_syscall(__NR_##name, arg1, arg2);           \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3)              \
{                                                                        \
    return safe_syscall(__NR_##name, arg1, arg2, arg3);                  \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4)                                                         \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4)  \
{                                                                        \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4);            \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5)                                            \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4,  \
    type5 arg5)                                                          \
{                                                                        \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);      \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6)                               \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4,  \
    type5 arg5, type6 arg6)                                              \
{                                                                        \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
698 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
699 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
700 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
701 int, flags, mode_t, mode)
702 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
703 struct rusage *, rusage)
704 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
705 int, options, struct rusage *, rusage)
706 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
707 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
708 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
709 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
710 const struct timespec *,timeout,int *,uaddr2,int,val3)
711 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
712 safe_syscall2(int, kill, pid_t, pid, int, sig)
713 safe_syscall2(int, tkill, int, tid, int, sig)
714 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
715 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
716 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
717 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
718 socklen_t, addrlen)
719 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
720 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
721 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
722 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
723 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
724 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
726 static inline int host_to_target_sock_type(int host_type)
728 int target_type;
730 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
731 case SOCK_DGRAM:
732 target_type = TARGET_SOCK_DGRAM;
733 break;
734 case SOCK_STREAM:
735 target_type = TARGET_SOCK_STREAM;
736 break;
737 default:
738 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
739 break;
742 #if defined(SOCK_CLOEXEC)
743 if (host_type & SOCK_CLOEXEC) {
744 target_type |= TARGET_SOCK_CLOEXEC;
746 #endif
748 #if defined(SOCK_NONBLOCK)
749 if (host_type & SOCK_NONBLOCK) {
750 target_type |= TARGET_SOCK_NONBLOCK;
752 #endif
754 return target_type;
757 static abi_ulong target_brk;
758 static abi_ulong target_original_brk;
759 static abi_ulong brk_page;
761 void target_set_brk(abi_ulong new_brk)
763 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
764 brk_page = HOST_PAGE_ALIGN(target_brk);
767 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
768 #define DEBUGF_BRK(message, args...)
770 /* do_brk() must return target values and target errnos. */
771 abi_long do_brk(abi_ulong new_brk)
773 abi_long mapped_addr;
774 int new_alloc_size;
776 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
778 if (!new_brk) {
779 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
780 return target_brk;
782 if (new_brk < target_original_brk) {
783 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
784 target_brk);
785 return target_brk;
788 /* If the new brk is less than the highest page reserved to the
789 * target heap allocation, set it and we're almost done... */
790 if (new_brk <= brk_page) {
791 /* Heap contents are initialized to zero, as for anonymous
792 * mapped pages. */
793 if (new_brk > target_brk) {
794 memset(g2h(target_brk), 0, new_brk - target_brk);
796 target_brk = new_brk;
797 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
798 return target_brk;
801 /* We need to allocate more memory after the brk... Note that
802 * we don't use MAP_FIXED because that will map over the top of
803 * any existing mapping (like the one with the host libc or qemu
804 * itself); instead we treat "mapped but at wrong address" as
805 * a failure and unmap again.
807 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
808 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
809 PROT_READ|PROT_WRITE,
810 MAP_ANON|MAP_PRIVATE, 0, 0));
812 if (mapped_addr == brk_page) {
813 /* Heap contents are initialized to zero, as for anonymous
814 * mapped pages. Technically the new pages are already
815 * initialized to zero since they *are* anonymous mapped
816 * pages, however we have to take care with the contents that
817 * come from the remaining part of the previous page: it may
818 * contains garbage data due to a previous heap usage (grown
819 * then shrunken). */
820 memset(g2h(target_brk), 0, brk_page - target_brk);
822 target_brk = new_brk;
823 brk_page = HOST_PAGE_ALIGN(target_brk);
824 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
825 target_brk);
826 return target_brk;
827 } else if (mapped_addr != -1) {
828 /* Mapped but at wrong address, meaning there wasn't actually
829 * enough space for this brk.
831 target_munmap(mapped_addr, new_alloc_size);
832 mapped_addr = -1;
833 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
835 else {
836 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
839 #if defined(TARGET_ALPHA)
840 /* We (partially) emulate OSF/1 on Alpha, which requires we
841 return a proper errno, not an unchanged brk value. */
842 return -TARGET_ENOMEM;
843 #endif
844 /* For everything else, return the previous break. */
845 return target_brk;
848 static inline abi_long copy_from_user_fdset(fd_set *fds,
849 abi_ulong target_fds_addr,
850 int n)
852 int i, nw, j, k;
853 abi_ulong b, *target_fds;
855 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
856 if (!(target_fds = lock_user(VERIFY_READ,
857 target_fds_addr,
858 sizeof(abi_ulong) * nw,
859 1)))
860 return -TARGET_EFAULT;
862 FD_ZERO(fds);
863 k = 0;
864 for (i = 0; i < nw; i++) {
865 /* grab the abi_ulong */
866 __get_user(b, &target_fds[i]);
867 for (j = 0; j < TARGET_ABI_BITS; j++) {
868 /* check the bit inside the abi_ulong */
869 if ((b >> j) & 1)
870 FD_SET(k, fds);
871 k++;
875 unlock_user(target_fds, target_fds_addr, 0);
877 return 0;
880 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
881 abi_ulong target_fds_addr,
882 int n)
884 if (target_fds_addr) {
885 if (copy_from_user_fdset(fds, target_fds_addr, n))
886 return -TARGET_EFAULT;
887 *fds_ptr = fds;
888 } else {
889 *fds_ptr = NULL;
891 return 0;
894 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
895 const fd_set *fds,
896 int n)
898 int i, nw, j, k;
899 abi_long v;
900 abi_ulong *target_fds;
902 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
903 if (!(target_fds = lock_user(VERIFY_WRITE,
904 target_fds_addr,
905 sizeof(abi_ulong) * nw,
906 0)))
907 return -TARGET_EFAULT;
909 k = 0;
910 for (i = 0; i < nw; i++) {
911 v = 0;
912 for (j = 0; j < TARGET_ABI_BITS; j++) {
913 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
914 k++;
916 __put_user(v, &target_fds[i]);
919 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
921 return 0;
924 #if defined(__alpha__)
925 #define HOST_HZ 1024
926 #else
927 #define HOST_HZ 100
928 #endif
930 static inline abi_long host_to_target_clock_t(long ticks)
932 #if HOST_HZ == TARGET_HZ
933 return ticks;
934 #else
935 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
936 #endif
939 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
940 const struct rusage *rusage)
942 struct target_rusage *target_rusage;
944 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
945 return -TARGET_EFAULT;
946 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
947 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
948 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
949 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
950 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
951 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
952 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
953 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
954 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
955 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
956 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
957 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
958 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
959 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
960 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
961 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
962 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
963 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
964 unlock_user_struct(target_rusage, target_addr, 1);
966 return 0;
969 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
971 abi_ulong target_rlim_swap;
972 rlim_t result;
974 target_rlim_swap = tswapal(target_rlim);
975 if (target_rlim_swap == TARGET_RLIM_INFINITY)
976 return RLIM_INFINITY;
978 result = target_rlim_swap;
979 if (target_rlim_swap != (rlim_t)result)
980 return RLIM_INFINITY;
982 return result;
985 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
987 abi_ulong target_rlim_swap;
988 abi_ulong result;
990 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
991 target_rlim_swap = TARGET_RLIM_INFINITY;
992 else
993 target_rlim_swap = rlim;
994 result = tswapal(target_rlim_swap);
996 return result;
999 static inline int target_to_host_resource(int code)
1001 switch (code) {
1002 case TARGET_RLIMIT_AS:
1003 return RLIMIT_AS;
1004 case TARGET_RLIMIT_CORE:
1005 return RLIMIT_CORE;
1006 case TARGET_RLIMIT_CPU:
1007 return RLIMIT_CPU;
1008 case TARGET_RLIMIT_DATA:
1009 return RLIMIT_DATA;
1010 case TARGET_RLIMIT_FSIZE:
1011 return RLIMIT_FSIZE;
1012 case TARGET_RLIMIT_LOCKS:
1013 return RLIMIT_LOCKS;
1014 case TARGET_RLIMIT_MEMLOCK:
1015 return RLIMIT_MEMLOCK;
1016 case TARGET_RLIMIT_MSGQUEUE:
1017 return RLIMIT_MSGQUEUE;
1018 case TARGET_RLIMIT_NICE:
1019 return RLIMIT_NICE;
1020 case TARGET_RLIMIT_NOFILE:
1021 return RLIMIT_NOFILE;
1022 case TARGET_RLIMIT_NPROC:
1023 return RLIMIT_NPROC;
1024 case TARGET_RLIMIT_RSS:
1025 return RLIMIT_RSS;
1026 case TARGET_RLIMIT_RTPRIO:
1027 return RLIMIT_RTPRIO;
1028 case TARGET_RLIMIT_SIGPENDING:
1029 return RLIMIT_SIGPENDING;
1030 case TARGET_RLIMIT_STACK:
1031 return RLIMIT_STACK;
1032 default:
1033 return code;
1037 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1038 abi_ulong target_tv_addr)
1040 struct target_timeval *target_tv;
1042 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1043 return -TARGET_EFAULT;
1045 __get_user(tv->tv_sec, &target_tv->tv_sec);
1046 __get_user(tv->tv_usec, &target_tv->tv_usec);
1048 unlock_user_struct(target_tv, target_tv_addr, 0);
1050 return 0;
1053 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1054 const struct timeval *tv)
1056 struct target_timeval *target_tv;
1058 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1059 return -TARGET_EFAULT;
1061 __put_user(tv->tv_sec, &target_tv->tv_sec);
1062 __put_user(tv->tv_usec, &target_tv->tv_usec);
1064 unlock_user_struct(target_tv, target_tv_addr, 1);
1066 return 0;
1069 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1070 abi_ulong target_tz_addr)
1072 struct target_timezone *target_tz;
1074 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1075 return -TARGET_EFAULT;
1078 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1079 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1081 unlock_user_struct(target_tz, target_tz_addr, 0);
1083 return 0;
1086 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1087 #include <mqueue.h>
1089 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1090 abi_ulong target_mq_attr_addr)
1092 struct target_mq_attr *target_mq_attr;
1094 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1095 target_mq_attr_addr, 1))
1096 return -TARGET_EFAULT;
1098 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1099 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1100 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1101 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1103 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1105 return 0;
1108 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1109 const struct mq_attr *attr)
1111 struct target_mq_attr *target_mq_attr;
1113 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1114 target_mq_attr_addr, 0))
1115 return -TARGET_EFAULT;
1117 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1118 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1119 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1120 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1122 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1124 return 0;
1126 #endif
1128 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Implements the guest's select()/newselect() on top of the host's
 * pselect6 (via the signal-safe safe_pselect6 wrapper), converting the
 * three fd_sets and the optional timeout in both directions.  Note that
 * pselect6 takes a timespec while the guest supplied a timeval, hence
 * the usec<->nsec conversions around the call.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* A zero guest address yields a NULL fd_set pointer (helper contract:
     * *_ptr is set to NULL when the guest passed no set). */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* Copy the (kernel-modified) sets and remaining timeout back. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1185 #endif
/* Create a pipe with flags via the host pipe2() if the build host has it;
 * otherwise report ENOSYS so the caller can fail the flagged variant.
 * Returns the raw host result (0 / -1 with errno), not a target errno.
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Common implementation for the guest pipe and pipe2 syscalls.
 * Creates the host pipe, then writes both fds back to the guest.  For the
 * legacy pipe syscall some targets return the second fd in a CPU register
 * instead of through memory, handled by the per-target #ifdefs below.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default path: store both fds into the guest's int[2] array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
/* Convert a guest ip_mreq/ip_mreqn (multicast membership request) into the
 * host's struct ip_mreqn.  The two addresses are kept in network byte order
 * and copied verbatim; only the optional ifindex (present when the guest
 * passed the larger ip_mreqn form, detected by len) needs a byte swap.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
/* Convert a guest sockaddr at target_addr into the host *addr.
 * A per-fd translation hook (fd_trans_target_to_host_addr) takes precedence
 * when registered.  Otherwise the bytes are copied and the family-dependent
 * fields (netlink pid/groups, packet ifindex/hatype) are byte-swapped;
 * AF_UNIX paths get the missing-NUL workaround described inline.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* NOTE(review): cp[len] reads one byte past the locked
             * length; presumably safe because lock_user maps whole
             * guest pages — confirm. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;        /* undo possible byte-swap in copy */
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1305 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1306 struct sockaddr *addr,
1307 socklen_t len)
1309 struct target_sockaddr *target_saddr;
1311 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1312 if (!target_saddr)
1313 return -TARGET_EFAULT;
1314 memcpy(target_saddr, addr, len);
1315 target_saddr->sa_family = tswap16(addr->sa_family);
1316 if (addr->sa_family == AF_NETLINK) {
1317 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1318 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1319 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1321 unlock_user(target_saddr, target_addr, len);
1323 return 0;
/* Convert the ancillary-data (control message) chain of a guest msghdr into
 * host format, writing into the host buffer already attached to msgh.
 * Converts SCM_RIGHTS fd arrays and SCM_CREDENTIALS; any other payload type
 * is logged and copied verbatim.  On exit msgh->msg_controllen holds the
 * number of host bytes actually used.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest control buffer.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;        /* no (complete) control data at all */
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* len = payload size of this guest control message */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* fd-passing: each guest int is swapped individually */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload: pass the bytes through unconverted. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the ancillary-data chain received from the host kernel into the
 * guest's control buffer.  Handles SCM_RIGHTS, SO_TIMESTAMP (which changes
 * size to target_timeval) and SCM_CREDENTIALS; anything else is logged and
 * byte-copied.  Truncation against the guest's buffer is reported via
 * MSG_CTRUNC, mirroring the kernel's put_cmsg() behaviour.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest control buffer.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;        /* guest buffer can't hold even one header */
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* len = payload size of this host control message */
        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            /* fall through (no break in original) */
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: raw copy, zero-padding if the target
             * representation is larger than the host one. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* Byte-swap every field of a netlink message header in place.
 * Used on both the host->target and target->host paths (the swap is
 * its own inverse).
 */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
/* Walk a buffer of host-order netlink messages, converting each payload
 * with the given callback and then the header itself to guest byte order.
 * NLMSG_DONE and NLMSG_ERROR terminate the walk (they are the final
 * messages of a dump / an error reply respectively).
 * Returns 0, or the first negative value reported by the callback.
 */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                                       (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;          /* malformed or truncated message: stop */
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        /* Header swapped only after the payload callback, which needs
         * host-order length/type fields. */
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
1606 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
1607 size_t len,
1608 abi_long (*target_to_host_nlmsg)
1609 (struct nlmsghdr *))
1611 int ret;
1613 while (len > sizeof(struct nlmsghdr)) {
1614 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
1615 tswap32(nlh->nlmsg_len) > len) {
1616 break;
1618 tswap_nlmsghdr(nlh);
1619 switch (nlh->nlmsg_type) {
1620 case NLMSG_DONE:
1621 return 0;
1622 case NLMSG_NOOP:
1623 break;
1624 case NLMSG_ERROR:
1626 struct nlmsgerr *e = NLMSG_DATA(nlh);
1627 e->error = tswap32(e->error);
1628 tswap_nlmsghdr(&e->msg);
1630 default:
1631 ret = target_to_host_nlmsg(nlh);
1632 if (ret < 0) {
1633 return ret;
1636 len -= NLMSG_ALIGN(nlh->nlmsg_len);
1637 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
1639 return 0;
1642 #ifdef CONFIG_RTNETLINK
/* Walk a chain of host-order rtattr attributes, converting each payload
 * with the callback and then the rta_len/rta_type fields to guest order.
 * NOTE(review): the loop condition is `len > sizeof`, while the
 * target-to-host counterpart uses `>=` — confirm whether an attribute
 * with an empty payload should be processed here too.
 * Returns 0, or the first negative value reported by the callback.
 */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                                        (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;      /* malformed or truncated attribute: stop */
        }
        ret = host_to_target_rtattr(rtattr);
        /* Header swapped after the callback, which needs host order. */
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}
/* Convert the payload of one IFLA_* link attribute from host to guest
 * byte order, dispatching on the attribute type.  Binary blobs, strings
 * and single-byte values need no conversion; u32 scalars and the stats /
 * ifmap structures are swapped field by field.  Always returns 0.
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;

    switch (rtattr->rta_type) {
    /* binary stream */
    case IFLA_ADDRESS:
    case IFLA_BROADCAST:
    /* string */
    case IFLA_IFNAME:
    case IFLA_QDISC:
        break;
    /* uint8_t */
    case IFLA_OPERSTATE:
    case IFLA_LINKMODE:
    case IFLA_CARRIER:
    case IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case IFLA_MTU:
    case IFLA_LINK:
    case IFLA_WEIGHT:
    case IFLA_TXQLEN:
    case IFLA_CARRIER_CHANGES:
    case IFLA_NUM_RX_QUEUES:
    case IFLA_NUM_TX_QUEUES:
    case IFLA_PROMISCUITY:
    case IFLA_EXT_MASK:
    case IFLA_LINK_NETNSID:
    case IFLA_GROUP:
    case IFLA_MASTER:
    case IFLA_NUM_VF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case IFLA_AF_SPEC:
    case IFLA_LINKINFO:
        /* FIXME: implement nested type */
        gemu_log("Unimplemented nested type %d\n", rtattr->rta_type);
        break;
    default:
        gemu_log("Unknown host IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Convert the payload of one IFA_* address attribute from host to guest
 * byte order.  Always returns 0.
 */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        /* NOTE(review): IFA_BROADCAST carries an address like
         * IFA_ADDRESS/IFA_LOCAL (network byte order, passed through
         * above); swapping it as a u32 looks wrong — confirm. */
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Convert the payload of one RTA_* route attribute from host to guest
 * byte order.  Addresses stay in network byte order; u32 scalars are
 * swapped.  Always returns 0.
 */
static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_GATEWAY:
    case RTA_DST:
    case RTA_PREFSRC:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_TABLE:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Convert all IFLA_* attributes of a link message to guest byte order. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}
/* Convert all IFA_* attributes of an address message to guest byte order. */
static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}
/* Convert all RTA_* attributes of a route message to guest byte order. */
static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}
/* Convert the payload of one RTM_* routing netlink message from host to
 * guest byte order: fixed header fields first, then the trailing rtattr
 * chain.  Returns 0 or -TARGET_EINVAL for unhandled message types.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    /* nlmsg_len is still host order here; the caller swaps the header
     * after this callback returns. */
    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        ifi = NLMSG_DATA(nlh);
        ifi->ifi_type = tswap16(ifi->ifi_type);
        ifi->ifi_index = tswap32(ifi->ifi_index);
        ifi->ifi_flags = tswap32(ifi->ifi_flags);
        ifi->ifi_change = tswap32(ifi->ifi_change);
        host_to_target_link_rtattr(IFLA_RTA(ifi),
                                   nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        ifa = NLMSG_DATA(nlh);
        ifa->ifa_index = tswap32(ifa->ifa_index);
        host_to_target_addr_rtattr(IFA_RTA(ifa),
                                   nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        rtm = NLMSG_DATA(nlh);
        rtm->rtm_flags = tswap32(rtm->rtm_flags);
        host_to_target_route_rtattr(RTM_RTA(rtm),
                                    nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
/* Convert a whole buffer of routing netlink messages to guest byte order. */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
/* Walk a chain of guest-order rtattr attributes, converting the
 * rta_len/rta_type fields to host order and then the payload via the
 * callback.  Returns 0, or the first negative value from the callback.
 */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                                        (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        /* Length is still guest-order here; validate before use. */
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                 RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}
1945 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
1947 switch (rtattr->rta_type) {
1948 default:
1949 gemu_log("Unknown target IFLA type: %d\n", rtattr->rta_type);
1950 break;
1952 return 0;
1955 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
1957 switch (rtattr->rta_type) {
1958 /* binary: depends on family type */
1959 case IFA_LOCAL:
1960 case IFA_ADDRESS:
1961 break;
1962 default:
1963 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
1964 break;
1966 return 0;
1969 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
1971 uint32_t *u32;
1972 switch (rtattr->rta_type) {
1973 /* binary: depends on family type */
1974 case RTA_DST:
1975 case RTA_SRC:
1976 case RTA_GATEWAY:
1977 break;
1978 /* u32 */
1979 case RTA_OIF:
1980 u32 = RTA_DATA(rtattr);
1981 *u32 = tswap32(*u32);
1982 break;
1983 default:
1984 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
1985 break;
1987 return 0;
/* Convert all IFLA_* attributes of a guest link message to host order. */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}
/* Convert all IFA_* attributes of a guest address message to host order. */
static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}
/* Convert all RTA_* attributes of a guest route message to host order. */
static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}
/* Convert the payload of one guest RTM_* routing netlink message to host
 * byte order: fixed header fields first, then the trailing rtattr chain.
 * GET requests carry no payload needing conversion.  Returns 0 or
 * -TARGET_EOPNOTSUPP for unhandled message types.
 */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        ifi = NLMSG_DATA(nlh);
        ifi->ifi_type = tswap16(ifi->ifi_type);
        ifi->ifi_index = tswap32(ifi->ifi_index);
        ifi->ifi_flags = tswap32(ifi->ifi_flags);
        ifi->ifi_change = tswap32(ifi->ifi_change);
        target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                   NLMSG_LENGTH(sizeof(*ifi)));
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        ifa = NLMSG_DATA(nlh);
        ifa->ifa_index = tswap32(ifa->ifa_index);
        target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                   NLMSG_LENGTH(sizeof(*ifa)));
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        rtm = NLMSG_DATA(nlh);
        rtm->rtm_flags = tswap32(rtm->rtm_flags);
        target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                    NLMSG_LENGTH(sizeof(*rtm)));
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
/* Convert a whole guest buffer of routing netlink messages to host order. */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
2057 #endif /* CONFIG_RTNETLINK */
2059 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2061 switch (nlh->nlmsg_type) {
2062 default:
2063 gemu_log("Unknown host audit message type %d\n",
2064 nlh->nlmsg_type);
2065 return -TARGET_EINVAL;
2067 return 0;
/* Convert a whole buffer of audit netlink messages to guest byte order. */
static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}
/* Convert the payload of one guest audit netlink message to host order.
 * User-space message types carry free-form data that needs no conversion
 * and are passed through; anything else is logged and rejected with
 * -TARGET_EINVAL.  The `...` case ranges are a GNU C extension.
 */
static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    case AUDIT_USER:
    case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
    case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
        break;
    default:
        gemu_log("Unknown target audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }

    return 0;
}
/* Convert a whole guest buffer of audit netlink messages to host order. */
static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}
/* do_setsockopt() Must return target values and target errnos.
 *
 * Translates the guest's (level, optname, optval) triple into the host
 * representation before issuing the host setsockopt().  Option values that
 * are plain ints are fetched with get_user_u32/u8; structured options
 * (ip_mreq, sock_fprog, timevals, ifnames) get dedicated conversion.
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* These take an int, but the kernel also accepts a single
             * byte for short optlen; mirror that leniency. */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* Accept either the short ip_mreq or the full ip_mreqn form. */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            /* struct ip_mreq_source is all network-order addresses, so it
             * can be passed through without byte swapping. */
            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
            /* struct icmp_filter takes an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

                /* Shared tail for SO_RCVTIMEO and SO_SNDTIMEO: convert the
                 * guest timeval and issue the host call directly. */
set_timeout:
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                /* BPF filter program: byte-swap each sock_filter insn into a
                 * host-side copy before attaching. */
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    /* jt/jf are single bytes: no swap needed */
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
                char *dev_ifname, *addr_ifname;

                if (optlen > IFNAMSIZ - 1) {
                    optlen = IFNAMSIZ - 1;
                }
                dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                if (!dev_ifname) {
                    return -TARGET_EFAULT;
                }
                optname = SO_BINDTODEVICE;
                /* Copy to a NUL-terminated local buffer: the guest string
                 * may not be terminated within optlen bytes. */
                addr_ifname = alloca(IFNAMSIZ);
                memcpy(addr_ifname, dev_ifname, optlen);
                addr_ifname[optlen] = 0;
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
                unlock_user (dev_ifname, optval_addr, 0);
                return ret;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        /* NOTE(review): stray unreachable 'break' kept from the original */
        break;
        default:
            goto unimplemented;
        }
        /* All the 'int' options above fall through to here. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos.
 *
 * Fetches the host option value and copies it back to guest memory in the
 * size the guest asked for, updating *optlen.  Most options are plain ints
 * and share the int_case tail; SO_PEERCRED needs a struct conversion.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Copy back at most as many bytes as the guest asked for. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): sizeof(lv) here relies on socklen_t and int having
         * the same size; presumably intended as sizeof(val) — confirm. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* Small value and a short buffer: write a single byte. */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Convert a guest iovec array at target_addr into a host struct iovec
 * array with each buffer locked into host memory.
 *
 * Returns a g_new()-allocated vector (caller frees via unlock_iovec), or
 * NULL with errno set on failure.  count == 0 returns NULL with errno 0.
 * A bad buffer pointer is only a hard EFAULT for the first entry; later
 * bad entries degrade to zero-length slots so a partial transfer matches
 * kernel behaviour.  Total length is clamped to max_len.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every buffer locked before the failing entry. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
/* Release a vector created by lock_iovec(): unlock each guest buffer
 * (copying data back when 'copy' is set, i.e. after a read-type syscall)
 * and free the host-side array.  Re-reads the guest iovec descriptors to
 * recover each buffer's guest address. */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         int count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                /* lock_iovec stopped at the first negative length. */
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
2674 static inline int target_to_host_sock_type(int *type)
2676 int host_type = 0;
2677 int target_type = *type;
2679 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2680 case TARGET_SOCK_DGRAM:
2681 host_type = SOCK_DGRAM;
2682 break;
2683 case TARGET_SOCK_STREAM:
2684 host_type = SOCK_STREAM;
2685 break;
2686 default:
2687 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2688 break;
2690 if (target_type & TARGET_SOCK_CLOEXEC) {
2691 #if defined(SOCK_CLOEXEC)
2692 host_type |= SOCK_CLOEXEC;
2693 #else
2694 return -TARGET_EINVAL;
2695 #endif
2697 if (target_type & TARGET_SOCK_NONBLOCK) {
2698 #if defined(SOCK_NONBLOCK)
2699 host_type |= SOCK_NONBLOCK;
2700 #elif !defined(O_NONBLOCK)
2701 return -TARGET_EINVAL;
2702 #endif
2704 *type = host_type;
2705 return 0;
2708 /* Try to emulate socket type flags after socket creation. */
2709 static int sock_flags_fixup(int fd, int target_type)
2711 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2712 if (target_type & TARGET_SOCK_NONBLOCK) {
2713 int flags = fcntl(fd, F_GETFL);
2714 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2715 close(fd);
2716 return -TARGET_EINVAL;
2719 #endif
2720 return fd;
/* Address converter for AF_PACKET / SOCK_PACKET sockets: copy the guest
 * sockaddr verbatim and only swap sa_family.  The protocol field of a
 * sockaddr_pkt stays untouched because it is already big-endian on the
 * wire. */
static abi_long packet_target_to_host_sockaddr(void *host_addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    struct sockaddr *addr = host_addr;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr) {
        return -TARGET_EFAULT;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = tswap16(target_saddr->sa_family);
    /* spkt_protocol is big-endian */

    unlock_user(target_saddr, target_addr, 0);
    return 0;
}

/* fd translator installed on SOCK_PACKET fds by do_socket(). */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
#ifdef CONFIG_RTNETLINK
/* Outgoing NETLINK_ROUTE data: guest -> host byte order. */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    return target_to_host_nlmsg_route(buf, len);
}

/* Incoming NETLINK_ROUTE data: host -> guest byte order. */
static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    return host_to_target_nlmsg_route(buf, len);
}

/* fd translator installed on NETLINK_ROUTE sockets by do_socket(). */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
/* Outgoing NETLINK_AUDIT data: guest -> host byte order. */
static abi_long netlink_audit_target_to_host(void *buf, size_t len)
{
    return target_to_host_nlmsg_audit(buf, len);
}

/* Incoming NETLINK_AUDIT data: host -> guest byte order. */
static abi_long netlink_audit_host_to_target(void *buf, size_t len)
{
    return host_to_target_nlmsg_audit(buf, len);
}

/* fd translator installed on NETLINK_AUDIT sockets by do_socket(). */
static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};
/* do_socket() Must return target values and target errnos.
 *
 * Creates the host socket after translating the type flags, restricts
 * PF_NETLINK to the protocols we can translate, and registers the
 * appropriate fd data/address translator on the new fd.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;   /* original flags, kept for sock_flags_fixup */
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only netlink protocols with a translator (or none needed) are
     * allowed through; anything else would corrupt kernel messages. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -EPFNOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        /* Ethertype protocol numbers are big-endian on the wire. */
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* unreachable: protocols were filtered above */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
2833 /* do_bind() Must return target values and target errnos. */
2834 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2835 socklen_t addrlen)
2837 void *addr;
2838 abi_long ret;
2840 if ((int)addrlen < 0) {
2841 return -TARGET_EINVAL;
2844 addr = alloca(addrlen+1);
2846 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2847 if (ret)
2848 return ret;
2850 return get_errno(bind(sockfd, addr, addrlen));
2853 /* do_connect() Must return target values and target errnos. */
2854 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2855 socklen_t addrlen)
2857 void *addr;
2858 abi_long ret;
2860 if ((int)addrlen < 0) {
2861 return -TARGET_EINVAL;
2864 addr = alloca(addrlen+1);
2866 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2867 if (ret)
2868 return ret;
2870 return get_errno(safe_connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation; msgp is already locked by the
 * caller.  Converts name, control data and the iovec to host form, calls
 * safe_sendmsg/safe_recvmsg, and on receive converts results back.
 * Returns the byte count on success.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Host cmsg layout may be larger than the target's; 2x is enough
     * headroom for the converted control buffer. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        /* An fd-specific data translator (e.g. netlink) takes precedence
         * over generic cmsg conversion. */
        if (fd_trans_target_to_host_data(fd)) {
            ret = fd_trans_target_to_host_data(fd)(msg.msg_iov->iov_base,
                                                   msg.msg_iov->iov_len);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
        }
        if (ret == 0) {
            ret = get_errno(safe_sendmsg(fd, &msg, flags));
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;   /* preserve byte count across the conversions */
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       msg.msg_iov->iov_len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
2952 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2953 int flags, int send)
2955 abi_long ret;
2956 struct target_msghdr *msgp;
2958 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2959 msgp,
2960 target_msg,
2961 send ? 1 : 0)) {
2962 return -TARGET_EFAULT;
2964 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2965 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2966 return ret;
2969 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2970 * so it might not have this *mmsg-specific flag either.
2972 #ifndef MSG_WAITFORONE
2973 #define MSG_WAITFORONE 0x10000
2974 #endif
/* Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked() over the
 * guest mmsghdr array.  Per kernel semantics, if any datagram was
 * transferred the count is returned and the error is discarded. */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Only the first i entries were written back. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    /* flags must be 0 here; callers guarantee it (see comment above). */
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif
/* do_accept4() Must return target values and target errnos.
 * target_addr == 0 means the guest passed a NULL sockaddr. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        return get_errno(accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_getpeername() Must return target values and target errnos.
 * Fetches the peer address into a host buffer, converts it into guest
 * form and writes back the (possibly shortened) address length. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_getsockname() Must return target values and target errnos.
 * Same shape as do_getpeername(), but queries the local address. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
3124 /* do_socketpair() Must return target values and target errnos. */
3125 static abi_long do_socketpair(int domain, int type, int protocol,
3126 abi_ulong target_tab_addr)
3128 int tab[2];
3129 abi_long ret;
3131 target_to_host_sock_type(&type);
3133 ret = get_errno(socketpair(domain, type, protocol, tab));
3134 if (!is_error(ret)) {
3135 if (put_user_s32(tab[0], target_tab_addr)
3136 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3137 ret = -TARGET_EFAULT;
3139 return ret;
/* do_sendto() Must return target values and target errnos.
 * Handles both send() (target_addr == 0) and sendto().  The payload may
 * first be rewritten in place by the fd's data translator (netlink). */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
    }
    if (target_addr) {
        /* +1 leaves room for a forced NUL on AF_UNIX paths. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos.
 * Handles both recv() (target_addr == 0) and recvfrom().  On success the
 * received bytes are copied back to the guest buffer via unlock_user;
 * any failure path unlocks with length 0 so nothing is written back. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 *
 * The multiplexed socketcall entry point: fetches the per-call argument
 * vector from guest memory (count taken from ac[]), then dispatches to
 * the individual do_* helpers.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
#define N_SHM_REGIONS	32

/* Bookkeeping for guest shmat() mappings so shmdt()/shm teardown can
 * find the mapped range again. */
static struct shm_region {
    abi_ulong start;    /* guest address of the mapping */
    abi_ulong size;     /* mapping size in bytes */
    bool in_use;
} shm_regions[N_SHM_REGIONS];
/* Guest-layout view of struct semid_ds; the padding words exist on
 * 32-bit ABIs but not on PPC64. */
struct target_semid_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;
#if !defined(TARGET_PPC64)
  abi_ulong __unused1;
#endif
  abi_ulong sem_ctime;
#if !defined(TARGET_PPC64)
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;
  abi_ulong __unused3;
  abi_ulong __unused4;
};
/* Convert the ipc_perm embedded in a guest semid_ds at target_addr into
 * host form.  mode/__seq field widths differ per target ABI, hence the
 * 16- vs 32-bit swap variants. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Inverse of target_to_host_ipc_perm(): write a host ipc_perm into the
 * guest semid_ds at target_addr. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3392 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3393 abi_ulong target_addr)
3395 struct target_semid_ds *target_sd;
3397 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3398 return -TARGET_EFAULT;
3399 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3400 return -TARGET_EFAULT;
3401 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3402 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3403 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3404 unlock_user_struct(target_sd, target_addr, 0);
3405 return 0;
3408 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3409 struct semid_ds *host_sd)
3411 struct target_semid_ds *target_sd;
3413 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3414 return -TARGET_EFAULT;
3415 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3416 return -TARGET_EFAULT;
3417 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3418 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3419 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3420 unlock_user_struct(target_sd, target_addr, 1);
3421 return 0;
/* Guest-ABI layout of struct seminfo (IPC_INFO / SEM_INFO results). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
/* Copy a host struct seminfo out to guest memory at target_addr,
 * byteswapping each field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/* Host view of the semctl() fourth argument (passed by value). */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};

/* Guest view of the same union: pointer members are guest addresses. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
/* Fetch the guest's array of semaphore values for GETALL/SETALL.
 * The set size is not passed in, so it is queried via IPC_STAT first.
 * On success *host_array is a freshly allocated buffer that the caller
 * releases via host_to_target_semarray().  Returns 0, a host errno from
 * semctl, -TARGET_ENOMEM or -TARGET_EFAULT. */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
3507 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3508 unsigned short **host_array)
3510 int nsems;
3511 unsigned short *array;
3512 union semun semun;
3513 struct semid_ds semid_ds;
3514 int i, ret;
3516 semun.buf = &semid_ds;
3518 ret = semctl(semid, 0, IPC_STAT, semun);
3519 if (ret == -1)
3520 return get_errno(ret);
3522 nsems = semid_ds.sem_nsems;
3524 array = lock_user(VERIFY_WRITE, target_addr,
3525 nsems*sizeof(unsigned short), 0);
3526 if (!array)
3527 return -TARGET_EFAULT;
3529 for(i=0; i<nsems; i++) {
3530 __put_user((*host_array)[i], &array[i]);
3532 g_free(*host_array);
3533 unlock_user(array, target_addr, 1);
3535 return 0;
/* Implement the guest semctl(2): translate the semun argument in both
 * directions around the host call.  Returns the host result as a target
 * errno, or -TARGET_EINVAL for commands not handled here. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;    /* strip the IPC_64 version bits */

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        /* host_to_target_semarray() also releases the host array. */
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These commands ignore the semun argument entirely. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest-ABI layout of struct sembuf (one semop() operation). */
struct target_sembuf {
    unsigned short sem_num;     /* semaphore index in the set */
    short sem_op;               /* operation: add/wait-for-zero/decrement */
    short sem_flg;              /* IPC_NOWAIT / SEM_UNDO */
};
/* Copy an array of nsops guest sembufs at target_addr into host_sembuf,
 * byteswapping each field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
/* Perform semop(2) on behalf of the guest.
 * NOTE(review): nsops comes straight from the guest and sizes a stack
 * VLA here; presumably the kernel's SEMOPM cap keeps this small --
 * confirm before trusting with hostile guests. */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(semop(semid, sops, nsops));
}
/* Guest-ABI layout of struct msqid_ds.  On 32-bit targets each time
 * field is padded with an unused slot.  Layout is ABI; do not reorder. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;        /* last msgsnd() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;        /* last msgrcv() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;        /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;     /* current number of bytes on queue */
    abi_ulong msg_qnum;         /* number of messages on queue */
    abi_ulong msg_qbytes;       /* max bytes allowed on queue */
    abi_ulong msg_lspid;        /* pid of last msgsnd() */
    abi_ulong msg_lrpid;        /* pid of last msgrcv() */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
3671 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3672 abi_ulong target_addr)
3674 struct target_msqid_ds *target_md;
3676 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3677 return -TARGET_EFAULT;
3678 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3679 return -TARGET_EFAULT;
3680 host_md->msg_stime = tswapal(target_md->msg_stime);
3681 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3682 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3683 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3684 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3685 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3686 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3687 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3688 unlock_user_struct(target_md, target_addr, 0);
3689 return 0;
3692 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3693 struct msqid_ds *host_md)
3695 struct target_msqid_ds *target_md;
3697 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3698 return -TARGET_EFAULT;
3699 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3700 return -TARGET_EFAULT;
3701 target_md->msg_stime = tswapal(host_md->msg_stime);
3702 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3703 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3704 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3705 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3706 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3707 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3708 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3709 unlock_user_struct(target_md, target_addr, 1);
3710 return 0;
/* Guest-ABI layout of struct msginfo (IPC_INFO / MSG_INFO results). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
/* Copy a host struct msginfo out to guest memory at target_addr,
 * byteswapping each field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
/* Implement the guest msgctl(2): translate the msqid_ds (or msginfo)
 * argument in both directions around the host call.  Commands not
 * handled here return -TARGET_EINVAL. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;    /* strip the IPC_64 version bits */

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel interface takes a msqid_ds pointer for these too,
         * hence the cast of the msginfo buffer. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-ABI layout of struct msgbuf; mtext is a flexible payload that
 * extends past the declared single byte. */
struct target_msgbuf {
    abi_long mtype;     /* message type, must be > 0 */
    char mtext[1];      /* actual length is the msgsz argument */
};
/* Implement the guest msgsnd(2): copy the guest message into a host
 * msgbuf (byteswapping mtype) and forward it to the host syscall. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype preceding the payload. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/* Implement the guest msgrcv(2): receive into a host buffer, then copy
 * mtext and the byteswapped mtype back out to the guest msgbuf. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* sizeof(long) covers the host mtype preceding the payload. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* mtext starts right after the abi_ulong mtype in guest memory. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    /* NOTE(review): when msgrcv() failed this byteswaps the still
     * uninitialized host mtype into the guest struct -- looks harmless,
     * but confirm. */
    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always non-NULL here; the EFAULT path above returns
     * before reaching this label. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
3849 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3850 abi_ulong target_addr)
3852 struct target_shmid_ds *target_sd;
3854 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3855 return -TARGET_EFAULT;
3856 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3857 return -TARGET_EFAULT;
3858 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3859 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3860 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3861 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3862 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3863 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3864 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3865 unlock_user_struct(target_sd, target_addr, 0);
3866 return 0;
3869 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3870 struct shmid_ds *host_sd)
3872 struct target_shmid_ds *target_sd;
3874 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3875 return -TARGET_EFAULT;
3876 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3877 return -TARGET_EFAULT;
3878 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3879 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3880 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3881 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3882 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3883 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3884 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3885 unlock_user_struct(target_sd, target_addr, 1);
3886 return 0;
/* Guest-ABI layout of struct shminfo (IPC_INFO result). */
struct target_shminfo {
    abi_ulong shmmax;   /* max segment size */
    abi_ulong shmmin;   /* min segment size */
    abi_ulong shmmni;   /* max number of segments */
    abi_ulong shmseg;   /* max segments per process */
    abi_ulong shmall;   /* max total shared memory (pages) */
};
/* Copy a host struct shminfo out to guest memory at target_addr,
 * byteswapping each field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
/* Guest-ABI layout of struct shm_info (SHM_INFO result). */
struct target_shm_info {
    int used_ids;               /* number of existing segments */
    abi_ulong shm_tot;          /* total allocated shm (pages) */
    abi_ulong shm_rss;          /* resident shm (pages) */
    abi_ulong shm_swp;          /* swapped shm (pages) */
    abi_ulong swap_attempts;    /* unused since Linux 2.4 */
    abi_ulong swap_successes;   /* unused since Linux 2.4 */
};
/* Copy a host struct shm_info out to guest memory at target_addr,
 * byteswapping each field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
/* Implement the guest shmctl(2): translate the shmid_ds / shminfo /
 * shm_info argument in both directions around the host call.
 * Unhandled commands return -TARGET_EINVAL. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;    /* strip the IPC_64 version bits */

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The kernel interface takes a shmid_ds pointer for these too,
         * hence the casts of the info buffers. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
/* Attach a SysV shared memory segment into the guest address space.
 * Returns the guest address of the mapping, or a negative target errno.
 * Holds mmap_lock around the address-space manipulation. */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* Let the guest-address allocator pick a free range. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP lets shmat() replace the reservation made by
             * mmap_find_vma(). */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Record the attachment so do_shmdt() can undo the page flags.
     * NOTE(review): if every slot is busy the attach succeeds but is
     * not recorded -- confirm this is an acceptable limitation. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
/* Detach the segment the guest attached at shmaddr: clear the recorded
 * region's page flags (if we have a record) and call host shmdt(). */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
/* Demultiplex the legacy ipc(2) syscall onto the individual SysV IPC
 * helpers above.  "call" carries a version number in its top 16 bits
 * that selects between old and new calling conventions for msgrcv and
 * shmat. */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old convention: ptr points at a {msgp, msgtyp} pair. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned via the "third" pointer. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

/* First expansion of syscall_types.h: generate one STRUCT_foo enum
 * value per structure description. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit the thunk argtype descriptor array for each
 * structure (special-cased structures are converted by hand elsewhere). */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Custom handler for ioctls whose arguments need more than the generic
 * thunk-based structure conversion. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;             /* ioctl number in the guest ABI */
    unsigned int host_cmd;      /* corresponding host ioctl number */
    const char *name;           /* for "Unsupported ioctl" diagnostics */
    int access;                 /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;      /* non-NULL for special-cased ioctls */
    const argtype arg_type[5];  /* thunk description of the argument */
};

/* Data transfer direction, from the guest's point of view. */
#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Largest argument handled by the generic conversion buffer. */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/* Special-case handler for FS_IOC_FIEMAP.  Returns the ioctl result as
 * a target errno. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4277 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4278 int fd, int cmd, abi_long arg)
4280 const argtype *arg_type = ie->arg_type;
4281 int target_size;
4282 void *argptr;
4283 int ret;
4284 struct ifconf *host_ifconf;
4285 uint32_t outbufsz;
4286 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4287 int target_ifreq_size;
4288 int nb_ifreq;
4289 int free_buf = 0;
4290 int i;
4291 int target_ifc_len;
4292 abi_long target_ifc_buf;
4293 int host_ifc_len;
4294 char *host_ifc_buf;
4296 assert(arg_type[0] == TYPE_PTR);
4297 assert(ie->access == IOC_RW);
4299 arg_type++;
4300 target_size = thunk_type_size(arg_type, 0);
4302 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4303 if (!argptr)
4304 return -TARGET_EFAULT;
4305 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4306 unlock_user(argptr, arg, 0);
4308 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4309 target_ifc_len = host_ifconf->ifc_len;
4310 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4312 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4313 nb_ifreq = target_ifc_len / target_ifreq_size;
4314 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4316 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4317 if (outbufsz > MAX_STRUCT_SIZE) {
4318 /* We can't fit all the extents into the fixed size buffer.
4319 * Allocate one that is large enough and use it instead.
4321 host_ifconf = malloc(outbufsz);
4322 if (!host_ifconf) {
4323 return -TARGET_ENOMEM;
4325 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4326 free_buf = 1;
4328 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4330 host_ifconf->ifc_len = host_ifc_len;
4331 host_ifconf->ifc_buf = host_ifc_buf;
4333 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
4334 if (!is_error(ret)) {
4335 /* convert host ifc_len to target ifc_len */
4337 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4338 target_ifc_len = nb_ifreq * target_ifreq_size;
4339 host_ifconf->ifc_len = target_ifc_len;
4341 /* restore target ifc_buf */
4343 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4345 /* copy struct ifconf to target user */
4347 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4348 if (!argptr)
4349 return -TARGET_EFAULT;
4350 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4351 unlock_user(argptr, arg, target_size);
4353 /* copy ifreq[] to target user */
4355 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4356 for (i = 0; i < nb_ifreq ; i++) {
4357 thunk_convert(argptr + i * target_ifreq_size,
4358 host_ifc_buf + i * sizeof(struct ifreq),
4359 ifreq_arg_type, THUNK_TARGET);
4361 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4364 if (free_buf) {
4365 free(host_ifconf);
4368 return ret;
4371 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4372 int cmd, abi_long arg)
4374 void *argptr;
4375 struct dm_ioctl *host_dm;
4376 abi_long guest_data;
4377 uint32_t guest_data_size;
4378 int target_size;
4379 const argtype *arg_type = ie->arg_type;
4380 abi_long ret;
4381 void *big_buf = NULL;
4382 char *host_data;
4384 arg_type++;
4385 target_size = thunk_type_size(arg_type, 0);
4386 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4387 if (!argptr) {
4388 ret = -TARGET_EFAULT;
4389 goto out;
4391 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4392 unlock_user(argptr, arg, 0);
4394 /* buf_temp is too small, so fetch things into a bigger buffer */
4395 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4396 memcpy(big_buf, buf_temp, target_size);
4397 buf_temp = big_buf;
4398 host_dm = big_buf;
4400 guest_data = arg + host_dm->data_start;
4401 if ((guest_data - arg) < 0) {
4402 ret = -EINVAL;
4403 goto out;
4405 guest_data_size = host_dm->data_size - host_dm->data_start;
4406 host_data = (char*)host_dm + host_dm->data_start;
4408 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4409 switch (ie->host_cmd) {
4410 case DM_REMOVE_ALL:
4411 case DM_LIST_DEVICES:
4412 case DM_DEV_CREATE:
4413 case DM_DEV_REMOVE:
4414 case DM_DEV_SUSPEND:
4415 case DM_DEV_STATUS:
4416 case DM_DEV_WAIT:
4417 case DM_TABLE_STATUS:
4418 case DM_TABLE_CLEAR:
4419 case DM_TABLE_DEPS:
4420 case DM_LIST_VERSIONS:
4421 /* no input data */
4422 break;
4423 case DM_DEV_RENAME:
4424 case DM_DEV_SET_GEOMETRY:
4425 /* data contains only strings */
4426 memcpy(host_data, argptr, guest_data_size);
4427 break;
4428 case DM_TARGET_MSG:
4429 memcpy(host_data, argptr, guest_data_size);
4430 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4431 break;
4432 case DM_TABLE_LOAD:
4434 void *gspec = argptr;
4435 void *cur_data = host_data;
4436 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4437 int spec_size = thunk_type_size(arg_type, 0);
4438 int i;
4440 for (i = 0; i < host_dm->target_count; i++) {
4441 struct dm_target_spec *spec = cur_data;
4442 uint32_t next;
4443 int slen;
4445 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4446 slen = strlen((char*)gspec + spec_size) + 1;
4447 next = spec->next;
4448 spec->next = sizeof(*spec) + slen;
4449 strcpy((char*)&spec[1], gspec + spec_size);
4450 gspec += next;
4451 cur_data += spec->next;
4453 break;
4455 default:
4456 ret = -TARGET_EINVAL;
4457 unlock_user(argptr, guest_data, 0);
4458 goto out;
4460 unlock_user(argptr, guest_data, 0);
4462 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4463 if (!is_error(ret)) {
4464 guest_data = arg + host_dm->data_start;
4465 guest_data_size = host_dm->data_size - host_dm->data_start;
4466 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4467 switch (ie->host_cmd) {
4468 case DM_REMOVE_ALL:
4469 case DM_DEV_CREATE:
4470 case DM_DEV_REMOVE:
4471 case DM_DEV_RENAME:
4472 case DM_DEV_SUSPEND:
4473 case DM_DEV_STATUS:
4474 case DM_TABLE_LOAD:
4475 case DM_TABLE_CLEAR:
4476 case DM_TARGET_MSG:
4477 case DM_DEV_SET_GEOMETRY:
4478 /* no return data */
4479 break;
4480 case DM_LIST_DEVICES:
4482 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4483 uint32_t remaining_data = guest_data_size;
4484 void *cur_data = argptr;
4485 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4486 int nl_size = 12; /* can't use thunk_size due to alignment */
4488 while (1) {
4489 uint32_t next = nl->next;
4490 if (next) {
4491 nl->next = nl_size + (strlen(nl->name) + 1);
4493 if (remaining_data < nl->next) {
4494 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4495 break;
4497 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4498 strcpy(cur_data + nl_size, nl->name);
4499 cur_data += nl->next;
4500 remaining_data -= nl->next;
4501 if (!next) {
4502 break;
4504 nl = (void*)nl + next;
4506 break;
4508 case DM_DEV_WAIT:
4509 case DM_TABLE_STATUS:
4511 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4512 void *cur_data = argptr;
4513 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4514 int spec_size = thunk_type_size(arg_type, 0);
4515 int i;
4517 for (i = 0; i < host_dm->target_count; i++) {
4518 uint32_t next = spec->next;
4519 int slen = strlen((char*)&spec[1]) + 1;
4520 spec->next = (cur_data - argptr) + spec_size + slen;
4521 if (guest_data_size < spec->next) {
4522 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4523 break;
4525 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4526 strcpy(cur_data + spec_size, (char*)&spec[1]);
4527 cur_data = argptr + spec->next;
4528 spec = (void*)host_dm + host_dm->data_start + next;
4530 break;
4532 case DM_TABLE_DEPS:
4534 void *hdata = (void*)host_dm + host_dm->data_start;
4535 int count = *(uint32_t*)hdata;
4536 uint64_t *hdev = hdata + 8;
4537 uint64_t *gdev = argptr + 8;
4538 int i;
4540 *(uint32_t*)argptr = tswap32(count);
4541 for (i = 0; i < count; i++) {
4542 *gdev = tswap64(*hdev);
4543 gdev++;
4544 hdev++;
4546 break;
4548 case DM_LIST_VERSIONS:
4550 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4551 uint32_t remaining_data = guest_data_size;
4552 void *cur_data = argptr;
4553 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4554 int vers_size = thunk_type_size(arg_type, 0);
4556 while (1) {
4557 uint32_t next = vers->next;
4558 if (next) {
4559 vers->next = vers_size + (strlen(vers->name) + 1);
4561 if (remaining_data < vers->next) {
4562 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4563 break;
4565 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4566 strcpy(cur_data + vers_size, vers->name);
4567 cur_data += vers->next;
4568 remaining_data -= vers->next;
4569 if (!next) {
4570 break;
4572 vers = (void*)vers + next;
4574 break;
4576 default:
4577 unlock_user(argptr, guest_data, 0);
4578 ret = -TARGET_EINVAL;
4579 goto out;
4581 unlock_user(argptr, guest_data, guest_data_size);
4583 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4584 if (!argptr) {
4585 ret = -TARGET_EFAULT;
4586 goto out;
4588 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4589 unlock_user(argptr, arg, target_size);
4591 out:
4592 g_free(big_buf);
4593 return ret;
/*
 * Special handler for BLKPG: the argument is a struct blkpg_ioctl_arg
 * whose 'data' member is a guest pointer to a struct blkpg_partition.
 * Convert both levels to host format, redirect the inner pointer at a
 * local copy, and issue the host ioctl.
 * Returns 0 or a negative target errno.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    /* buf_temp holds the host-format outer structure. */
    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;  /* skip the TYPE_PTR marker; arg_type now describes the struct */
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data (a guest pointer after the outer convert) */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
4649 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4650 int fd, int cmd, abi_long arg)
4652 const argtype *arg_type = ie->arg_type;
4653 const StructEntry *se;
4654 const argtype *field_types;
4655 const int *dst_offsets, *src_offsets;
4656 int target_size;
4657 void *argptr;
4658 abi_ulong *target_rt_dev_ptr;
4659 unsigned long *host_rt_dev_ptr;
4660 abi_long ret;
4661 int i;
4663 assert(ie->access == IOC_W);
4664 assert(*arg_type == TYPE_PTR);
4665 arg_type++;
4666 assert(*arg_type == TYPE_STRUCT);
4667 target_size = thunk_type_size(arg_type, 0);
4668 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4669 if (!argptr) {
4670 return -TARGET_EFAULT;
4672 arg_type++;
4673 assert(*arg_type == (int)STRUCT_rtentry);
4674 se = struct_entries + *arg_type++;
4675 assert(se->convert[0] == NULL);
4676 /* convert struct here to be able to catch rt_dev string */
4677 field_types = se->field_types;
4678 dst_offsets = se->field_offsets[THUNK_HOST];
4679 src_offsets = se->field_offsets[THUNK_TARGET];
4680 for (i = 0; i < se->nb_fields; i++) {
4681 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4682 assert(*field_types == TYPE_PTRVOID);
4683 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4684 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4685 if (*target_rt_dev_ptr != 0) {
4686 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4687 tswapal(*target_rt_dev_ptr));
4688 if (!*host_rt_dev_ptr) {
4689 unlock_user(argptr, arg, 0);
4690 return -TARGET_EFAULT;
4692 } else {
4693 *host_rt_dev_ptr = 0;
4695 field_types++;
4696 continue;
4698 field_types = thunk_convert(buf_temp + dst_offsets[i],
4699 argptr + src_offsets[i],
4700 field_types, THUNK_HOST);
4702 unlock_user(argptr, arg, 0);
4704 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4705 if (*host_rt_dev_ptr != 0) {
4706 unlock_user((void *)*host_rt_dev_ptr,
4707 *target_rt_dev_ptr, 0);
4709 return ret;
4712 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4713 int fd, int cmd, abi_long arg)
4715 int sig = target_to_host_signal(arg);
4716 return get_errno(ioctl(fd, ie->host_cmd, sig));
/*
 * Table of all supported ioctls, generated from ioctls.h.  IOCTL()
 * entries use the generic thunk-based argument conversion; IOCTL_SPECIAL()
 * entries supply a custom do_ioctl_*() handler.  A zeroed entry
 * terminates the table.
 */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];  /* host-format staging buffer */
    int target_size;
    void *argptr;

    /* Linear search of the (zero-terminated) ioctl table by guest cmd. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    /* Entries registered via IOCTL_SPECIAL() carry their own handler. */
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* argument passed by value (or an opaque pointer-sized value) */
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        /* argument is a pointer to a struct described by the thunk data */
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* kernel writes the struct: call first, then copy out */
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* kernel reads the struct: copy in, then call */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* bidirectional: copy in, call, copy back out on success.
             * Unknown access modes deliberately fall through to here. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* Guest <-> host translation table for termios c_iflag bits. */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }  /* terminator */
};
/* Guest <-> host translation table for termios c_oflag bits.  The delay
 * fields (NLDLY/CRDLY/...) are multi-bit, so each value gets its own row. */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }  /* terminator */
};
/* Guest <-> host translation table for termios c_cflag bits; the CBAUD
 * and CSIZE fields are multi-bit, one row per encoded value. */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }  /* terminator */
};
/* Guest <-> host translation table for termios c_lflag bits. */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }  /* terminator */
};
/*
 * Convert a guest struct target_termios (src) into a host
 * struct host_termios (dst): byteswap and bit-translate the four flag
 * words, copy the line discipline, and remap each control character
 * from the guest's c_cc index to the host's.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Zero first: host slots with no guest counterpart stay cleared. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/*
 * Convert a host struct host_termios (src) into a guest
 * struct target_termios (dst).  Exact inverse of
 * target_to_host_termios(): bit-translate and byteswap the flag words,
 * copy the line discipline, and remap the control characters.
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Zero first: guest slots with no host counterpart stay cleared. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for termios: custom converters replace the generic
 * field-by-field conversion (index 0 = to-target, 1 = to-host). */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
4991 static bitmask_transtbl mmap_flags_tbl[] = {
4992 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4993 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4994 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4995 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
4996 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
4997 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
4998 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
4999 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5000 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5001 MAP_NORESERVE },
5002 { 0, 0, 0, 0 }
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads.  The table lives
 * in guest memory (see write_ldt) so descriptor words are stored in
 * guest byte order. */
static uint8_t *ldt_table;
5010 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5012 int size;
5013 void *p;
5015 if (!ldt_table)
5016 return 0;
5017 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5018 if (size > bytecount)
5019 size = bytecount;
5020 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5021 if (!p)
5022 return -TARGET_EFAULT;
5023 /* ??? Should this by byteswapped? */
5024 memcpy(p, ldt_table, size);
5025 unlock_user(p, ptr, size);
5026 return size;
/* XXX: add locking support */
/*
 * modify_ldt(func=1/0x11): install one LDT entry from the guest's
 * struct user_desc at ptr.  'oldmode' selects the legacy modify_ldt
 * semantics (no 'useable' bit, no TLS-style entries).  Packs the
 * fields into the two 32-bit halves of an x86 segment descriptor,
 * mirroring the Linux kernel's own write_ldt().
 * Returns 0 or a negative target errno.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the user_desc flag bits (same layout as the kernel ABI). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT (lazily, on first write; shared by all threads) */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the descriptor: low word = base[15:0] | limit[15:0],
     * high word = base[31:24,23:16], limit[19:16], type and flag bits. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5121 /* specific and weird i386 syscalls */
5122 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5123 unsigned long bytecount)
5125 abi_long ret;
5127 switch (func) {
5128 case 0:
5129 ret = read_ldt(ptr, bytecount);
5130 break;
5131 case 1:
5132 ret = write_ldt(env, ptr, bytecount, 1);
5133 break;
5134 case 0x11:
5135 ret = write_ldt(env, ptr, bytecount, 0);
5136 break;
5137 default:
5138 ret = -TARGET_ENOSYS;
5139 break;
5141 return ret;
#if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * Emulate set_thread_area(2): install a TLS descriptor into the
 * emulated GDT.  If the guest passes entry_number == -1, pick the first
 * free TLS slot and report it back through the guest struct.
 * Returns 0 or a negative target errno.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    /* VERIFY_WRITE: we may write the chosen entry_number back. */
    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Scan the TLS range of the GDT for an unused (zero) slot. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the user_desc flag bits (same layout as the kernel ABI). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the fields into a segment descriptor, as in write_ldt(). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/*
 * Emulate get_thread_area(2): read the TLS descriptor selected by
 * entry_number out of the emulated GDT and unpack it back into the
 * guest's struct user_desc.  Exact inverse of do_set_thread_area().
 * Returns 0 or a negative target errno.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack the descriptor bits (note the inverted sense of the
     * writable and present bits, mirroring the packing side). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Reassemble the user_desc flag word, limit and base address. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
#endif /* TARGET_I386 && TARGET_ABI32 */
5278 #ifndef TARGET_ABI32
5279 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5281 abi_long ret = 0;
5282 abi_ulong val;
5283 int idx;
5285 switch(code) {
5286 case TARGET_ARCH_SET_GS:
5287 case TARGET_ARCH_SET_FS:
5288 if (code == TARGET_ARCH_SET_GS)
5289 idx = R_GS;
5290 else
5291 idx = R_FS;
5292 cpu_x86_load_seg(env, idx, 0);
5293 env->segs[idx].base = addr;
5294 break;
5295 case TARGET_ARCH_GET_GS:
5296 case TARGET_ARCH_GET_FS:
5297 if (code == TARGET_ARCH_GET_GS)
5298 idx = R_GS;
5299 else
5300 idx = R_FS;
5301 val = env->segs[idx].base;
5302 if (put_user(val, addr, abi_ulong))
5303 ret = -TARGET_EFAULT;
5304 break;
5305 default:
5306 ret = -TARGET_EINVAL;
5307 break;
5309 return ret;
5311 #endif
5313 #endif /* defined(TARGET_I386) */
5315 #define NEW_STACK_SIZE 0x40000
/* Held by do_fork() for the duration of thread setup; a new child
 * spins on it in clone_func() until the parent has finished. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Parent -> child handoff data for CLONE_VM thread creation.  The
 * mutex/cond pair implements the "child is ready" handshake; tid is
 * filled in by the child for the parent to return. */
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;
/*
 * Entry point of a new guest thread created via clone(CLONE_VM).
 * Publishes the host tid, performs the ready handshake with the parent
 * (see do_fork), waits for the parent to release clone_lock, then
 * enters the CPU loop and never returns.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    /* Write the tid wherever CLONE_CHILD_SETTID / CLONE_PARENT_SETTID
     * requested it (pointers were stashed in info by do_fork). */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals (the parent blocked them all before pthread_create). */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM creates a host pthread
 * sharing this process's guest memory; without CLONE_VM a plain
 * fork() is performed.  vfork() is emulated as fork().
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent. */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;
        /* Keep the NPTL flags for our own bookkeeping but do not pass
         * them on to the host (the host thread is created by pthreads). */
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize. */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process. */
            rcu_after_fork();
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here. The parent process could
               theoretically read the TID in the child process before the child
               tid is set. This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping. We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
5485 /* warning : doesn't handle linux specific flags... */
5486 static int target_to_host_fcntl_cmd(int cmd)
5488 switch(cmd) {
5489 case TARGET_F_DUPFD:
5490 case TARGET_F_GETFD:
5491 case TARGET_F_SETFD:
5492 case TARGET_F_GETFL:
5493 case TARGET_F_SETFL:
5494 return cmd;
5495 case TARGET_F_GETLK:
5496 return F_GETLK;
5497 case TARGET_F_SETLK:
5498 return F_SETLK;
5499 case TARGET_F_SETLKW:
5500 return F_SETLKW;
5501 case TARGET_F_GETOWN:
5502 return F_GETOWN;
5503 case TARGET_F_SETOWN:
5504 return F_SETOWN;
5505 case TARGET_F_GETSIG:
5506 return F_GETSIG;
5507 case TARGET_F_SETSIG:
5508 return F_SETSIG;
5509 #if TARGET_ABI_BITS == 32
5510 case TARGET_F_GETLK64:
5511 return F_GETLK64;
5512 case TARGET_F_SETLK64:
5513 return F_SETLK64;
5514 case TARGET_F_SETLKW64:
5515 return F_SETLKW64;
5516 #endif
5517 case TARGET_F_SETLEASE:
5518 return F_SETLEASE;
5519 case TARGET_F_GETLEASE:
5520 return F_GETLEASE;
5521 #ifdef F_DUPFD_CLOEXEC
5522 case TARGET_F_DUPFD_CLOEXEC:
5523 return F_DUPFD_CLOEXEC;
5524 #endif
5525 case TARGET_F_NOTIFY:
5526 return F_NOTIFY;
5527 #ifdef F_GETOWN_EX
5528 case TARGET_F_GETOWN_EX:
5529 return F_GETOWN_EX;
5530 #endif
5531 #ifdef F_SETOWN_EX
5532 case TARGET_F_SETOWN_EX:
5533 return F_SETOWN_EX;
5534 #endif
5535 default:
5536 return -TARGET_EINVAL;
5538 return -TARGET_EINVAL;
/* Translation table for struct flock l_type values between guest and
 * host.  NOTE(review): the first and third (mask) fields are -1 here,
 * presumably because l_type is a plain value rather than a bit mask —
 * verify against the bitmask_transtbl conversion helpers. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    /* Terminator entry. */
    { 0, 0, 0, 0 }
/* Emulate the guest fcntl() family: convert guest-side arguments
 * (struct flock / flock64, flag bitmasks, f_owner_ex) into host format,
 * issue the host fcntl(), and convert any results back into guest
 * memory.  Returns the host result or -TARGET_E* on failure. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);
    /* Untranslatable command: propagate -TARGET_EINVAL. */
    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;
    switch(cmd) {
    case TARGET_F_GETLK:
        /* Copy the guest's query template in ... */
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            /* ... and on success copy the kernel-filled result back out. */
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type =
                host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        /* One-way conversion: guest flock in, no copy-back needed. */
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        /* NOTE(review): the ">> 1" applied to the converted l_type looks
         * suspicious (the 32-bit path above does not shift) — verify
         * against the target_flock64 l_type encoding. */
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type =
                host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* Result is a flag bitmask; convert host flags to guest flags. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        break;

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* These take a plain integer argument that needs no conversion. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Pass unrecognized commands straight through with the raw cmd. */
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    return ret;
5694 #ifdef USE_UID16
/* Narrow a 32-bit uid to the 16-bit range; uids that do not fit are
 * clamped to 65534 (the kernel's overflow uid). */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
/* Narrow a 32-bit gid to the 16-bit range; gids that do not fit are
 * clamped to 65534 (the kernel's overflow gid). */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
/* Widen a 16-bit uid: the 16-bit value 0xffff is the "unchanged"
 * sentinel and must map to -1; everything else passes through. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
/* Widen a 16-bit gid: the 16-bit value 0xffff is the "unchanged"
 * sentinel and must map to -1; everything else passes through. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Byte-swap a 16-bit uid/gid between guest and host order. */
static inline int tswapid(int id)
    return tswap16(id);
/* Store a 16-bit id into guest memory (UID16 ABI). */
#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5734 #else /* !USE_UID16 */
/* Without USE_UID16, ids are full 32-bit values, so the narrowing and
 * widening helpers are identity functions. */
static inline int high2lowuid(int uid)
    return uid;
static inline int high2lowgid(int gid)
    return gid;
static inline int low2highuid(int uid)
    return uid;
static inline int low2highgid(int gid)
    return gid;
/* Byte-swap a 32-bit uid/gid between guest and host order. */
static inline int tswapid(int id)
    return tswap32(id);
/* Store a 32-bit id into guest memory. */
#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5758 #endif /* USE_UID16 */
5760 /* We must do direct syscalls for setting UID/GID, because we want to
5761 * implement the Linux system call semantics of "change only for this thread",
5762 * not the libc/POSIX semantics of "change for all threads in process".
5763 * (See http://ewontfix.com/17/ for more details.)
5764 * We use the 32-bit version of the syscalls if present; if it is not
5765 * then either the host architecture supports 32-bit UIDs natively with
5766 * the standard syscall, or the 16-bit UID is the best we can do.
/* Prefer the explicit 32-bit-uid syscall numbers where the host kernel
 * provides them; otherwise fall back to the base syscall number. */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Direct syscall wrappers so the change applies only to the calling
 * thread (Linux semantics), not process-wide as libc would do. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time start-up initialization: register struct layouts with the
 * thunk conversion machinery, build the errno reverse-mapping table,
 * and patch host ioctl numbers whose size field depends on the target
 * struct layout. */
void syscall_init(void)
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;
    thunk_init(STRUCT_MAX);
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            /* Size can only be derived for pointer arguments. */
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
#endif
        ie++;
5846 #if TARGET_ABI_BITS == 32
/* Combine a guest register pair into a 64-bit file offset; which word
 * holds the high half depends on the guest's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
    uint64_t hi, lo;

#ifdef TARGET_WORDS_BIGENDIAN
    hi = word0;
    lo = word1;
#else
    hi = word1;
    lo = word0;
#endif
    return (hi << 32) | lo;
}
5855 #else /* TARGET_ABI_BITS == 32 */
/* 64-bit guests pass the whole offset in one register; the second
 * argument exists only to keep the call sites uniform. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
5860 #endif /* TARGET_ABI_BITS != 32 */
5862 #ifdef TARGET_NR_truncate64
/* Emulate truncate64: reassemble the 64-bit length from a guest
 * register pair and call the host truncate64(). */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
    /* Some ABIs require 64-bit register pairs to be aligned, shifting
     * the pair up by one argument slot. */
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
5874 #endif
5876 #ifdef TARGET_NR_ftruncate64
/* Emulate ftruncate64: reassemble the 64-bit length from a guest
 * register pair and call the host ftruncate64(). */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
    /* Some ABIs require 64-bit register pairs to be aligned, shifting
     * the pair up by one argument slot. */
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
5888 #endif
/* Copy a struct timespec from guest memory into host format.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
    struct target_timespec *target_ts;
    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* Read-only access: no copy-back on unlock. */
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
/* Copy a host struct timespec out to guest memory.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
    struct target_timespec *target_ts;
    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* Copy the updated struct back to guest memory. */
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
5916 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5917 abi_ulong target_addr)
5919 struct target_itimerspec *target_itspec;
5921 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5922 return -TARGET_EFAULT;
5925 host_itspec->it_interval.tv_sec =
5926 tswapal(target_itspec->it_interval.tv_sec);
5927 host_itspec->it_interval.tv_nsec =
5928 tswapal(target_itspec->it_interval.tv_nsec);
5929 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5930 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5932 unlock_user_struct(target_itspec, target_addr, 1);
5933 return 0;
/* Copy a host struct itimerspec out to guest memory.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
    struct target_itimerspec *target_itspec;
    if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
        return -TARGET_EFAULT;
    target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
    target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
    target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
    target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
    /* NOTE(review): unlocked with length 0 despite VERIFY_WRITE — confirm
     * the fields written above still reach guest memory on all host
     * configurations (debug memory-access paths in particular). */
    unlock_user_struct(target_itspec, target_addr, 0);
    return 0;
/* Copy a struct sigevent from guest memory into host format, remapping
 * the signal number.  Returns 0 on success, -TARGET_EFAULT on a bad
 * guest address. */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
    struct target_sigevent *target_sevp;
    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
    /* NOTE(review): unlock length is 1 (copy-back) although the struct
     * was locked VERIFY_READ and not modified — harmless but
     * inconsistent with the timespec helpers above. */
    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
5981 #if defined(TARGET_NR_mlockall)
5982 static inline int target_to_host_mlockall_arg(int arg)
5984 int result = 0;
5986 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
5987 result |= MCL_CURRENT;
5989 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
5990 result |= MCL_FUTURE;
5992 return result;
5994 #endif
/* Copy a host struct stat out to guest memory in the guest's stat64
 * (or plain stat) layout.  On 32-bit ARM the EABI layout differs from
 * the OABI one, so that case is handled separately.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;
        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero the whole struct first so padding/unused fields are 0. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif
        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    return 0;
6059 /* ??? Using host futex calls even when target atomic operations
6060 are not really atomic probably breaks things. However implementing
6061 futexes locally would make futexes shared between multiple processes
6062 tricky. However they're probably useless because guest atomic
6063 operations won't work either. */
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things. However implementing
   futexes locally would make futexes shared between multiple processes
   tricky. However they're probably useless because guest atomic
   operations won't work either. */
/* Emulate the guest futex() syscall on top of the host futex, byte
 * swapping the compare value and timeout as needed per sub-operation.
 * Returns the host result or -TARGET_ENOSYS for unknown operations. */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
    struct timespec ts, *pts;
    int base_op;
    /* ??? We assume FUTEX_* constants are the same on both host
       and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            /* NOTE(review): the return value of target_to_host_timespec
             * is ignored here — a faulting timeout pointer is silently
             * treated as valid.  Verify whether -EFAULT should be
             * propagated instead. */
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        /* val is compared against guest memory, so swap it to match. */
        return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, val3));
    case FUTEX_WAKE:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler. We do not need to tswap TIMEOUT
           since it's not compared to guest memory. */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(safe_futex(g2h(uaddr), op, val, pts,
                                    g2h(uaddr2),
                                    (base_op == FUTEX_CMP_REQUEUE
                                     ? tswap32(val3)
                                     : val3)));
    default:
        return -TARGET_ENOSYS;
6110 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(): build a host struct file_handle of the
 * guest-requested size, call the host syscall, and copy the (opaque)
 * handle plus mount id back to guest memory.
 * Returns the host result or -TARGET_EFAULT on a bad guest pointer. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;
    /* First field of the guest struct is handle_bytes (the buffer size). */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    fh = g_malloc0(total_size);
    fh->handle_bytes = size;
    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);
    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */
    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);
    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    return ret;
6162 #endif
6164 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(): copy the guest's file_handle into a
 * host buffer (byte swapping the header fields) and open it with the
 * translated flags.  Returns the new fd or -TARGET_E* on failure. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;
    /* First field of the guest struct is handle_bytes (the data size). */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    /* Duplicate so we can fix up the header without touching guest memory. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);
    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));
    g_free(fh);
    unlock_user(target_fh, handle, total_size);
    return ret;
6196 #endif
6198 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6200 /* signalfd siginfo conversion */
/* Convert a host signalfd_siginfo record to guest byte order and guest
 * signal numbering.  Callers may (and currently do) pass the same
 * buffer as both tinfo and info for an in-place conversion; all reads
 * therefore happen from `info` before the corresponding field of
 * `tinfo` is written. */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Fix: read the signal and code from the source record; the old
     * code inspected the destination, which was only correct because
     * the two pointers alias at every current call site. */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Fix: swap the source's errno, not the destination's. */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6240 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
6242 int i;
6244 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
6245 host_to_target_signalfd_siginfo(buf + i, buf + i);
6248 return len;
/* fd translation hooks for signalfd descriptors: convert records read
 * from the fd into guest format. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
/* Emulate signalfd4(): convert the guest sigset and flags, create the
 * host signalfd, and register a data-translation hook for the new fd.
 * Returns the fd or -TARGET_E* on failure. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;
    /* Only O_NONBLOCK and O_CLOEXEC are valid signalfd4 flags. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    target_to_host_sigset(&host_mask, target_mask);
    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Reads from this fd need host->guest siginfo conversion. */
        fd_trans_register(ret, &target_signalfd_trans);
    unlock_user_struct(target_mask, mask, 0);
    return ret;
6282 #endif
6284 /* Map host to target signal numbers for the wait family of syscalls.
6285 Assume all other status bits are the same. */
/* Map host to target signal numbers for the wait family of syscalls.
 * Only the embedded signal number is remapped; all other status bits
 * are assumed to share the same layout on host and target. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        int tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }
    return status;
}
/* Fill `fd` with the contents of /proc/self/cmdline, minus the first
 * NUL-terminated string (which is the qemu binary path rather than the
 * guest's argv[0]).  Returns 0 on success, -1 with errno set on error. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            /* Bug fix: only search the bytes actually read.  The old
             * code searched the whole (partly uninitialized) buffer, so
             * a stale NUL past the end of the data could be "found",
             * corrupting the skip and making nb_read go negative. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            /* NOTE(review): a short write is treated as an error rather
             * than retried — acceptable for the small regular files this
             * feeds, but worth confirming. */
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
/* Synthesize a guest view of /proc/self/maps into `fd`: parse the host
 * maps file, keep only ranges that correspond to valid guest memory,
 * and rewrite the addresses into guest address space.
 * Returns 0 on success, -1 if the host maps file cannot be opened. */
static int open_self_maps(void *cpu_env, int fd)
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;
    fp = fopen("/proc/self/maps", "r");
    if (fp == NULL) {
        return -1;
    while ((read = getline(&line, &len, fp)) != -1) {
        int fields, dev_maj, dev_min, inode;
        uint64_t min, max, offset;
        char flag_r, flag_w, flag_x, flag_p;
        char path[512] = "";
        fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
                        " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
                        &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
        /* Skip malformed lines (path is the optional 11th field). */
        if ((fields < 10) || (fields > 11)) {
            continue;
        if (h2g_valid(min)) {
            int flags = page_get_flags(h2g(min));
            /* Clamp ranges that extend beyond the guest address space. */
            max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            /* Label the guest stack the way the kernel would. */
            if (h2g(min) == ts->info->stack_limit) {
                pstrcpy(path, sizeof(path), " [stack]");
            dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
                    " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
                    h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
                    flag_x, flag_p, offset, dev_maj, dev_min, inode,
                    path[0] ? " " : "", path);
    free(line);
    fclose(fp);
    return 0;
/* Synthesize a minimal guest /proc/self/stat into `fd`.  Only the pid,
 * command name, and stack-start fields carry real values; the other
 * fields are emitted as 0 placeholders.
 * Returns 0 on success, -1 on a failed/short write. */
static int open_self_stat(void *cpu_env, int fd)
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;
    /* /proc/pid/stat has 44 space-separated fields. */
    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;
        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
    return 0;
/* Copy the guest's ELF auxiliary vector (saved at exec time on the
 * guest stack) into `fd`, emulating /proc/self/auxv.
 * Always returns 0; a failed lock or short write silently truncates. */
static int open_self_auxv(void *cpu_env, int fd)
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;
    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            len -= r;
            ptr += r;
        /* Rewind so the guest reads the file from the beginning. */
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    return 0;
/* Return 1 if `filename` names `entry` inside this process's own /proc
 * directory — either "/proc/self/<entry>" or "/proc/<our-pid>/<entry>"
 * — and 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    char pid_prefix[80];

    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* Numeric directory: accept only our own pid. */
        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(filename, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        filename += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
6488 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match predicate used for absolute /proc paths (cross-endian
 * emulation of files like /proc/net/route). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
/* Emulate /proc/net/route for cross-endian guests: copy the host file
 * into `fd`, byte swapping the dest/gateway/mask address columns.
 * Returns 0 on success, -1 if the host file cannot be opened. */
static int open_net_route(void *cpu_env, int fd)
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;
    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    /* read header */
    /* NOTE(review): this getline result is unchecked — on an empty file
     * `line` would still be NULL when passed to dprintf.  Confirm the
     * header line can be assumed present. */
    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);
    /* read routes */
    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        /* Addresses are stored in network-derived byte order; swap them
         * for the differently-ended guest. */
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    free(line);
    fclose(fp);
    return 0;
6530 #endif
/* openat() emulation with /proc interception: certain /proc/self files
 * (maps, stat, auxv, cmdline) and — on cross-endian hosts —
 * /proc/net/route must present guest-view contents, so they are
 * synthesized into an unlinked temporary file instead of opening the
 * host file.  Everything else falls through to safe_openat(). */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    /* /proc/self/exe must resolve to the guest executable, not qemu. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;
        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        /* Unlink immediately: the fd keeps the file alive, nothing else
         * should see it. */
        unlink(filename);
        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        lseek(fd, 0, SEEK_SET);
        return fd;
    return safe_openat(dirfd, path(pathname), flags, mode);
6592 #define TIMER_MAGIC 0x0caf0000
6593 #define TIMER_MAGIC_MASK 0xffff0000
6595 /* Convert QEMU provided timer ID back to internal 16bit index format */
/* Convert QEMU provided timer ID back to internal 16bit index format */
/* Validates the TIMER_MAGIC tag in the upper bits and that the low
 * 16-bit index is within g_posix_timers[]; returns the index or
 * -TARGET_EINVAL. */
static target_timer_t get_timer_id(abi_long arg)
    target_timer_t timerid = arg;
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    timerid &= 0xffff;
    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    return timerid;
6613 /* do_syscall() should always have a single exit point at the end so
6614 that actions, such as logging of syscall results, can be performed.
6615 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
6616 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
6617 abi_long arg2, abi_long arg3, abi_long arg4,
6618 abi_long arg5, abi_long arg6, abi_long arg7,
6619 abi_long arg8)
6621 CPUState *cpu = ENV_GET_CPU(cpu_env);
6622 abi_long ret;
6623 struct stat st;
6624 struct statfs stfs;
6625 void *p;
6627 #if defined(DEBUG_ERESTARTSYS)
6628 /* Debug-only code for exercising the syscall-restart code paths
6629 * in the per-architecture cpu main loops: restart every syscall
6630 * the guest makes once before letting it through.
6633 static int flag;
6635 flag = !flag;
6636 if (flag) {
6637 return -TARGET_ERESTARTSYS;
6640 #endif
6642 #ifdef DEBUG
6643 gemu_log("syscall %d", num);
6644 #endif
6645 if(do_strace)
6646 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
6648 switch(num) {
6649 case TARGET_NR_exit:
6650 /* In old applications this may be used to implement _exit(2).
6651 However in threaded applictions it is used for thread termination,
6652 and _exit_group is used for application termination.
6653 Do thread termination if we have more then one thread. */
6655 if (block_signals()) {
6656 ret = -TARGET_ERESTARTSYS;
6657 break;
6660 if (CPU_NEXT(first_cpu)) {
6661 TaskState *ts;
6663 cpu_list_lock();
6664 /* Remove the CPU from the list. */
6665 QTAILQ_REMOVE(&cpus, cpu, node);
6666 cpu_list_unlock();
6667 ts = cpu->opaque;
6668 if (ts->child_tidptr) {
6669 put_user_u32(0, ts->child_tidptr);
6670 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6671 NULL, NULL, 0);
6673 thread_cpu = NULL;
6674 object_unref(OBJECT(cpu));
6675 g_free(ts);
6676 rcu_unregister_thread();
6677 pthread_exit(NULL);
6679 #ifdef TARGET_GPROF
6680 _mcleanup();
6681 #endif
6682 gdb_exit(cpu_env, arg1);
6683 _exit(arg1);
6684 ret = 0; /* avoid warning */
6685 break;
6686 case TARGET_NR_read:
6687 if (arg3 == 0)
6688 ret = 0;
6689 else {
6690 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6691 goto efault;
6692 ret = get_errno(safe_read(arg1, p, arg3));
6693 if (ret >= 0 &&
6694 fd_trans_host_to_target_data(arg1)) {
6695 ret = fd_trans_host_to_target_data(arg1)(p, ret);
6697 unlock_user(p, arg2, ret);
6699 break;
6700 case TARGET_NR_write:
6701 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6702 goto efault;
6703 ret = get_errno(safe_write(arg1, p, arg3));
6704 unlock_user(p, arg2, 0);
6705 break;
6706 #ifdef TARGET_NR_open
6707 case TARGET_NR_open:
6708 if (!(p = lock_user_string(arg1)))
6709 goto efault;
6710 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6711 target_to_host_bitmask(arg2, fcntl_flags_tbl),
6712 arg3));
6713 fd_trans_unregister(ret);
6714 unlock_user(p, arg1, 0);
6715 break;
6716 #endif
6717 case TARGET_NR_openat:
6718 if (!(p = lock_user_string(arg2)))
6719 goto efault;
6720 ret = get_errno(do_openat(cpu_env, arg1, p,
6721 target_to_host_bitmask(arg3, fcntl_flags_tbl),
6722 arg4));
6723 fd_trans_unregister(ret);
6724 unlock_user(p, arg2, 0);
6725 break;
6726 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6727 case TARGET_NR_name_to_handle_at:
6728 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
6729 break;
6730 #endif
6731 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6732 case TARGET_NR_open_by_handle_at:
6733 ret = do_open_by_handle_at(arg1, arg2, arg3);
6734 fd_trans_unregister(ret);
6735 break;
6736 #endif
6737 case TARGET_NR_close:
6738 fd_trans_unregister(arg1);
6739 ret = get_errno(close(arg1));
6740 break;
6741 case TARGET_NR_brk:
6742 ret = do_brk(arg1);
6743 break;
6744 #ifdef TARGET_NR_fork
6745 case TARGET_NR_fork:
6746 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
6747 break;
6748 #endif
6749 #ifdef TARGET_NR_waitpid
6750 case TARGET_NR_waitpid:
6752 int status;
6753 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
6754 if (!is_error(ret) && arg2 && ret
6755 && put_user_s32(host_to_target_waitstatus(status), arg2))
6756 goto efault;
6758 break;
6759 #endif
6760 #ifdef TARGET_NR_waitid
6761 case TARGET_NR_waitid:
6763 siginfo_t info;
6764 info.si_pid = 0;
6765 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
6766 if (!is_error(ret) && arg3 && info.si_pid != 0) {
6767 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
6768 goto efault;
6769 host_to_target_siginfo(p, &info);
6770 unlock_user(p, arg3, sizeof(target_siginfo_t));
6773 break;
6774 #endif
6775 #ifdef TARGET_NR_creat /* not on alpha */
6776 case TARGET_NR_creat:
6777 if (!(p = lock_user_string(arg1)))
6778 goto efault;
6779 ret = get_errno(creat(p, arg2));
6780 fd_trans_unregister(ret);
6781 unlock_user(p, arg1, 0);
6782 break;
6783 #endif
6784 #ifdef TARGET_NR_link
6785 case TARGET_NR_link:
6787 void * p2;
6788 p = lock_user_string(arg1);
6789 p2 = lock_user_string(arg2);
6790 if (!p || !p2)
6791 ret = -TARGET_EFAULT;
6792 else
6793 ret = get_errno(link(p, p2));
6794 unlock_user(p2, arg2, 0);
6795 unlock_user(p, arg1, 0);
6797 break;
6798 #endif
6799 #if defined(TARGET_NR_linkat)
6800 case TARGET_NR_linkat:
6802 void * p2 = NULL;
6803 if (!arg2 || !arg4)
6804 goto efault;
6805 p = lock_user_string(arg2);
6806 p2 = lock_user_string(arg4);
6807 if (!p || !p2)
6808 ret = -TARGET_EFAULT;
6809 else
6810 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6811 unlock_user(p, arg2, 0);
6812 unlock_user(p2, arg4, 0);
6814 break;
6815 #endif
6816 #ifdef TARGET_NR_unlink
6817 case TARGET_NR_unlink:
6818 if (!(p = lock_user_string(arg1)))
6819 goto efault;
6820 ret = get_errno(unlink(p));
6821 unlock_user(p, arg1, 0);
6822 break;
6823 #endif
6824 #if defined(TARGET_NR_unlinkat)
6825 case TARGET_NR_unlinkat:
6826 if (!(p = lock_user_string(arg2)))
6827 goto efault;
6828 ret = get_errno(unlinkat(arg1, p, arg3));
6829 unlock_user(p, arg2, 0);
6830 break;
6831 #endif
6832 case TARGET_NR_execve:
6834 char **argp, **envp;
6835 int argc, envc;
6836 abi_ulong gp;
6837 abi_ulong guest_argp;
6838 abi_ulong guest_envp;
6839 abi_ulong addr;
6840 char **q;
6841 int total_size = 0;
6843 argc = 0;
6844 guest_argp = arg2;
6845 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6846 if (get_user_ual(addr, gp))
6847 goto efault;
6848 if (!addr)
6849 break;
6850 argc++;
6852 envc = 0;
6853 guest_envp = arg3;
6854 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6855 if (get_user_ual(addr, gp))
6856 goto efault;
6857 if (!addr)
6858 break;
6859 envc++;
6862 argp = alloca((argc + 1) * sizeof(void *));
6863 envp = alloca((envc + 1) * sizeof(void *));
6865 for (gp = guest_argp, q = argp; gp;
6866 gp += sizeof(abi_ulong), q++) {
6867 if (get_user_ual(addr, gp))
6868 goto execve_efault;
6869 if (!addr)
6870 break;
6871 if (!(*q = lock_user_string(addr)))
6872 goto execve_efault;
6873 total_size += strlen(*q) + 1;
6875 *q = NULL;
6877 for (gp = guest_envp, q = envp; gp;
6878 gp += sizeof(abi_ulong), q++) {
6879 if (get_user_ual(addr, gp))
6880 goto execve_efault;
6881 if (!addr)
6882 break;
6883 if (!(*q = lock_user_string(addr)))
6884 goto execve_efault;
6885 total_size += strlen(*q) + 1;
6887 *q = NULL;
6889 if (!(p = lock_user_string(arg1)))
6890 goto execve_efault;
6891 /* Although execve() is not an interruptible syscall it is
6892 * a special case where we must use the safe_syscall wrapper:
6893 * if we allow a signal to happen before we make the host
6894 * syscall then we will 'lose' it, because at the point of
6895 * execve the process leaves QEMU's control. So we use the
6896 * safe syscall wrapper to ensure that we either take the
6897 * signal as a guest signal, or else it does not happen
6898 * before the execve completes and makes it the other
6899 * program's problem.
6901 ret = get_errno(safe_execve(p, argp, envp));
6902 unlock_user(p, arg1, 0);
6904 goto execve_end;
6906 execve_efault:
6907 ret = -TARGET_EFAULT;
6909 execve_end:
6910 for (gp = guest_argp, q = argp; *q;
6911 gp += sizeof(abi_ulong), q++) {
6912 if (get_user_ual(addr, gp)
6913 || !addr)
6914 break;
6915 unlock_user(*q, addr, 0);
6917 for (gp = guest_envp, q = envp; *q;
6918 gp += sizeof(abi_ulong), q++) {
6919 if (get_user_ual(addr, gp)
6920 || !addr)
6921 break;
6922 unlock_user(*q, addr, 0);
6925 break;
6926 case TARGET_NR_chdir:
6927 if (!(p = lock_user_string(arg1)))
6928 goto efault;
6929 ret = get_errno(chdir(p));
6930 unlock_user(p, arg1, 0);
6931 break;
6932 #ifdef TARGET_NR_time
6933 case TARGET_NR_time:
6935 time_t host_time;
6936 ret = get_errno(time(&host_time));
6937 if (!is_error(ret)
6938 && arg1
6939 && put_user_sal(host_time, arg1))
6940 goto efault;
6942 break;
6943 #endif
6944 #ifdef TARGET_NR_mknod
6945 case TARGET_NR_mknod:
6946 if (!(p = lock_user_string(arg1)))
6947 goto efault;
6948 ret = get_errno(mknod(p, arg2, arg3));
6949 unlock_user(p, arg1, 0);
6950 break;
6951 #endif
6952 #if defined(TARGET_NR_mknodat)
6953 case TARGET_NR_mknodat:
6954 if (!(p = lock_user_string(arg2)))
6955 goto efault;
6956 ret = get_errno(mknodat(arg1, p, arg3, arg4));
6957 unlock_user(p, arg2, 0);
6958 break;
6959 #endif
6960 #ifdef TARGET_NR_chmod
6961 case TARGET_NR_chmod:
6962 if (!(p = lock_user_string(arg1)))
6963 goto efault;
6964 ret = get_errno(chmod(p, arg2));
6965 unlock_user(p, arg1, 0);
6966 break;
6967 #endif
6968 #ifdef TARGET_NR_break
6969 case TARGET_NR_break:
6970 goto unimplemented;
6971 #endif
6972 #ifdef TARGET_NR_oldstat
6973 case TARGET_NR_oldstat:
6974 goto unimplemented;
6975 #endif
6976 case TARGET_NR_lseek:
6977 ret = get_errno(lseek(arg1, arg2, arg3));
6978 break;
6979 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
6980 /* Alpha specific */
6981 case TARGET_NR_getxpid:
6982 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
6983 ret = get_errno(getpid());
6984 break;
6985 #endif
6986 #ifdef TARGET_NR_getpid
6987 case TARGET_NR_getpid:
6988 ret = get_errno(getpid());
6989 break;
6990 #endif
6991 case TARGET_NR_mount:
6993 /* need to look at the data field */
6994 void *p2, *p3;
6996 if (arg1) {
6997 p = lock_user_string(arg1);
6998 if (!p) {
6999 goto efault;
7001 } else {
7002 p = NULL;
7005 p2 = lock_user_string(arg2);
7006 if (!p2) {
7007 if (arg1) {
7008 unlock_user(p, arg1, 0);
7010 goto efault;
7013 if (arg3) {
7014 p3 = lock_user_string(arg3);
7015 if (!p3) {
7016 if (arg1) {
7017 unlock_user(p, arg1, 0);
7019 unlock_user(p2, arg2, 0);
7020 goto efault;
7022 } else {
7023 p3 = NULL;
7026 /* FIXME - arg5 should be locked, but it isn't clear how to
7027 * do that since it's not guaranteed to be a NULL-terminated
7028 * string.
7030 if (!arg5) {
7031 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7032 } else {
7033 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7035 ret = get_errno(ret);
7037 if (arg1) {
7038 unlock_user(p, arg1, 0);
7040 unlock_user(p2, arg2, 0);
7041 if (arg3) {
7042 unlock_user(p3, arg3, 0);
7045 break;
7046 #ifdef TARGET_NR_umount
7047 case TARGET_NR_umount:
7048 if (!(p = lock_user_string(arg1)))
7049 goto efault;
7050 ret = get_errno(umount(p));
7051 unlock_user(p, arg1, 0);
7052 break;
7053 #endif
7054 #ifdef TARGET_NR_stime /* not on alpha */
7055 case TARGET_NR_stime:
7057 time_t host_time;
7058 if (get_user_sal(host_time, arg1))
7059 goto efault;
7060 ret = get_errno(stime(&host_time));
7062 break;
7063 #endif
7064 case TARGET_NR_ptrace:
7065 goto unimplemented;
7066 #ifdef TARGET_NR_alarm /* not on alpha */
7067 case TARGET_NR_alarm:
7068 ret = alarm(arg1);
7069 break;
7070 #endif
7071 #ifdef TARGET_NR_oldfstat
7072 case TARGET_NR_oldfstat:
7073 goto unimplemented;
7074 #endif
7075 #ifdef TARGET_NR_pause /* not on alpha */
7076 case TARGET_NR_pause:
7077 if (!block_signals()) {
7078 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7080 ret = -TARGET_EINTR;
7081 break;
7082 #endif
7083 #ifdef TARGET_NR_utime
7084 case TARGET_NR_utime:
7086 struct utimbuf tbuf, *host_tbuf;
7087 struct target_utimbuf *target_tbuf;
7088 if (arg2) {
7089 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7090 goto efault;
7091 tbuf.actime = tswapal(target_tbuf->actime);
7092 tbuf.modtime = tswapal(target_tbuf->modtime);
7093 unlock_user_struct(target_tbuf, arg2, 0);
7094 host_tbuf = &tbuf;
7095 } else {
7096 host_tbuf = NULL;
7098 if (!(p = lock_user_string(arg1)))
7099 goto efault;
7100 ret = get_errno(utime(p, host_tbuf));
7101 unlock_user(p, arg1, 0);
7103 break;
7104 #endif
7105 #ifdef TARGET_NR_utimes
7106 case TARGET_NR_utimes:
7108 struct timeval *tvp, tv[2];
7109 if (arg2) {
7110 if (copy_from_user_timeval(&tv[0], arg2)
7111 || copy_from_user_timeval(&tv[1],
7112 arg2 + sizeof(struct target_timeval)))
7113 goto efault;
7114 tvp = tv;
7115 } else {
7116 tvp = NULL;
7118 if (!(p = lock_user_string(arg1)))
7119 goto efault;
7120 ret = get_errno(utimes(p, tvp));
7121 unlock_user(p, arg1, 0);
7123 break;
7124 #endif
7125 #if defined(TARGET_NR_futimesat)
7126 case TARGET_NR_futimesat:
7128 struct timeval *tvp, tv[2];
7129 if (arg3) {
7130 if (copy_from_user_timeval(&tv[0], arg3)
7131 || copy_from_user_timeval(&tv[1],
7132 arg3 + sizeof(struct target_timeval)))
7133 goto efault;
7134 tvp = tv;
7135 } else {
7136 tvp = NULL;
7138 if (!(p = lock_user_string(arg2)))
7139 goto efault;
7140 ret = get_errno(futimesat(arg1, path(p), tvp));
7141 unlock_user(p, arg2, 0);
7143 break;
7144 #endif
7145 #ifdef TARGET_NR_stty
7146 case TARGET_NR_stty:
7147 goto unimplemented;
7148 #endif
7149 #ifdef TARGET_NR_gtty
7150 case TARGET_NR_gtty:
7151 goto unimplemented;
7152 #endif
7153 #ifdef TARGET_NR_access
7154 case TARGET_NR_access:
7155 if (!(p = lock_user_string(arg1)))
7156 goto efault;
7157 ret = get_errno(access(path(p), arg2));
7158 unlock_user(p, arg1, 0);
7159 break;
7160 #endif
7161 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7162 case TARGET_NR_faccessat:
7163 if (!(p = lock_user_string(arg2)))
7164 goto efault;
7165 ret = get_errno(faccessat(arg1, p, arg3, 0));
7166 unlock_user(p, arg2, 0);
7167 break;
7168 #endif
7169 #ifdef TARGET_NR_nice /* not on alpha */
7170 case TARGET_NR_nice:
7171 ret = get_errno(nice(arg1));
7172 break;
7173 #endif
7174 #ifdef TARGET_NR_ftime
7175 case TARGET_NR_ftime:
7176 goto unimplemented;
7177 #endif
7178 case TARGET_NR_sync:
7179 sync();
7180 ret = 0;
7181 break;
7182 case TARGET_NR_kill:
7183 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7184 break;
7185 #ifdef TARGET_NR_rename
7186 case TARGET_NR_rename:
7188 void *p2;
7189 p = lock_user_string(arg1);
7190 p2 = lock_user_string(arg2);
7191 if (!p || !p2)
7192 ret = -TARGET_EFAULT;
7193 else
7194 ret = get_errno(rename(p, p2));
7195 unlock_user(p2, arg2, 0);
7196 unlock_user(p, arg1, 0);
7198 break;
7199 #endif
7200 #if defined(TARGET_NR_renameat)
7201 case TARGET_NR_renameat:
7203 void *p2;
7204 p = lock_user_string(arg2);
7205 p2 = lock_user_string(arg4);
7206 if (!p || !p2)
7207 ret = -TARGET_EFAULT;
7208 else
7209 ret = get_errno(renameat(arg1, p, arg3, p2));
7210 unlock_user(p2, arg4, 0);
7211 unlock_user(p, arg2, 0);
7213 break;
7214 #endif
7215 #ifdef TARGET_NR_mkdir
7216 case TARGET_NR_mkdir:
7217 if (!(p = lock_user_string(arg1)))
7218 goto efault;
7219 ret = get_errno(mkdir(p, arg2));
7220 unlock_user(p, arg1, 0);
7221 break;
7222 #endif
7223 #if defined(TARGET_NR_mkdirat)
7224 case TARGET_NR_mkdirat:
7225 if (!(p = lock_user_string(arg2)))
7226 goto efault;
7227 ret = get_errno(mkdirat(arg1, p, arg3));
7228 unlock_user(p, arg2, 0);
7229 break;
7230 #endif
7231 #ifdef TARGET_NR_rmdir
7232 case TARGET_NR_rmdir:
7233 if (!(p = lock_user_string(arg1)))
7234 goto efault;
7235 ret = get_errno(rmdir(p));
7236 unlock_user(p, arg1, 0);
7237 break;
7238 #endif
7239 case TARGET_NR_dup:
7240 ret = get_errno(dup(arg1));
7241 if (ret >= 0) {
7242 fd_trans_dup(arg1, ret);
7244 break;
7245 #ifdef TARGET_NR_pipe
7246 case TARGET_NR_pipe:
7247 ret = do_pipe(cpu_env, arg1, 0, 0);
7248 break;
7249 #endif
7250 #ifdef TARGET_NR_pipe2
7251 case TARGET_NR_pipe2:
7252 ret = do_pipe(cpu_env, arg1,
7253 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7254 break;
7255 #endif
7256 case TARGET_NR_times:
7258 struct target_tms *tmsp;
7259 struct tms tms;
7260 ret = get_errno(times(&tms));
7261 if (arg1) {
7262 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7263 if (!tmsp)
7264 goto efault;
7265 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7266 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7267 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7268 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7270 if (!is_error(ret))
7271 ret = host_to_target_clock_t(ret);
7273 break;
7274 #ifdef TARGET_NR_prof
7275 case TARGET_NR_prof:
7276 goto unimplemented;
7277 #endif
7278 #ifdef TARGET_NR_signal
7279 case TARGET_NR_signal:
7280 goto unimplemented;
7281 #endif
7282 case TARGET_NR_acct:
7283 if (arg1 == 0) {
7284 ret = get_errno(acct(NULL));
7285 } else {
7286 if (!(p = lock_user_string(arg1)))
7287 goto efault;
7288 ret = get_errno(acct(path(p)));
7289 unlock_user(p, arg1, 0);
7291 break;
7292 #ifdef TARGET_NR_umount2
7293 case TARGET_NR_umount2:
7294 if (!(p = lock_user_string(arg1)))
7295 goto efault;
7296 ret = get_errno(umount2(p, arg2));
7297 unlock_user(p, arg1, 0);
7298 break;
7299 #endif
7300 #ifdef TARGET_NR_lock
7301 case TARGET_NR_lock:
7302 goto unimplemented;
7303 #endif
7304 case TARGET_NR_ioctl:
7305 ret = do_ioctl(arg1, arg2, arg3);
7306 break;
7307 case TARGET_NR_fcntl:
7308 ret = do_fcntl(arg1, arg2, arg3);
7309 break;
7310 #ifdef TARGET_NR_mpx
7311 case TARGET_NR_mpx:
7312 goto unimplemented;
7313 #endif
7314 case TARGET_NR_setpgid:
7315 ret = get_errno(setpgid(arg1, arg2));
7316 break;
7317 #ifdef TARGET_NR_ulimit
7318 case TARGET_NR_ulimit:
7319 goto unimplemented;
7320 #endif
7321 #ifdef TARGET_NR_oldolduname
7322 case TARGET_NR_oldolduname:
7323 goto unimplemented;
7324 #endif
7325 case TARGET_NR_umask:
7326 ret = get_errno(umask(arg1));
7327 break;
7328 case TARGET_NR_chroot:
7329 if (!(p = lock_user_string(arg1)))
7330 goto efault;
7331 ret = get_errno(chroot(p));
7332 unlock_user(p, arg1, 0);
7333 break;
7334 #ifdef TARGET_NR_ustat
7335 case TARGET_NR_ustat:
7336 goto unimplemented;
7337 #endif
7338 #ifdef TARGET_NR_dup2
7339 case TARGET_NR_dup2:
7340 ret = get_errno(dup2(arg1, arg2));
7341 if (ret >= 0) {
7342 fd_trans_dup(arg1, arg2);
7344 break;
7345 #endif
7346 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7347 case TARGET_NR_dup3:
7348 ret = get_errno(dup3(arg1, arg2, arg3));
7349 if (ret >= 0) {
7350 fd_trans_dup(arg1, arg2);
7352 break;
7353 #endif
7354 #ifdef TARGET_NR_getppid /* not on alpha */
7355 case TARGET_NR_getppid:
7356 ret = get_errno(getppid());
7357 break;
7358 #endif
7359 #ifdef TARGET_NR_getpgrp
7360 case TARGET_NR_getpgrp:
7361 ret = get_errno(getpgrp());
7362 break;
7363 #endif
7364 case TARGET_NR_setsid:
7365 ret = get_errno(setsid());
7366 break;
7367 #ifdef TARGET_NR_sigaction
7368 case TARGET_NR_sigaction:
7370 #if defined(TARGET_ALPHA)
7371 struct target_sigaction act, oact, *pact = 0;
7372 struct target_old_sigaction *old_act;
7373 if (arg2) {
7374 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7375 goto efault;
7376 act._sa_handler = old_act->_sa_handler;
7377 target_siginitset(&act.sa_mask, old_act->sa_mask);
7378 act.sa_flags = old_act->sa_flags;
7379 act.sa_restorer = 0;
7380 unlock_user_struct(old_act, arg2, 0);
7381 pact = &act;
7383 ret = get_errno(do_sigaction(arg1, pact, &oact));
7384 if (!is_error(ret) && arg3) {
7385 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7386 goto efault;
7387 old_act->_sa_handler = oact._sa_handler;
7388 old_act->sa_mask = oact.sa_mask.sig[0];
7389 old_act->sa_flags = oact.sa_flags;
7390 unlock_user_struct(old_act, arg3, 1);
7392 #elif defined(TARGET_MIPS)
7393 struct target_sigaction act, oact, *pact, *old_act;
7395 if (arg2) {
7396 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7397 goto efault;
7398 act._sa_handler = old_act->_sa_handler;
7399 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7400 act.sa_flags = old_act->sa_flags;
7401 unlock_user_struct(old_act, arg2, 0);
7402 pact = &act;
7403 } else {
7404 pact = NULL;
7407 ret = get_errno(do_sigaction(arg1, pact, &oact));
7409 if (!is_error(ret) && arg3) {
7410 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7411 goto efault;
7412 old_act->_sa_handler = oact._sa_handler;
7413 old_act->sa_flags = oact.sa_flags;
7414 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7415 old_act->sa_mask.sig[1] = 0;
7416 old_act->sa_mask.sig[2] = 0;
7417 old_act->sa_mask.sig[3] = 0;
7418 unlock_user_struct(old_act, arg3, 1);
7420 #else
7421 struct target_old_sigaction *old_act;
7422 struct target_sigaction act, oact, *pact;
7423 if (arg2) {
7424 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7425 goto efault;
7426 act._sa_handler = old_act->_sa_handler;
7427 target_siginitset(&act.sa_mask, old_act->sa_mask);
7428 act.sa_flags = old_act->sa_flags;
7429 act.sa_restorer = old_act->sa_restorer;
7430 unlock_user_struct(old_act, arg2, 0);
7431 pact = &act;
7432 } else {
7433 pact = NULL;
7435 ret = get_errno(do_sigaction(arg1, pact, &oact));
7436 if (!is_error(ret) && arg3) {
7437 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7438 goto efault;
7439 old_act->_sa_handler = oact._sa_handler;
7440 old_act->sa_mask = oact.sa_mask.sig[0];
7441 old_act->sa_flags = oact.sa_flags;
7442 old_act->sa_restorer = oact.sa_restorer;
7443 unlock_user_struct(old_act, arg3, 1);
7445 #endif
7447 break;
7448 #endif
7449 case TARGET_NR_rt_sigaction:
7451 #if defined(TARGET_ALPHA)
7452 struct target_sigaction act, oact, *pact = 0;
7453 struct target_rt_sigaction *rt_act;
7454 /* ??? arg4 == sizeof(sigset_t). */
7455 if (arg2) {
7456 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7457 goto efault;
7458 act._sa_handler = rt_act->_sa_handler;
7459 act.sa_mask = rt_act->sa_mask;
7460 act.sa_flags = rt_act->sa_flags;
7461 act.sa_restorer = arg5;
7462 unlock_user_struct(rt_act, arg2, 0);
7463 pact = &act;
7465 ret = get_errno(do_sigaction(arg1, pact, &oact));
7466 if (!is_error(ret) && arg3) {
7467 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7468 goto efault;
7469 rt_act->_sa_handler = oact._sa_handler;
7470 rt_act->sa_mask = oact.sa_mask;
7471 rt_act->sa_flags = oact.sa_flags;
7472 unlock_user_struct(rt_act, arg3, 1);
7474 #else
7475 struct target_sigaction *act;
7476 struct target_sigaction *oact;
7478 if (arg2) {
7479 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
7480 goto efault;
7481 } else
7482 act = NULL;
7483 if (arg3) {
7484 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7485 ret = -TARGET_EFAULT;
7486 goto rt_sigaction_fail;
7488 } else
7489 oact = NULL;
7490 ret = get_errno(do_sigaction(arg1, act, oact));
7491 rt_sigaction_fail:
7492 if (act)
7493 unlock_user_struct(act, arg2, 0);
7494 if (oact)
7495 unlock_user_struct(oact, arg3, 1);
7496 #endif
7498 break;
7499 #ifdef TARGET_NR_sgetmask /* not on alpha */
7500 case TARGET_NR_sgetmask:
7502 sigset_t cur_set;
7503 abi_ulong target_set;
7504 ret = do_sigprocmask(0, NULL, &cur_set);
7505 if (!ret) {
7506 host_to_target_old_sigset(&target_set, &cur_set);
7507 ret = target_set;
7510 break;
7511 #endif
7512 #ifdef TARGET_NR_ssetmask /* not on alpha */
7513 case TARGET_NR_ssetmask:
7515 sigset_t set, oset, cur_set;
7516 abi_ulong target_set = arg1;
7517 /* We only have one word of the new mask so we must read
7518 * the rest of it with do_sigprocmask() and OR in this word.
7519 * We are guaranteed that a do_sigprocmask() that only queries
7520 * the signal mask will not fail.
7522 ret = do_sigprocmask(0, NULL, &cur_set);
7523 assert(!ret);
7524 target_to_host_old_sigset(&set, &target_set);
7525 sigorset(&set, &set, &cur_set);
7526 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7527 if (!ret) {
7528 host_to_target_old_sigset(&target_set, &oset);
7529 ret = target_set;
7532 break;
7533 #endif
7534 #ifdef TARGET_NR_sigprocmask
7535 case TARGET_NR_sigprocmask:
7537 #if defined(TARGET_ALPHA)
7538 sigset_t set, oldset;
7539 abi_ulong mask;
7540 int how;
7542 switch (arg1) {
7543 case TARGET_SIG_BLOCK:
7544 how = SIG_BLOCK;
7545 break;
7546 case TARGET_SIG_UNBLOCK:
7547 how = SIG_UNBLOCK;
7548 break;
7549 case TARGET_SIG_SETMASK:
7550 how = SIG_SETMASK;
7551 break;
7552 default:
7553 ret = -TARGET_EINVAL;
7554 goto fail;
7556 mask = arg2;
7557 target_to_host_old_sigset(&set, &mask);
7559 ret = do_sigprocmask(how, &set, &oldset);
7560 if (!is_error(ret)) {
7561 host_to_target_old_sigset(&mask, &oldset);
7562 ret = mask;
7563 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7565 #else
7566 sigset_t set, oldset, *set_ptr;
7567 int how;
7569 if (arg2) {
7570 switch (arg1) {
7571 case TARGET_SIG_BLOCK:
7572 how = SIG_BLOCK;
7573 break;
7574 case TARGET_SIG_UNBLOCK:
7575 how = SIG_UNBLOCK;
7576 break;
7577 case TARGET_SIG_SETMASK:
7578 how = SIG_SETMASK;
7579 break;
7580 default:
7581 ret = -TARGET_EINVAL;
7582 goto fail;
7584 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7585 goto efault;
7586 target_to_host_old_sigset(&set, p);
7587 unlock_user(p, arg2, 0);
7588 set_ptr = &set;
7589 } else {
7590 how = 0;
7591 set_ptr = NULL;
7593 ret = do_sigprocmask(how, set_ptr, &oldset);
7594 if (!is_error(ret) && arg3) {
7595 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7596 goto efault;
7597 host_to_target_old_sigset(p, &oldset);
7598 unlock_user(p, arg3, sizeof(target_sigset_t));
7600 #endif
7602 break;
7603 #endif
7604 case TARGET_NR_rt_sigprocmask:
7606 int how = arg1;
7607 sigset_t set, oldset, *set_ptr;
7609 if (arg2) {
7610 switch(how) {
7611 case TARGET_SIG_BLOCK:
7612 how = SIG_BLOCK;
7613 break;
7614 case TARGET_SIG_UNBLOCK:
7615 how = SIG_UNBLOCK;
7616 break;
7617 case TARGET_SIG_SETMASK:
7618 how = SIG_SETMASK;
7619 break;
7620 default:
7621 ret = -TARGET_EINVAL;
7622 goto fail;
7624 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7625 goto efault;
7626 target_to_host_sigset(&set, p);
7627 unlock_user(p, arg2, 0);
7628 set_ptr = &set;
7629 } else {
7630 how = 0;
7631 set_ptr = NULL;
7633 ret = do_sigprocmask(how, set_ptr, &oldset);
7634 if (!is_error(ret) && arg3) {
7635 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7636 goto efault;
7637 host_to_target_sigset(p, &oldset);
7638 unlock_user(p, arg3, sizeof(target_sigset_t));
7641 break;
7642 #ifdef TARGET_NR_sigpending
7643 case TARGET_NR_sigpending:
7645 sigset_t set;
7646 ret = get_errno(sigpending(&set));
7647 if (!is_error(ret)) {
7648 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7649 goto efault;
7650 host_to_target_old_sigset(p, &set);
7651 unlock_user(p, arg1, sizeof(target_sigset_t));
7654 break;
7655 #endif
7656 case TARGET_NR_rt_sigpending:
7658 sigset_t set;
7659 ret = get_errno(sigpending(&set));
7660 if (!is_error(ret)) {
7661 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7662 goto efault;
7663 host_to_target_sigset(p, &set);
7664 unlock_user(p, arg1, sizeof(target_sigset_t));
7667 break;
7668 #ifdef TARGET_NR_sigsuspend
7669 case TARGET_NR_sigsuspend:
7671 TaskState *ts = cpu->opaque;
7672 #if defined(TARGET_ALPHA)
7673 abi_ulong mask = arg1;
7674 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
7675 #else
7676 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7677 goto efault;
7678 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
7679 unlock_user(p, arg1, 0);
7680 #endif
7681 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7682 SIGSET_T_SIZE));
7683 if (ret != -TARGET_ERESTARTSYS) {
7684 ts->in_sigsuspend = 1;
7687 break;
7688 #endif
7689 case TARGET_NR_rt_sigsuspend:
7691 TaskState *ts = cpu->opaque;
7692 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7693 goto efault;
7694 target_to_host_sigset(&ts->sigsuspend_mask, p);
7695 unlock_user(p, arg1, 0);
7696 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7697 SIGSET_T_SIZE));
7698 if (ret != -TARGET_ERESTARTSYS) {
7699 ts->in_sigsuspend = 1;
7702 break;
7703 case TARGET_NR_rt_sigtimedwait:
7705 sigset_t set;
7706 struct timespec uts, *puts;
7707 siginfo_t uinfo;
7709 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7710 goto efault;
7711 target_to_host_sigset(&set, p);
7712 unlock_user(p, arg1, 0);
7713 if (arg3) {
7714 puts = &uts;
7715 target_to_host_timespec(puts, arg3);
7716 } else {
7717 puts = NULL;
7719 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
7720 if (!is_error(ret)) {
7721 if (arg2) {
7722 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
7724 if (!p) {
7725 goto efault;
7727 host_to_target_siginfo(p, &uinfo);
7728 unlock_user(p, arg2, sizeof(target_siginfo_t));
7730 ret = host_to_target_signal(ret);
7733 break;
7734 case TARGET_NR_rt_sigqueueinfo:
7736 siginfo_t uinfo;
7737 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
7738 goto efault;
7739 target_to_host_siginfo(&uinfo, p);
7740 unlock_user(p, arg1, 0);
7741 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
7743 break;
7744 #ifdef TARGET_NR_sigreturn
7745 case TARGET_NR_sigreturn:
7746 if (block_signals()) {
7747 ret = -TARGET_ERESTARTSYS;
7748 } else {
7749 ret = do_sigreturn(cpu_env);
7751 break;
7752 #endif
7753 case TARGET_NR_rt_sigreturn:
7754 if (block_signals()) {
7755 ret = -TARGET_ERESTARTSYS;
7756 } else {
7757 ret = do_rt_sigreturn(cpu_env);
7759 break;
7760 case TARGET_NR_sethostname:
7761 if (!(p = lock_user_string(arg1)))
7762 goto efault;
7763 ret = get_errno(sethostname(p, arg2));
7764 unlock_user(p, arg1, 0);
7765 break;
7766 case TARGET_NR_setrlimit:
7768 int resource = target_to_host_resource(arg1);
7769 struct target_rlimit *target_rlim;
7770 struct rlimit rlim;
7771 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
7772 goto efault;
7773 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
7774 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
7775 unlock_user_struct(target_rlim, arg2, 0);
7776 ret = get_errno(setrlimit(resource, &rlim));
7778 break;
7779 case TARGET_NR_getrlimit:
7781 int resource = target_to_host_resource(arg1);
7782 struct target_rlimit *target_rlim;
7783 struct rlimit rlim;
7785 ret = get_errno(getrlimit(resource, &rlim));
7786 if (!is_error(ret)) {
7787 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7788 goto efault;
7789 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7790 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7791 unlock_user_struct(target_rlim, arg2, 1);
7794 break;
7795 case TARGET_NR_getrusage:
7797 struct rusage rusage;
7798 ret = get_errno(getrusage(arg1, &rusage));
7799 if (!is_error(ret)) {
7800 ret = host_to_target_rusage(arg2, &rusage);
7803 break;
7804 case TARGET_NR_gettimeofday:
7806 struct timeval tv;
7807 ret = get_errno(gettimeofday(&tv, NULL));
7808 if (!is_error(ret)) {
7809 if (copy_to_user_timeval(arg1, &tv))
7810 goto efault;
7813 break;
7814 case TARGET_NR_settimeofday:
7816 struct timeval tv, *ptv = NULL;
7817 struct timezone tz, *ptz = NULL;
7819 if (arg1) {
7820 if (copy_from_user_timeval(&tv, arg1)) {
7821 goto efault;
7823 ptv = &tv;
7826 if (arg2) {
7827 if (copy_from_user_timezone(&tz, arg2)) {
7828 goto efault;
7830 ptz = &tz;
7833 ret = get_errno(settimeofday(ptv, ptz));
7835 break;
7836 #if defined(TARGET_NR_select)
7837 case TARGET_NR_select:
7838 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7839 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7840 #else
7842 struct target_sel_arg_struct *sel;
7843 abi_ulong inp, outp, exp, tvp;
7844 long nsel;
7846 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
7847 goto efault;
7848 nsel = tswapal(sel->n);
7849 inp = tswapal(sel->inp);
7850 outp = tswapal(sel->outp);
7851 exp = tswapal(sel->exp);
7852 tvp = tswapal(sel->tvp);
7853 unlock_user_struct(sel, arg1, 0);
7854 ret = do_select(nsel, inp, outp, exp, tvp);
7856 #endif
7857 break;
7858 #endif
7859 #ifdef TARGET_NR_pselect6
7860 case TARGET_NR_pselect6:
7862 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
7863 fd_set rfds, wfds, efds;
7864 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
7865 struct timespec ts, *ts_ptr;
7868 * The 6th arg is actually two args smashed together,
7869 * so we cannot use the C library.
7871 sigset_t set;
7872 struct {
7873 sigset_t *set;
7874 size_t size;
7875 } sig, *sig_ptr;
7877 abi_ulong arg_sigset, arg_sigsize, *arg7;
7878 target_sigset_t *target_sigset;
7880 n = arg1;
7881 rfd_addr = arg2;
7882 wfd_addr = arg3;
7883 efd_addr = arg4;
7884 ts_addr = arg5;
7886 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
7887 if (ret) {
7888 goto fail;
7890 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
7891 if (ret) {
7892 goto fail;
7894 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
7895 if (ret) {
7896 goto fail;
7900 * This takes a timespec, and not a timeval, so we cannot
7901 * use the do_select() helper ...
7903 if (ts_addr) {
7904 if (target_to_host_timespec(&ts, ts_addr)) {
7905 goto efault;
7907 ts_ptr = &ts;
7908 } else {
7909 ts_ptr = NULL;
7912 /* Extract the two packed args for the sigset */
7913 if (arg6) {
7914 sig_ptr = &sig;
7915 sig.size = SIGSET_T_SIZE;
7917 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
7918 if (!arg7) {
7919 goto efault;
7921 arg_sigset = tswapal(arg7[0]);
7922 arg_sigsize = tswapal(arg7[1]);
7923 unlock_user(arg7, arg6, 0);
7925 if (arg_sigset) {
7926 sig.set = &set;
7927 if (arg_sigsize != sizeof(*target_sigset)) {
7928 /* Like the kernel, we enforce correct size sigsets */
7929 ret = -TARGET_EINVAL;
7930 goto fail;
7932 target_sigset = lock_user(VERIFY_READ, arg_sigset,
7933 sizeof(*target_sigset), 1);
7934 if (!target_sigset) {
7935 goto efault;
7937 target_to_host_sigset(&set, target_sigset);
7938 unlock_user(target_sigset, arg_sigset, 0);
7939 } else {
7940 sig.set = NULL;
7942 } else {
7943 sig_ptr = NULL;
7946 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
7947 ts_ptr, sig_ptr));
7949 if (!is_error(ret)) {
7950 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
7951 goto efault;
7952 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
7953 goto efault;
7954 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
7955 goto efault;
7957 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
7958 goto efault;
7961 break;
7962 #endif
7963 #ifdef TARGET_NR_symlink
7964 case TARGET_NR_symlink:
7966 void *p2;
7967 p = lock_user_string(arg1);
7968 p2 = lock_user_string(arg2);
7969 if (!p || !p2)
7970 ret = -TARGET_EFAULT;
7971 else
7972 ret = get_errno(symlink(p, p2));
7973 unlock_user(p2, arg2, 0);
7974 unlock_user(p, arg1, 0);
7976 break;
7977 #endif
7978 #if defined(TARGET_NR_symlinkat)
7979 case TARGET_NR_symlinkat:
7981 void *p2;
7982 p = lock_user_string(arg1);
7983 p2 = lock_user_string(arg3);
7984 if (!p || !p2)
7985 ret = -TARGET_EFAULT;
7986 else
7987 ret = get_errno(symlinkat(p, arg2, p2));
7988 unlock_user(p2, arg3, 0);
7989 unlock_user(p, arg1, 0);
7991 break;
7992 #endif
7993 #ifdef TARGET_NR_oldlstat
7994 case TARGET_NR_oldlstat:
7995 goto unimplemented;
7996 #endif
7997 #ifdef TARGET_NR_readlink
7998 case TARGET_NR_readlink:
8000 void *p2;
8001 p = lock_user_string(arg1);
8002 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8003 if (!p || !p2) {
8004 ret = -TARGET_EFAULT;
8005 } else if (!arg3) {
8006 /* Short circuit this for the magic exe check. */
8007 ret = -TARGET_EINVAL;
8008 } else if (is_proc_myself((const char *)p, "exe")) {
8009 char real[PATH_MAX], *temp;
8010 temp = realpath(exec_path, real);
8011 /* Return value is # of bytes that we wrote to the buffer. */
8012 if (temp == NULL) {
8013 ret = get_errno(-1);
8014 } else {
8015 /* Don't worry about sign mismatch as earlier mapping
8016 * logic would have thrown a bad address error. */
8017 ret = MIN(strlen(real), arg3);
8018 /* We cannot NUL terminate the string. */
8019 memcpy(p2, real, ret);
8021 } else {
8022 ret = get_errno(readlink(path(p), p2, arg3));
8024 unlock_user(p2, arg2, ret);
8025 unlock_user(p, arg1, 0);
8027 break;
8028 #endif
8029 #if defined(TARGET_NR_readlinkat)
8030 case TARGET_NR_readlinkat:
8032 void *p2;
8033 p = lock_user_string(arg2);
8034 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8035 if (!p || !p2) {
8036 ret = -TARGET_EFAULT;
8037 } else if (is_proc_myself((const char *)p, "exe")) {
8038 char real[PATH_MAX], *temp;
8039 temp = realpath(exec_path, real);
8040 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8041 snprintf((char *)p2, arg4, "%s", real);
8042 } else {
8043 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8045 unlock_user(p2, arg3, ret);
8046 unlock_user(p, arg2, 0);
8048 break;
8049 #endif
8050 #ifdef TARGET_NR_uselib
8051 case TARGET_NR_uselib:
8052 goto unimplemented;
8053 #endif
8054 #ifdef TARGET_NR_swapon
8055 case TARGET_NR_swapon:
8056 if (!(p = lock_user_string(arg1)))
8057 goto efault;
8058 ret = get_errno(swapon(p, arg2));
8059 unlock_user(p, arg1, 0);
8060 break;
8061 #endif
8062 case TARGET_NR_reboot:
8063 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8064 /* arg4 must be ignored in all other cases */
8065 p = lock_user_string(arg4);
8066 if (!p) {
8067 goto efault;
8069 ret = get_errno(reboot(arg1, arg2, arg3, p));
8070 unlock_user(p, arg4, 0);
8071 } else {
8072 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8074 break;
8075 #ifdef TARGET_NR_readdir
8076 case TARGET_NR_readdir:
8077 goto unimplemented;
8078 #endif
8079 #ifdef TARGET_NR_mmap
8080 case TARGET_NR_mmap:
8081 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8082 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8083 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8084 || defined(TARGET_S390X)
8086 abi_ulong *v;
8087 abi_ulong v1, v2, v3, v4, v5, v6;
8088 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8089 goto efault;
8090 v1 = tswapal(v[0]);
8091 v2 = tswapal(v[1]);
8092 v3 = tswapal(v[2]);
8093 v4 = tswapal(v[3]);
8094 v5 = tswapal(v[4]);
8095 v6 = tswapal(v[5]);
8096 unlock_user(v, arg1, 0);
8097 ret = get_errno(target_mmap(v1, v2, v3,
8098 target_to_host_bitmask(v4, mmap_flags_tbl),
8099 v5, v6));
8101 #else
8102 ret = get_errno(target_mmap(arg1, arg2, arg3,
8103 target_to_host_bitmask(arg4, mmap_flags_tbl),
8104 arg5,
8105 arg6));
8106 #endif
8107 break;
8108 #endif
8109 #ifdef TARGET_NR_mmap2
8110 case TARGET_NR_mmap2:
8111 #ifndef MMAP_SHIFT
8112 #define MMAP_SHIFT 12
8113 #endif
8114 ret = get_errno(target_mmap(arg1, arg2, arg3,
8115 target_to_host_bitmask(arg4, mmap_flags_tbl),
8116 arg5,
8117 arg6 << MMAP_SHIFT));
8118 break;
8119 #endif
8120 case TARGET_NR_munmap:
8121 ret = get_errno(target_munmap(arg1, arg2));
8122 break;
8123 case TARGET_NR_mprotect:
8125 TaskState *ts = cpu->opaque;
8126 /* Special hack to detect libc making the stack executable. */
8127 if ((arg3 & PROT_GROWSDOWN)
8128 && arg1 >= ts->info->stack_limit
8129 && arg1 <= ts->info->start_stack) {
8130 arg3 &= ~PROT_GROWSDOWN;
8131 arg2 = arg2 + arg1 - ts->info->stack_limit;
8132 arg1 = ts->info->stack_limit;
8135 ret = get_errno(target_mprotect(arg1, arg2, arg3));
8136 break;
8137 #ifdef TARGET_NR_mremap
8138 case TARGET_NR_mremap:
8139 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8140 break;
8141 #endif
8142 /* ??? msync/mlock/munlock are broken for softmmu. */
8143 #ifdef TARGET_NR_msync
8144 case TARGET_NR_msync:
8145 ret = get_errno(msync(g2h(arg1), arg2, arg3));
8146 break;
8147 #endif
8148 #ifdef TARGET_NR_mlock
8149 case TARGET_NR_mlock:
8150 ret = get_errno(mlock(g2h(arg1), arg2));
8151 break;
8152 #endif
8153 #ifdef TARGET_NR_munlock
8154 case TARGET_NR_munlock:
8155 ret = get_errno(munlock(g2h(arg1), arg2));
8156 break;
8157 #endif
8158 #ifdef TARGET_NR_mlockall
8159 case TARGET_NR_mlockall:
8160 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8161 break;
8162 #endif
8163 #ifdef TARGET_NR_munlockall
8164 case TARGET_NR_munlockall:
8165 ret = get_errno(munlockall());
8166 break;
8167 #endif
8168 case TARGET_NR_truncate:
8169 if (!(p = lock_user_string(arg1)))
8170 goto efault;
8171 ret = get_errno(truncate(p, arg2));
8172 unlock_user(p, arg1, 0);
8173 break;
8174 case TARGET_NR_ftruncate:
8175 ret = get_errno(ftruncate(arg1, arg2));
8176 break;
8177 case TARGET_NR_fchmod:
8178 ret = get_errno(fchmod(arg1, arg2));
8179 break;
8180 #if defined(TARGET_NR_fchmodat)
8181 case TARGET_NR_fchmodat:
8182 if (!(p = lock_user_string(arg2)))
8183 goto efault;
8184 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8185 unlock_user(p, arg2, 0);
8186 break;
8187 #endif
8188 case TARGET_NR_getpriority:
8189 /* Note that negative values are valid for getpriority, so we must
8190 differentiate based on errno settings. */
8191 errno = 0;
8192 ret = getpriority(arg1, arg2);
8193 if (ret == -1 && errno != 0) {
8194 ret = -host_to_target_errno(errno);
8195 break;
8197 #ifdef TARGET_ALPHA
8198 /* Return value is the unbiased priority. Signal no error. */
8199 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8200 #else
8201 /* Return value is a biased priority to avoid negative numbers. */
8202 ret = 20 - ret;
8203 #endif
8204 break;
8205 case TARGET_NR_setpriority:
8206 ret = get_errno(setpriority(arg1, arg2, arg3));
8207 break;
8208 #ifdef TARGET_NR_profil
8209 case TARGET_NR_profil:
8210 goto unimplemented;
8211 #endif
8212 case TARGET_NR_statfs:
8213 if (!(p = lock_user_string(arg1)))
8214 goto efault;
8215 ret = get_errno(statfs(path(p), &stfs));
8216 unlock_user(p, arg1, 0);
8217 convert_statfs:
8218 if (!is_error(ret)) {
8219 struct target_statfs *target_stfs;
8221 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8222 goto efault;
8223 __put_user(stfs.f_type, &target_stfs->f_type);
8224 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8225 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8226 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8227 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8228 __put_user(stfs.f_files, &target_stfs->f_files);
8229 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8230 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8231 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8232 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8233 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8234 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8235 unlock_user_struct(target_stfs, arg2, 1);
8237 break;
8238 case TARGET_NR_fstatfs:
8239 ret = get_errno(fstatfs(arg1, &stfs));
8240 goto convert_statfs;
8241 #ifdef TARGET_NR_statfs64
8242 case TARGET_NR_statfs64:
8243 if (!(p = lock_user_string(arg1)))
8244 goto efault;
8245 ret = get_errno(statfs(path(p), &stfs));
8246 unlock_user(p, arg1, 0);
8247 convert_statfs64:
8248 if (!is_error(ret)) {
8249 struct target_statfs64 *target_stfs;
8251 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8252 goto efault;
8253 __put_user(stfs.f_type, &target_stfs->f_type);
8254 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8255 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8256 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8257 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8258 __put_user(stfs.f_files, &target_stfs->f_files);
8259 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8260 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8261 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8262 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8263 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8264 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8265 unlock_user_struct(target_stfs, arg3, 1);
8267 break;
8268 case TARGET_NR_fstatfs64:
8269 ret = get_errno(fstatfs(arg1, &stfs));
8270 goto convert_statfs64;
8271 #endif
8272 #ifdef TARGET_NR_ioperm
8273 case TARGET_NR_ioperm:
8274 goto unimplemented;
8275 #endif
8276 #ifdef TARGET_NR_socketcall
8277 case TARGET_NR_socketcall:
8278 ret = do_socketcall(arg1, arg2);
8279 break;
8280 #endif
8281 #ifdef TARGET_NR_accept
8282 case TARGET_NR_accept:
8283 ret = do_accept4(arg1, arg2, arg3, 0);
8284 break;
8285 #endif
8286 #ifdef TARGET_NR_accept4
8287 case TARGET_NR_accept4:
8288 #ifdef CONFIG_ACCEPT4
8289 ret = do_accept4(arg1, arg2, arg3, arg4);
8290 #else
8291 goto unimplemented;
8292 #endif
8293 break;
8294 #endif
8295 #ifdef TARGET_NR_bind
8296 case TARGET_NR_bind:
8297 ret = do_bind(arg1, arg2, arg3);
8298 break;
8299 #endif
8300 #ifdef TARGET_NR_connect
8301 case TARGET_NR_connect:
8302 ret = do_connect(arg1, arg2, arg3);
8303 break;
8304 #endif
8305 #ifdef TARGET_NR_getpeername
8306 case TARGET_NR_getpeername:
8307 ret = do_getpeername(arg1, arg2, arg3);
8308 break;
8309 #endif
8310 #ifdef TARGET_NR_getsockname
8311 case TARGET_NR_getsockname:
8312 ret = do_getsockname(arg1, arg2, arg3);
8313 break;
8314 #endif
8315 #ifdef TARGET_NR_getsockopt
8316 case TARGET_NR_getsockopt:
8317 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8318 break;
8319 #endif
8320 #ifdef TARGET_NR_listen
8321 case TARGET_NR_listen:
8322 ret = get_errno(listen(arg1, arg2));
8323 break;
8324 #endif
8325 #ifdef TARGET_NR_recv
8326 case TARGET_NR_recv:
8327 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8328 break;
8329 #endif
8330 #ifdef TARGET_NR_recvfrom
8331 case TARGET_NR_recvfrom:
8332 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8333 break;
8334 #endif
8335 #ifdef TARGET_NR_recvmsg
8336 case TARGET_NR_recvmsg:
8337 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
8338 break;
8339 #endif
8340 #ifdef TARGET_NR_send
8341 case TARGET_NR_send:
8342 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8343 break;
8344 #endif
8345 #ifdef TARGET_NR_sendmsg
8346 case TARGET_NR_sendmsg:
8347 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
8348 break;
8349 #endif
8350 #ifdef TARGET_NR_sendmmsg
8351 case TARGET_NR_sendmmsg:
8352 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8353 break;
8354 case TARGET_NR_recvmmsg:
8355 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8356 break;
8357 #endif
8358 #ifdef TARGET_NR_sendto
8359 case TARGET_NR_sendto:
8360 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8361 break;
8362 #endif
8363 #ifdef TARGET_NR_shutdown
8364 case TARGET_NR_shutdown:
8365 ret = get_errno(shutdown(arg1, arg2));
8366 break;
8367 #endif
8368 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8369 case TARGET_NR_getrandom:
8370 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8371 if (!p) {
8372 goto efault;
8374 ret = get_errno(getrandom(p, arg2, arg3));
8375 unlock_user(p, arg1, ret);
8376 break;
8377 #endif
8378 #ifdef TARGET_NR_socket
8379 case TARGET_NR_socket:
8380 ret = do_socket(arg1, arg2, arg3);
8381 fd_trans_unregister(ret);
8382 break;
8383 #endif
8384 #ifdef TARGET_NR_socketpair
8385 case TARGET_NR_socketpair:
8386 ret = do_socketpair(arg1, arg2, arg3, arg4);
8387 break;
8388 #endif
8389 #ifdef TARGET_NR_setsockopt
8390 case TARGET_NR_setsockopt:
8391 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8392 break;
8393 #endif
8395 case TARGET_NR_syslog:
8396 if (!(p = lock_user_string(arg2)))
8397 goto efault;
8398 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8399 unlock_user(p, arg2, 0);
8400 break;
8402 case TARGET_NR_setitimer:
8404 struct itimerval value, ovalue, *pvalue;
8406 if (arg2) {
8407 pvalue = &value;
8408 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8409 || copy_from_user_timeval(&pvalue->it_value,
8410 arg2 + sizeof(struct target_timeval)))
8411 goto efault;
8412 } else {
8413 pvalue = NULL;
8415 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8416 if (!is_error(ret) && arg3) {
8417 if (copy_to_user_timeval(arg3,
8418 &ovalue.it_interval)
8419 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8420 &ovalue.it_value))
8421 goto efault;
8424 break;
8425 case TARGET_NR_getitimer:
8427 struct itimerval value;
8429 ret = get_errno(getitimer(arg1, &value));
8430 if (!is_error(ret) && arg2) {
8431 if (copy_to_user_timeval(arg2,
8432 &value.it_interval)
8433 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8434 &value.it_value))
8435 goto efault;
8438 break;
8439 #ifdef TARGET_NR_stat
8440 case TARGET_NR_stat:
8441 if (!(p = lock_user_string(arg1)))
8442 goto efault;
8443 ret = get_errno(stat(path(p), &st));
8444 unlock_user(p, arg1, 0);
8445 goto do_stat;
8446 #endif
8447 #ifdef TARGET_NR_lstat
8448 case TARGET_NR_lstat:
8449 if (!(p = lock_user_string(arg1)))
8450 goto efault;
8451 ret = get_errno(lstat(path(p), &st));
8452 unlock_user(p, arg1, 0);
8453 goto do_stat;
8454 #endif
8455 case TARGET_NR_fstat:
8457 ret = get_errno(fstat(arg1, &st));
8458 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8459 do_stat:
8460 #endif
8461 if (!is_error(ret)) {
8462 struct target_stat *target_st;
8464 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8465 goto efault;
8466 memset(target_st, 0, sizeof(*target_st));
8467 __put_user(st.st_dev, &target_st->st_dev);
8468 __put_user(st.st_ino, &target_st->st_ino);
8469 __put_user(st.st_mode, &target_st->st_mode);
8470 __put_user(st.st_uid, &target_st->st_uid);
8471 __put_user(st.st_gid, &target_st->st_gid);
8472 __put_user(st.st_nlink, &target_st->st_nlink);
8473 __put_user(st.st_rdev, &target_st->st_rdev);
8474 __put_user(st.st_size, &target_st->st_size);
8475 __put_user(st.st_blksize, &target_st->st_blksize);
8476 __put_user(st.st_blocks, &target_st->st_blocks);
8477 __put_user(st.st_atime, &target_st->target_st_atime);
8478 __put_user(st.st_mtime, &target_st->target_st_mtime);
8479 __put_user(st.st_ctime, &target_st->target_st_ctime);
8480 unlock_user_struct(target_st, arg2, 1);
8483 break;
8484 #ifdef TARGET_NR_olduname
8485 case TARGET_NR_olduname:
8486 goto unimplemented;
8487 #endif
8488 #ifdef TARGET_NR_iopl
8489 case TARGET_NR_iopl:
8490 goto unimplemented;
8491 #endif
8492 case TARGET_NR_vhangup:
8493 ret = get_errno(vhangup());
8494 break;
8495 #ifdef TARGET_NR_idle
8496 case TARGET_NR_idle:
8497 goto unimplemented;
8498 #endif
8499 #ifdef TARGET_NR_syscall
8500 case TARGET_NR_syscall:
8501 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8502 arg6, arg7, arg8, 0);
8503 break;
8504 #endif
8505 case TARGET_NR_wait4:
8507 int status;
8508 abi_long status_ptr = arg2;
8509 struct rusage rusage, *rusage_ptr;
8510 abi_ulong target_rusage = arg4;
8511 abi_long rusage_err;
8512 if (target_rusage)
8513 rusage_ptr = &rusage;
8514 else
8515 rusage_ptr = NULL;
8516 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8517 if (!is_error(ret)) {
8518 if (status_ptr && ret) {
8519 status = host_to_target_waitstatus(status);
8520 if (put_user_s32(status, status_ptr))
8521 goto efault;
8523 if (target_rusage) {
8524 rusage_err = host_to_target_rusage(target_rusage, &rusage);
8525 if (rusage_err) {
8526 ret = rusage_err;
8531 break;
8532 #ifdef TARGET_NR_swapoff
8533 case TARGET_NR_swapoff:
8534 if (!(p = lock_user_string(arg1)))
8535 goto efault;
8536 ret = get_errno(swapoff(p));
8537 unlock_user(p, arg1, 0);
8538 break;
8539 #endif
8540 case TARGET_NR_sysinfo:
8542 struct target_sysinfo *target_value;
8543 struct sysinfo value;
8544 ret = get_errno(sysinfo(&value));
8545 if (!is_error(ret) && arg1)
8547 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8548 goto efault;
8549 __put_user(value.uptime, &target_value->uptime);
8550 __put_user(value.loads[0], &target_value->loads[0]);
8551 __put_user(value.loads[1], &target_value->loads[1]);
8552 __put_user(value.loads[2], &target_value->loads[2]);
8553 __put_user(value.totalram, &target_value->totalram);
8554 __put_user(value.freeram, &target_value->freeram);
8555 __put_user(value.sharedram, &target_value->sharedram);
8556 __put_user(value.bufferram, &target_value->bufferram);
8557 __put_user(value.totalswap, &target_value->totalswap);
8558 __put_user(value.freeswap, &target_value->freeswap);
8559 __put_user(value.procs, &target_value->procs);
8560 __put_user(value.totalhigh, &target_value->totalhigh);
8561 __put_user(value.freehigh, &target_value->freehigh);
8562 __put_user(value.mem_unit, &target_value->mem_unit);
8563 unlock_user_struct(target_value, arg1, 1);
8566 break;
8567 #ifdef TARGET_NR_ipc
8568 case TARGET_NR_ipc:
8569 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
8570 break;
8571 #endif
8572 #ifdef TARGET_NR_semget
8573 case TARGET_NR_semget:
8574 ret = get_errno(semget(arg1, arg2, arg3));
8575 break;
8576 #endif
8577 #ifdef TARGET_NR_semop
8578 case TARGET_NR_semop:
8579 ret = do_semop(arg1, arg2, arg3);
8580 break;
8581 #endif
8582 #ifdef TARGET_NR_semctl
8583 case TARGET_NR_semctl:
8584 ret = do_semctl(arg1, arg2, arg3, arg4);
8585 break;
8586 #endif
8587 #ifdef TARGET_NR_msgctl
8588 case TARGET_NR_msgctl:
8589 ret = do_msgctl(arg1, arg2, arg3);
8590 break;
8591 #endif
8592 #ifdef TARGET_NR_msgget
8593 case TARGET_NR_msgget:
8594 ret = get_errno(msgget(arg1, arg2));
8595 break;
8596 #endif
8597 #ifdef TARGET_NR_msgrcv
8598 case TARGET_NR_msgrcv:
8599 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8600 break;
8601 #endif
8602 #ifdef TARGET_NR_msgsnd
8603 case TARGET_NR_msgsnd:
8604 ret = do_msgsnd(arg1, arg2, arg3, arg4);
8605 break;
8606 #endif
8607 #ifdef TARGET_NR_shmget
8608 case TARGET_NR_shmget:
8609 ret = get_errno(shmget(arg1, arg2, arg3));
8610 break;
8611 #endif
8612 #ifdef TARGET_NR_shmctl
8613 case TARGET_NR_shmctl:
8614 ret = do_shmctl(arg1, arg2, arg3);
8615 break;
8616 #endif
8617 #ifdef TARGET_NR_shmat
8618 case TARGET_NR_shmat:
8619 ret = do_shmat(arg1, arg2, arg3);
8620 break;
8621 #endif
8622 #ifdef TARGET_NR_shmdt
8623 case TARGET_NR_shmdt:
8624 ret = do_shmdt(arg1);
8625 break;
8626 #endif
8627 case TARGET_NR_fsync:
8628 ret = get_errno(fsync(arg1));
8629 break;
8630 case TARGET_NR_clone:
8631 /* Linux manages to have three different orderings for its
8632 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8633 * match the kernel's CONFIG_CLONE_* settings.
8634 * Microblaze is further special in that it uses a sixth
8635 * implicit argument to clone for the TLS pointer.
8637 #if defined(TARGET_MICROBLAZE)
8638 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8639 #elif defined(TARGET_CLONE_BACKWARDS)
8640 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8641 #elif defined(TARGET_CLONE_BACKWARDS2)
8642 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8643 #else
8644 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8645 #endif
8646 break;
8647 #ifdef __NR_exit_group
8648 /* new thread calls */
8649 case TARGET_NR_exit_group:
8650 #ifdef TARGET_GPROF
8651 _mcleanup();
8652 #endif
8653 gdb_exit(cpu_env, arg1);
8654 ret = get_errno(exit_group(arg1));
8655 break;
8656 #endif
8657 case TARGET_NR_setdomainname:
8658 if (!(p = lock_user_string(arg1)))
8659 goto efault;
8660 ret = get_errno(setdomainname(p, arg2));
8661 unlock_user(p, arg1, 0);
8662 break;
8663 case TARGET_NR_uname:
8664 /* no need to transcode because we use the linux syscall */
8666 struct new_utsname * buf;
8668 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8669 goto efault;
8670 ret = get_errno(sys_uname(buf));
8671 if (!is_error(ret)) {
8672 /* Overrite the native machine name with whatever is being
8673 emulated. */
8674 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
8675 /* Allow the user to override the reported release. */
8676 if (qemu_uname_release && *qemu_uname_release)
8677 strcpy (buf->release, qemu_uname_release);
8679 unlock_user_struct(buf, arg1, 1);
8681 break;
8682 #ifdef TARGET_I386
8683 case TARGET_NR_modify_ldt:
8684 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
8685 break;
8686 #if !defined(TARGET_X86_64)
8687 case TARGET_NR_vm86old:
8688 goto unimplemented;
8689 case TARGET_NR_vm86:
8690 ret = do_vm86(cpu_env, arg1, arg2);
8691 break;
8692 #endif
8693 #endif
8694 case TARGET_NR_adjtimex:
8695 goto unimplemented;
8696 #ifdef TARGET_NR_create_module
8697 case TARGET_NR_create_module:
8698 #endif
8699 case TARGET_NR_init_module:
8700 case TARGET_NR_delete_module:
8701 #ifdef TARGET_NR_get_kernel_syms
8702 case TARGET_NR_get_kernel_syms:
8703 #endif
8704 goto unimplemented;
8705 case TARGET_NR_quotactl:
8706 goto unimplemented;
8707 case TARGET_NR_getpgid:
8708 ret = get_errno(getpgid(arg1));
8709 break;
8710 case TARGET_NR_fchdir:
8711 ret = get_errno(fchdir(arg1));
8712 break;
8713 #ifdef TARGET_NR_bdflush /* not on x86_64 */
8714 case TARGET_NR_bdflush:
8715 goto unimplemented;
8716 #endif
8717 #ifdef TARGET_NR_sysfs
8718 case TARGET_NR_sysfs:
8719 goto unimplemented;
8720 #endif
8721 case TARGET_NR_personality:
8722 ret = get_errno(personality(arg1));
8723 break;
8724 #ifdef TARGET_NR_afs_syscall
8725 case TARGET_NR_afs_syscall:
8726 goto unimplemented;
8727 #endif
8728 #ifdef TARGET_NR__llseek /* Not on alpha */
8729 case TARGET_NR__llseek:
8731 int64_t res;
8732 #if !defined(__NR_llseek)
8733 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
8734 if (res == -1) {
8735 ret = get_errno(res);
8736 } else {
8737 ret = 0;
8739 #else
8740 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
8741 #endif
8742 if ((ret == 0) && put_user_s64(res, arg4)) {
8743 goto efault;
8746 break;
8747 #endif
8748 #ifdef TARGET_NR_getdents
8749 case TARGET_NR_getdents:
8750 #ifdef __NR_getdents
8751 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8753 struct target_dirent *target_dirp;
8754 struct linux_dirent *dirp;
8755 abi_long count = arg3;
8757 dirp = g_try_malloc(count);
8758 if (!dirp) {
8759 ret = -TARGET_ENOMEM;
8760 goto fail;
8763 ret = get_errno(sys_getdents(arg1, dirp, count));
8764 if (!is_error(ret)) {
8765 struct linux_dirent *de;
8766 struct target_dirent *tde;
8767 int len = ret;
8768 int reclen, treclen;
8769 int count1, tnamelen;
8771 count1 = 0;
8772 de = dirp;
8773 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8774 goto efault;
8775 tde = target_dirp;
8776 while (len > 0) {
8777 reclen = de->d_reclen;
8778 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
8779 assert(tnamelen >= 0);
8780 treclen = tnamelen + offsetof(struct target_dirent, d_name);
8781 assert(count1 + treclen <= count);
8782 tde->d_reclen = tswap16(treclen);
8783 tde->d_ino = tswapal(de->d_ino);
8784 tde->d_off = tswapal(de->d_off);
8785 memcpy(tde->d_name, de->d_name, tnamelen);
8786 de = (struct linux_dirent *)((char *)de + reclen);
8787 len -= reclen;
8788 tde = (struct target_dirent *)((char *)tde + treclen);
8789 count1 += treclen;
8791 ret = count1;
8792 unlock_user(target_dirp, arg2, ret);
8794 g_free(dirp);
8796 #else
8798 struct linux_dirent *dirp;
8799 abi_long count = arg3;
8801 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8802 goto efault;
8803 ret = get_errno(sys_getdents(arg1, dirp, count));
8804 if (!is_error(ret)) {
8805 struct linux_dirent *de;
8806 int len = ret;
8807 int reclen;
8808 de = dirp;
8809 while (len > 0) {
8810 reclen = de->d_reclen;
8811 if (reclen > len)
8812 break;
8813 de->d_reclen = tswap16(reclen);
8814 tswapls(&de->d_ino);
8815 tswapls(&de->d_off);
8816 de = (struct linux_dirent *)((char *)de + reclen);
8817 len -= reclen;
8820 unlock_user(dirp, arg2, ret);
8822 #endif
8823 #else
8824 /* Implement getdents in terms of getdents64 */
8826 struct linux_dirent64 *dirp;
8827 abi_long count = arg3;
8829 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8830 if (!dirp) {
8831 goto efault;
8833 ret = get_errno(sys_getdents64(arg1, dirp, count));
8834 if (!is_error(ret)) {
8835 /* Convert the dirent64 structs to target dirent. We do this
8836 * in-place, since we can guarantee that a target_dirent is no
8837 * larger than a dirent64; however this means we have to be
8838 * careful to read everything before writing in the new format.
8840 struct linux_dirent64 *de;
8841 struct target_dirent *tde;
8842 int len = ret;
8843 int tlen = 0;
8845 de = dirp;
8846 tde = (struct target_dirent *)dirp;
8847 while (len > 0) {
8848 int namelen, treclen;
8849 int reclen = de->d_reclen;
8850 uint64_t ino = de->d_ino;
8851 int64_t off = de->d_off;
8852 uint8_t type = de->d_type;
8854 namelen = strlen(de->d_name);
8855 treclen = offsetof(struct target_dirent, d_name)
8856 + namelen + 2;
8857 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8859 memmove(tde->d_name, de->d_name, namelen + 1);
8860 tde->d_ino = tswapal(ino);
8861 tde->d_off = tswapal(off);
8862 tde->d_reclen = tswap16(treclen);
8863 /* The target_dirent type is in what was formerly a padding
8864 * byte at the end of the structure:
8866 *(((char *)tde) + treclen - 1) = type;
8868 de = (struct linux_dirent64 *)((char *)de + reclen);
8869 tde = (struct target_dirent *)((char *)tde + treclen);
8870 len -= reclen;
8871 tlen += treclen;
8873 ret = tlen;
8875 unlock_user(dirp, arg2, ret);
8877 #endif
8878 break;
8879 #endif /* TARGET_NR_getdents */
8880 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8881 case TARGET_NR_getdents64:
8883 struct linux_dirent64 *dirp;
8884 abi_long count = arg3;
8885 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8886 goto efault;
8887 ret = get_errno(sys_getdents64(arg1, dirp, count));
8888 if (!is_error(ret)) {
8889 struct linux_dirent64 *de;
8890 int len = ret;
8891 int reclen;
8892 de = dirp;
8893 while (len > 0) {
8894 reclen = de->d_reclen;
8895 if (reclen > len)
8896 break;
8897 de->d_reclen = tswap16(reclen);
8898 tswap64s((uint64_t *)&de->d_ino);
8899 tswap64s((uint64_t *)&de->d_off);
8900 de = (struct linux_dirent64 *)((char *)de + reclen);
8901 len -= reclen;
8904 unlock_user(dirp, arg2, ret);
8906 break;
8907 #endif /* TARGET_NR_getdents64 */
8908 #if defined(TARGET_NR__newselect)
8909 case TARGET_NR__newselect:
8910 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8911 break;
8912 #endif
8913 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8914 # ifdef TARGET_NR_poll
8915 case TARGET_NR_poll:
8916 # endif
8917 # ifdef TARGET_NR_ppoll
8918 case TARGET_NR_ppoll:
8919 # endif
8921 struct target_pollfd *target_pfd;
8922 unsigned int nfds = arg2;
8923 int timeout = arg3;
8924 struct pollfd *pfd;
8925 unsigned int i;
8927 pfd = NULL;
8928 target_pfd = NULL;
8929 if (nfds) {
8930 target_pfd = lock_user(VERIFY_WRITE, arg1,
8931 sizeof(struct target_pollfd) * nfds, 1);
8932 if (!target_pfd) {
8933 goto efault;
8936 pfd = alloca(sizeof(struct pollfd) * nfds);
8937 for (i = 0; i < nfds; i++) {
8938 pfd[i].fd = tswap32(target_pfd[i].fd);
8939 pfd[i].events = tswap16(target_pfd[i].events);
8943 # ifdef TARGET_NR_ppoll
8944 if (num == TARGET_NR_ppoll) {
8945 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
8946 target_sigset_t *target_set;
8947 sigset_t _set, *set = &_set;
8949 if (arg3) {
8950 if (target_to_host_timespec(timeout_ts, arg3)) {
8951 unlock_user(target_pfd, arg1, 0);
8952 goto efault;
8954 } else {
8955 timeout_ts = NULL;
8958 if (arg4) {
8959 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
8960 if (!target_set) {
8961 unlock_user(target_pfd, arg1, 0);
8962 goto efault;
8964 target_to_host_sigset(set, target_set);
8965 } else {
8966 set = NULL;
8969 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts,
8970 set, SIGSET_T_SIZE));
8972 if (!is_error(ret) && arg3) {
8973 host_to_target_timespec(arg3, timeout_ts);
8975 if (arg4) {
8976 unlock_user(target_set, arg4, 0);
8978 } else
8979 # endif
8980 ret = get_errno(poll(pfd, nfds, timeout));
8982 if (!is_error(ret)) {
8983 for(i = 0; i < nfds; i++) {
8984 target_pfd[i].revents = tswap16(pfd[i].revents);
8987 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
8989 break;
8990 #endif
/* flock: constants match across Linux ABIs, so pass straight through. */
8991 case TARGET_NR_flock:
8992 /* NOTE: the flock constant seems to be the same for every
8993 Linux platform */
8994 ret = get_errno(flock(arg1, arg2));
8995 break;
/* readv/writev: marshal the guest iovec array into host iovecs, run the
 * safe_ (signal-race-free) syscall wrapper, then unlock.  On lock failure
 * lock_iovec has already set errno. */
8996 case TARGET_NR_readv:
8998 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
8999 if (vec != NULL) {
9000 ret = get_errno(safe_readv(arg1, vec, arg3));
9001 unlock_iovec(vec, arg2, arg3, 1);
9002 } else {
9003 ret = -host_to_target_errno(errno);
9006 break;
9007 case TARGET_NR_writev:
9009 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9010 if (vec != NULL) {
9011 ret = get_errno(safe_writev(arg1, vec, arg3));
9012 unlock_iovec(vec, arg2, arg3, 0);
9013 } else {
9014 ret = -host_to_target_errno(errno);
9017 break;
9018 case TARGET_NR_getsid:
9019 ret = get_errno(getsid(arg1));
9020 break;
9021 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9022 case TARGET_NR_fdatasync:
9023 ret = get_errno(fdatasync(arg1));
9024 break;
9025 #endif
9026 #ifdef TARGET_NR__sysctl
9027 case TARGET_NR__sysctl:
9028 /* We don't implement this, but ENOTDIR is always a safe
9029 return value. */
9030 ret = -TARGET_ENOTDIR;
9031 break;
9032 #endif
/* sched_getaffinity: the kernel requires the mask length to be a multiple
 * of sizeof(unsigned long); round the guest length up to a host-ulong
 * multiple and copy only the caller-requested bytes back. */
9033 case TARGET_NR_sched_getaffinity:
9035 unsigned int mask_size;
9036 unsigned long *mask;
9039 * sched_getaffinity needs multiples of ulong, so need to take
9040 * care of mismatches between target ulong and host ulong sizes.
9042 if (arg2 & (sizeof(abi_ulong) - 1)) {
9043 ret = -TARGET_EINVAL;
9044 break;
9046 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9048 mask = alloca(mask_size);
9049 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9051 if (!is_error(ret)) {
9052 if (ret > arg2) {
9053 /* More data returned than the caller's buffer will fit.
9054 * This only happens if sizeof(abi_long) < sizeof(long)
9055 * and the caller passed us a buffer holding an odd number
9056 * of abi_longs. If the host kernel is actually using the
9057 * extra 4 bytes then fail EINVAL; otherwise we can just
9058 * ignore them and only copy the interesting part.
9060 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9061 if (numcpus > arg2 * 8) {
9062 ret = -TARGET_EINVAL;
9063 break;
9065 ret = arg2;
9068 if (copy_to_user(arg3, mask, ret)) {
9069 goto efault;
9073 break;
/* sched_setaffinity: same ulong-multiple rounding as getaffinity above. */
9074 case TARGET_NR_sched_setaffinity:
9076 unsigned int mask_size;
9077 unsigned long *mask;
9080 * sched_setaffinity needs multiples of ulong, so need to take
9081 * care of mismatches between target ulong and host ulong sizes.
9083 if (arg2 & (sizeof(abi_ulong) - 1)) {
9084 ret = -TARGET_EINVAL;
9085 break;
9087 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9089 mask = alloca(mask_size);
9090 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9091 goto efault;
9093 memcpy(mask, p, arg2);
/* NOTE(review): p was locked at guest address arg3 but is unlocked with
 * arg2 here — looks like a copy/paste slip; confirm against upstream
 * before changing, as unlock with dirty=0 may make this harmless. */
9094 unlock_user_struct(p, arg2, 0);
9096 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9098 break;
/* sched_setparam: copy the guest sched_param (single byte-swapped
 * sched_priority field) and hand it to the host call. */
9099 case TARGET_NR_sched_setparam:
9101 struct sched_param *target_schp;
9102 struct sched_param schp;
9104 if (arg2 == 0) {
9105 return -TARGET_EINVAL;
9107 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9108 goto efault;
9109 schp.sched_priority = tswap32(target_schp->sched_priority);
9110 unlock_user_struct(target_schp, arg2, 0);
9111 ret = get_errno(sched_setparam(arg1, &schp));
9113 break;
/* sched_getparam: inverse of the above — fetch host param, write the
 * byte-swapped priority back to the guest struct. */
9114 case TARGET_NR_sched_getparam:
9116 struct sched_param *target_schp;
9117 struct sched_param schp;
9119 if (arg2 == 0) {
9120 return -TARGET_EINVAL;
9122 ret = get_errno(sched_getparam(arg1, &schp));
9123 if (!is_error(ret)) {
9124 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9125 goto efault;
9126 target_schp->sched_priority = tswap32(schp.sched_priority);
9127 unlock_user_struct(target_schp, arg2, 1);
9130 break;
/* sched_setscheduler: policy in arg2, param struct pointer in arg3. */
9131 case TARGET_NR_sched_setscheduler:
9133 struct sched_param *target_schp;
9134 struct sched_param schp;
9135 if (arg3 == 0) {
9136 return -TARGET_EINVAL;
9138 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9139 goto efault;
9140 schp.sched_priority = tswap32(target_schp->sched_priority);
9141 unlock_user_struct(target_schp, arg3, 0);
9142 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
9144 break;
/* The following scheduler queries take/return plain integers: no
 * guest-memory marshalling needed. */
9145 case TARGET_NR_sched_getscheduler:
9146 ret = get_errno(sched_getscheduler(arg1));
9147 break;
9148 case TARGET_NR_sched_yield:
9149 ret = get_errno(sched_yield());
9150 break;
9151 case TARGET_NR_sched_get_priority_max:
9152 ret = get_errno(sched_get_priority_max(arg1));
9153 break;
9154 case TARGET_NR_sched_get_priority_min:
9155 ret = get_errno(sched_get_priority_min(arg1));
9156 break;
9157 case TARGET_NR_sched_rr_get_interval:
9159 struct timespec ts;
9160 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9161 if (!is_error(ret)) {
9162 ret = host_to_target_timespec(arg2, &ts);
9165 break;
/* nanosleep: on interruption (error return) the remaining time is
 * written back to the guest's rem pointer if one was supplied. */
9166 case TARGET_NR_nanosleep:
9168 struct timespec req, rem;
9169 target_to_host_timespec(&req, arg1);
9170 ret = get_errno(nanosleep(&req, &rem));
9171 if (is_error(ret) && arg2) {
9172 host_to_target_timespec(arg2, &rem);
9175 break;
9176 #ifdef TARGET_NR_query_module
9177 case TARGET_NR_query_module:
9178 goto unimplemented;
9179 #endif
9180 #ifdef TARGET_NR_nfsservctl
9181 case TARGET_NR_nfsservctl:
9182 goto unimplemented;
9183 #endif
/* prctl: only the options whose arguments are pointers need special
 * handling; everything else passes through unchanged. */
9184 case TARGET_NR_prctl:
9185 switch (arg1) {
9186 case PR_GET_PDEATHSIG:
9188 int deathsig;
9189 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9190 if (!is_error(ret) && arg2
9191 && put_user_ual(deathsig, arg2)) {
9192 goto efault;
9194 break;
9196 #ifdef PR_GET_NAME
/* PR_GET_NAME / PR_SET_NAME operate on a 16-byte task-name buffer in
 * guest memory (16 matches the kernel's TASK_COMM_LEN). */
9197 case PR_GET_NAME:
9199 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9200 if (!name) {
9201 goto efault;
9203 ret = get_errno(prctl(arg1, (unsigned long)name,
9204 arg3, arg4, arg5));
9205 unlock_user(name, arg2, 16);
9206 break;
9208 case PR_SET_NAME:
9210 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9211 if (!name) {
9212 goto efault;
9214 ret = get_errno(prctl(arg1, (unsigned long)name,
9215 arg3, arg4, arg5));
9216 unlock_user(name, arg2, 0);
9217 break;
9219 #endif
9220 default:
9221 /* Most prctl options have no pointer arguments */
9222 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9223 break;
9225 break;
9226 #ifdef TARGET_NR_arch_prctl
/* arch_prctl is only emulated for 64-bit x86 guests (FS/GS base). */
9227 case TARGET_NR_arch_prctl:
9228 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9229 ret = do_arch_prctl(cpu_env, arg1, arg2);
9230 break;
9231 #else
9232 goto unimplemented;
9233 #endif
9234 #endif
9235 #ifdef TARGET_NR_pread64
/* pread64/pwrite64: on ABIs that require 64-bit syscall args in aligned
 * register pairs, the offset halves are shifted up one slot; compensate
 * before assembling the 64-bit offset from (arg4, arg5). */
9236 case TARGET_NR_pread64:
9237 if (regpairs_aligned(cpu_env)) {
9238 arg4 = arg5;
9239 arg5 = arg6;
9241 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9242 goto efault;
9243 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9244 unlock_user(p, arg2, ret);
9245 break;
9246 case TARGET_NR_pwrite64:
9247 if (regpairs_aligned(cpu_env)) {
9248 arg4 = arg5;
9249 arg5 = arg6;
9251 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9252 goto efault;
9253 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9254 unlock_user(p, arg2, 0);
9255 break;
9256 #endif
/* getcwd: write the host cwd into the guest buffer via sys_getcwd1. */
9257 case TARGET_NR_getcwd:
9258 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9259 goto efault;
9260 ret = get_errno(sys_getcwd1(p, arg2));
9261 unlock_user(p, arg1, ret);
9262 break;
/* capget/capset share one handler: both take a cap_user_header plus an
 * optional data pointer.  Header versions >= 2 carry two data structs
 * instead of one, hence data_items. */
9263 case TARGET_NR_capget:
9264 case TARGET_NR_capset:
9266 struct target_user_cap_header *target_header;
9267 struct target_user_cap_data *target_data = NULL;
9268 struct __user_cap_header_struct header;
9269 struct __user_cap_data_struct data[2];
9270 struct __user_cap_data_struct *dataptr = NULL;
9271 int i, target_datalen;
9272 int data_items = 1;
9274 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9275 goto efault;
9277 header.version = tswap32(target_header->version);
9278 header.pid = tswap32(target_header->pid);
9280 if (header.version != _LINUX_CAPABILITY_VERSION) {
9281 /* Version 2 and up takes pointer to two user_data structs */
9282 data_items = 2;
9285 target_datalen = sizeof(*target_data) * data_items;
9287 if (arg2) {
/* capget writes the data area, capset only reads it — lock accordingly. */
9288 if (num == TARGET_NR_capget) {
9289 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9290 } else {
9291 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9293 if (!target_data) {
9294 unlock_user_struct(target_header, arg1, 0);
9295 goto efault;
9298 if (num == TARGET_NR_capset) {
9299 for (i = 0; i < data_items; i++) {
9300 data[i].effective = tswap32(target_data[i].effective);
9301 data[i].permitted = tswap32(target_data[i].permitted);
9302 data[i].inheritable = tswap32(target_data[i].inheritable);
9306 dataptr = data;
9309 if (num == TARGET_NR_capget) {
9310 ret = get_errno(capget(&header, dataptr));
9311 } else {
9312 ret = get_errno(capset(&header, dataptr));
9315 /* The kernel always updates version for both capget and capset */
9316 target_header->version = tswap32(header.version);
9317 unlock_user_struct(target_header, arg1, 1);
9319 if (arg2) {
9320 if (num == TARGET_NR_capget) {
9321 for (i = 0; i < data_items; i++) {
9322 target_data[i].effective = tswap32(data[i].effective);
9323 target_data[i].permitted = tswap32(data[i].permitted);
9324 target_data[i].inheritable = tswap32(data[i].inheritable);
9326 unlock_user(target_data, arg2, target_datalen);
9327 } else {
9328 unlock_user(target_data, arg2, 0);
9331 break;
/* sigaltstack needs the current guest SP to validate the new stack. */
9333 case TARGET_NR_sigaltstack:
9334 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
9335 break;
9337 #ifdef CONFIG_SENDFILE
/* sendfile: the offset is read from guest memory before the call and the
 * updated value written back afterwards; a NULL arg3 means "use the fd's
 * current file position" and maps to a NULL offp. */
9338 case TARGET_NR_sendfile:
9340 off_t *offp = NULL;
9341 off_t off;
9342 if (arg3) {
9343 ret = get_user_sal(off, arg3);
9344 if (is_error(ret)) {
9345 break;
9347 offp = &off;
9349 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9350 if (!is_error(ret) && arg3) {
9351 abi_long ret2 = put_user_sal(off, arg3);
9352 if (is_error(ret2)) {
9353 ret = ret2;
9356 break;
9358 #ifdef TARGET_NR_sendfile64
/* sendfile64: identical flow, but the guest offset is always 64-bit
 * (get/put_user_s64 instead of the abi_long accessors). */
9359 case TARGET_NR_sendfile64:
9361 off_t *offp = NULL;
9362 off_t off;
9363 if (arg3) {
9364 ret = get_user_s64(off, arg3);
9365 if (is_error(ret)) {
9366 break;
9368 offp = &off;
9370 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9371 if (!is_error(ret) && arg3) {
9372 abi_long ret2 = put_user_s64(off, arg3);
9373 if (is_error(ret2)) {
9374 ret = ret2;
9377 break;
9379 #endif
9380 #else
/* Host has no sendfile support: report both variants unimplemented. */
9381 case TARGET_NR_sendfile:
9382 #ifdef TARGET_NR_sendfile64
9383 case TARGET_NR_sendfile64:
9384 #endif
9385 goto unimplemented;
9386 #endif
9388 #ifdef TARGET_NR_getpmsg
9389 case TARGET_NR_getpmsg:
9390 goto unimplemented;
9391 #endif
9392 #ifdef TARGET_NR_putpmsg
9393 case TARGET_NR_putpmsg:
9394 goto unimplemented;
9395 #endif
9396 #ifdef TARGET_NR_vfork
/* vfork is emulated via do_fork with the vfork clone flags. */
9397 case TARGET_NR_vfork:
9398 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
9399 0, 0, 0, 0));
9400 break;
9401 #endif
9402 #ifdef TARGET_NR_ugetrlimit
/* ugetrlimit: translate the resource id, query the host, and convert the
 * rlimit values (including RLIM_INFINITY encoding) back to guest format. */
9403 case TARGET_NR_ugetrlimit:
9405 struct rlimit rlim;
9406 int resource = target_to_host_resource(arg1);
9407 ret = get_errno(getrlimit(resource, &rlim));
9408 if (!is_error(ret)) {
9409 struct target_rlimit *target_rlim;
9410 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9411 goto efault;
9412 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9413 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9414 unlock_user_struct(target_rlim, arg2, 1);
9416 break;
9418 #endif
9419 #ifdef TARGET_NR_truncate64
/* truncate64/ftruncate64 take extra args because the 64-bit length may be
 * split across register pairs on 32-bit ABIs; the target_* helpers handle
 * the reassembly. */
9420 case TARGET_NR_truncate64:
9421 if (!(p = lock_user_string(arg1)))
9422 goto efault;
9423 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9424 unlock_user(p, arg1, 0);
9425 break;
9426 #endif
9427 #ifdef TARGET_NR_ftruncate64
9428 case TARGET_NR_ftruncate64:
9429 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9430 break;
9431 #endif
9432 #ifdef TARGET_NR_stat64
/* stat64 family: call the host stat variant into a host struct stat, then
 * convert into the guest's 64-bit stat layout with host_to_target_stat64.
 * path() applies the QEMU sysroot prefix where configured. */
9433 case TARGET_NR_stat64:
9434 if (!(p = lock_user_string(arg1)))
9435 goto efault;
9436 ret = get_errno(stat(path(p), &st));
9437 unlock_user(p, arg1, 0);
9438 if (!is_error(ret))
9439 ret = host_to_target_stat64(cpu_env, arg2, &st);
9440 break;
9441 #endif
9442 #ifdef TARGET_NR_lstat64
9443 case TARGET_NR_lstat64:
9444 if (!(p = lock_user_string(arg1)))
9445 goto efault;
9446 ret = get_errno(lstat(path(p), &st));
9447 unlock_user(p, arg1, 0);
9448 if (!is_error(ret))
9449 ret = host_to_target_stat64(cpu_env, arg2, &st);
9450 break;
9451 #endif
9452 #ifdef TARGET_NR_fstat64
9453 case TARGET_NR_fstat64:
9454 ret = get_errno(fstat(arg1, &st));
9455 if (!is_error(ret))
9456 ret = host_to_target_stat64(cpu_env, arg2, &st);
9457 break;
9458 #endif
9459 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9460 #ifdef TARGET_NR_fstatat64
9461 case TARGET_NR_fstatat64:
9462 #endif
9463 #ifdef TARGET_NR_newfstatat
9464 case TARGET_NR_newfstatat:
9465 #endif
9466 if (!(p = lock_user_string(arg2)))
9467 goto efault;
9468 ret = get_errno(fstatat(arg1, path(p), &st, arg4))
/* NOTE(review): the unlock_user for p is not visible in this excerpt —
 * the scrape dropped lines here; verify against the full file. */;
9469 if (!is_error(ret))
9470 ret = host_to_target_stat64(cpu_env, arg3, &st);
9471 break;
9472 #endif
/* 16-bit UID/GID syscall family.  On targets whose legacy syscalls use
 * 16-bit ids, low2high*/high2low* translate between the guest's 16-bit
 * values (with the 0xffff "no change" sentinel) and the host's full-width
 * uid_t/gid_t. */
9473 #ifdef TARGET_NR_lchown
9474 case TARGET_NR_lchown:
9475 if (!(p = lock_user_string(arg1)))
9476 goto efault;
9477 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
9478 unlock_user(p, arg1, 0);
9479 break;
9480 #endif
9481 #ifdef TARGET_NR_getuid
9482 case TARGET_NR_getuid:
9483 ret = get_errno(high2lowuid(getuid()));
9484 break;
9485 #endif
9486 #ifdef TARGET_NR_getgid
9487 case TARGET_NR_getgid:
9488 ret = get_errno(high2lowgid(getgid()));
9489 break;
9490 #endif
9491 #ifdef TARGET_NR_geteuid
9492 case TARGET_NR_geteuid:
9493 ret = get_errno(high2lowuid(geteuid()));
9494 break;
9495 #endif
9496 #ifdef TARGET_NR_getegid
9497 case TARGET_NR_getegid:
9498 ret = get_errno(high2lowgid(getegid()));
9499 break;
9500 #endif
9501 case TARGET_NR_setreuid:
9502 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
9503 break;
9504 case TARGET_NR_setregid:
9505 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
9506 break;
/* getgroups: fetch host gids, then copy up to ret entries back to the
 * guest as byte-swapped target_id values. */
9507 case TARGET_NR_getgroups:
9509 int gidsetsize = arg1;
9510 target_id *target_grouplist;
9511 gid_t *grouplist;
9512 int i;
9514 grouplist = alloca(gidsetsize * sizeof(gid_t));
9515 ret = get_errno(getgroups(gidsetsize, grouplist));
9516 if (gidsetsize == 0)
9517 break;
9518 if (!is_error(ret)) {
9519 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
9520 if (!target_grouplist)
9521 goto efault;
9522 for(i = 0;i < ret; i++)
9523 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
9524 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
9527 break;
/* setgroups: inverse of getgroups; gidsetsize == 0 with a NULL list is a
 * valid way to drop all supplementary groups. */
9528 case TARGET_NR_setgroups:
9530 int gidsetsize = arg1;
9531 target_id *target_grouplist;
9532 gid_t *grouplist = NULL;
9533 int i;
9534 if (gidsetsize) {
9535 grouplist = alloca(gidsetsize * sizeof(gid_t));
9536 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
9537 if (!target_grouplist) {
9538 ret = -TARGET_EFAULT;
9539 goto fail;
9541 for (i = 0; i < gidsetsize; i++) {
9542 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
9544 unlock_user(target_grouplist, arg2, 0);
9546 ret = get_errno(setgroups(gidsetsize, grouplist));
9548 break;
9549 case TARGET_NR_fchown:
9550 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
9551 break;
9552 #if defined(TARGET_NR_fchownat)
9553 case TARGET_NR_fchownat:
9554 if (!(p = lock_user_string(arg2)))
9555 goto efault;
9556 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
9557 low2highgid(arg4), arg5));
9558 unlock_user(p, arg2, 0);
9559 break;
9560 #endif
9561 #ifdef TARGET_NR_setresuid
9562 case TARGET_NR_setresuid:
9563 ret = get_errno(sys_setresuid(low2highuid(arg1),
9564 low2highuid(arg2),
9565 low2highuid(arg3)));
9566 break;
9567 #endif
9568 #ifdef TARGET_NR_getresuid
9569 case TARGET_NR_getresuid:
9571 uid_t ruid, euid, suid;
9572 ret = get_errno(getresuid(&ruid, &euid, &suid));
9573 if (!is_error(ret)) {
9574 if (put_user_id(high2lowuid(ruid), arg1)
9575 || put_user_id(high2lowuid(euid), arg2)
9576 || put_user_id(high2lowuid(suid), arg3))
9577 goto efault;
9580 break;
9581 #endif
9582 #ifdef TARGET_NR_getresgid
9583 case TARGET_NR_setresgid:
9584 ret = get_errno(sys_setresgid(low2highgid(arg1),
9585 low2highgid(arg2),
9586 low2highgid(arg3)));
9587 break;
9588 #endif
9589 #ifdef TARGET_NR_getresgid
9590 case TARGET_NR_getresgid:
9592 gid_t rgid, egid, sgid;
9593 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9594 if (!is_error(ret)) {
9595 if (put_user_id(high2lowgid(rgid), arg1)
9596 || put_user_id(high2lowgid(egid), arg2)
9597 || put_user_id(high2lowgid(sgid), arg3))
9598 goto efault;
9601 break;
9602 #endif
9603 #ifdef TARGET_NR_chown
9604 case TARGET_NR_chown:
9605 if (!(p = lock_user_string(arg1)))
9606 goto efault;
9607 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
9608 unlock_user(p, arg1, 0);
9609 break;
9610 #endif
9611 case TARGET_NR_setuid:
9612 ret = get_errno(sys_setuid(low2highuid(arg1)));
9613 break;
9614 case TARGET_NR_setgid:
9615 ret = get_errno(sys_setgid(low2highgid(arg1)));
9616 break;
9617 case TARGET_NR_setfsuid:
9618 ret = get_errno(setfsuid(arg1));
9619 break;
9620 case TARGET_NR_setfsgid:
9621 ret = get_errno(setfsgid(arg1));
9622 break;
/* 32-bit id variants: no low/high translation needed, pass ids through. */
9624 #ifdef TARGET_NR_lchown32
9625 case TARGET_NR_lchown32:
9626 if (!(p = lock_user_string(arg1)))
9627 goto efault;
9628 ret = get_errno(lchown(p, arg2, arg3));
9629 unlock_user(p, arg1, 0);
9630 break;
9631 #endif
9632 #ifdef TARGET_NR_getuid32
9633 case TARGET_NR_getuid32:
9634 ret = get_errno(getuid());
9635 break;
9636 #endif
9638 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9639 /* Alpha specific */
/* Alpha's getxuid returns uid in v0 and euid in the a4 register. */
9640 case TARGET_NR_getxuid:
9642 uid_t euid;
9643 euid=geteuid();
9644 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
9646 ret = get_errno(getuid());
9647 break;
9648 #endif
9649 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
9650 /* Alpha specific */
/* Likewise getxgid: gid in v0, egid in a4. */
9651 case TARGET_NR_getxgid:
9653 uid_t egid;
9654 egid=getegid();
9655 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
9657 ret = get_errno(getgid());
9658 break;
9659 #endif
9660 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9661 /* Alpha specific */
/* osf_getsysinfo: only the IEEE FP control query is supported; the FPCR
 * is converted to the OSF software control word (swcr) layout. */
9662 case TARGET_NR_osf_getsysinfo:
9663 ret = -TARGET_EOPNOTSUPP;
9664 switch (arg1) {
9665 case TARGET_GSI_IEEE_FP_CONTROL:
9667 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
9669 /* Copied from linux ieee_fpcr_to_swcr. */
9670 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
9671 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
9672 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
9673 | SWCR_TRAP_ENABLE_DZE
9674 | SWCR_TRAP_ENABLE_OVF);
9675 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
9676 | SWCR_TRAP_ENABLE_INE);
9677 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
9678 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
9680 if (put_user_u64 (swcr, arg2))
9681 goto efault;
9682 ret = 0;
9684 break;
9686 /* case GSI_IEEE_STATE_AT_SIGNAL:
9687 -- Not implemented in linux kernel.
9688 case GSI_UACPROC:
9689 -- Retrieves current unaligned access state; not much used.
9690 case GSI_PROC_TYPE:
9691 -- Retrieves implver information; surely not used.
9692 case GSI_GET_HWRPB:
9693 -- Grabs a copy of the HWRPB; surely not used.
9696 break;
9697 #endif
9698 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9699 /* Alpha specific */
/* osf_setsysinfo: supports setting the IEEE FP control word and raising
 * IEEE exceptions (which may queue a SIGFPE at the guest). */
9700 case TARGET_NR_osf_setsysinfo:
9701 ret = -TARGET_EOPNOTSUPP;
9702 switch (arg1) {
9703 case TARGET_SSI_IEEE_FP_CONTROL:
9705 uint64_t swcr, fpcr, orig_fpcr;
9707 if (get_user_u64 (swcr, arg2)) {
9708 goto efault;
9710 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9711 fpcr = orig_fpcr & FPCR_DYN_MASK;
9713 /* Copied from linux ieee_swcr_to_fpcr. */
9714 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
9715 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
9716 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
9717 | SWCR_TRAP_ENABLE_DZE
9718 | SWCR_TRAP_ENABLE_OVF)) << 48;
9719 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
9720 | SWCR_TRAP_ENABLE_INE)) << 57;
9721 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
9722 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
9724 cpu_alpha_store_fpcr(cpu_env, fpcr);
9725 ret = 0;
9727 break;
9729 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
9731 uint64_t exc, fpcr, orig_fpcr;
9732 int si_code;
9734 if (get_user_u64(exc, arg2)) {
9735 goto efault;
9738 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9740 /* We only add to the exception status here. */
9741 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
9743 cpu_alpha_store_fpcr(cpu_env, fpcr);
9744 ret = 0;
9746 /* Old exceptions are not signaled. */
9747 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
9749 /* If any exceptions set by this call,
9750 and are unmasked, send a signal. */
/* Later checks override earlier ones, so the delivered si_code follows
 * the kernel's priority order (INV highest). */
9751 si_code = 0;
9752 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
9753 si_code = TARGET_FPE_FLTRES;
9755 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
9756 si_code = TARGET_FPE_FLTUND;
9758 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
9759 si_code = TARGET_FPE_FLTOVF;
9761 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
9762 si_code = TARGET_FPE_FLTDIV;
9764 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
9765 si_code = TARGET_FPE_FLTINV;
9767 if (si_code != 0) {
9768 target_siginfo_t info;
9769 info.si_signo = SIGFPE;
9770 info.si_errno = 0;
9771 info.si_code = si_code;
9772 info._sifields._sigfault._addr
9773 = ((CPUArchState *)cpu_env)->pc;
9774 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9777 break;
9779 /* case SSI_NVPAIRS:
9780 -- Used with SSIN_UACPROC to enable unaligned accesses.
9781 case SSI_IEEE_STATE_AT_SIGNAL:
9782 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9783 -- Not implemented in linux kernel
9786 break;
9787 #endif
9788 #ifdef TARGET_NR_osf_sigprocmask
9789 /* Alpha specific. */
/* OSF sigprocmask: mask is passed by value (arg2), and the OLD mask is
 * returned in the syscall result rather than through a pointer. */
9790 case TARGET_NR_osf_sigprocmask:
9792 abi_ulong mask;
9793 int how;
9794 sigset_t set, oldset;
9796 switch(arg1) {
9797 case TARGET_SIG_BLOCK:
9798 how = SIG_BLOCK;
9799 break;
9800 case TARGET_SIG_UNBLOCK:
9801 how = SIG_UNBLOCK;
9802 break;
9803 case TARGET_SIG_SETMASK:
9804 how = SIG_SETMASK;
9805 break;
9806 default:
9807 ret = -TARGET_EINVAL;
9808 goto fail;
9810 mask = arg2;
9811 target_to_host_old_sigset(&set, &mask);
9812 ret = do_sigprocmask(how, &set, &oldset);
9813 if (!ret) {
9814 host_to_target_old_sigset(&mask, &oldset);
9815 ret = mask;
9818 break;
9819 #endif
/* 32-bit UID/GID syscall family: same shape as the 16-bit family above
 * but with full-width ids, so no low2high/high2low translation. */
9821 #ifdef TARGET_NR_getgid32
9822 case TARGET_NR_getgid32:
9823 ret = get_errno(getgid());
9824 break;
9825 #endif
9826 #ifdef TARGET_NR_geteuid32
9827 case TARGET_NR_geteuid32:
9828 ret = get_errno(geteuid());
9829 break;
9830 #endif
9831 #ifdef TARGET_NR_getegid32
9832 case TARGET_NR_getegid32:
9833 ret = get_errno(getegid());
9834 break;
9835 #endif
9836 #ifdef TARGET_NR_setreuid32
9837 case TARGET_NR_setreuid32:
9838 ret = get_errno(setreuid(arg1, arg2));
9839 break;
9840 #endif
9841 #ifdef TARGET_NR_setregid32
9842 case TARGET_NR_setregid32:
9843 ret = get_errno(setregid(arg1, arg2));
9844 break;
9845 #endif
/* getgroups32: guest entries are plain 32-bit gids, byte-swapped only. */
9846 #ifdef TARGET_NR_getgroups32
9847 case TARGET_NR_getgroups32:
9849 int gidsetsize = arg1;
9850 uint32_t *target_grouplist;
9851 gid_t *grouplist;
9852 int i;
9854 grouplist = alloca(gidsetsize * sizeof(gid_t));
9855 ret = get_errno(getgroups(gidsetsize, grouplist));
9856 if (gidsetsize == 0)
9857 break;
9858 if (!is_error(ret)) {
9859 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
9860 if (!target_grouplist) {
9861 ret = -TARGET_EFAULT;
9862 goto fail;
9864 for(i = 0;i < ret; i++)
9865 target_grouplist[i] = tswap32(grouplist[i]);
9866 unlock_user(target_grouplist, arg2, gidsetsize * 4);
9869 break;
9870 #endif
9871 #ifdef TARGET_NR_setgroups32
9872 case TARGET_NR_setgroups32:
9874 int gidsetsize = arg1;
9875 uint32_t *target_grouplist;
9876 gid_t *grouplist;
9877 int i;
9879 grouplist = alloca(gidsetsize * sizeof(gid_t));
9880 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
9881 if (!target_grouplist) {
9882 ret = -TARGET_EFAULT;
9883 goto fail;
9885 for(i = 0;i < gidsetsize; i++)
9886 grouplist[i] = tswap32(target_grouplist[i]);
9887 unlock_user(target_grouplist, arg2, 0);
9888 ret = get_errno(setgroups(gidsetsize, grouplist));
9890 break;
9891 #endif
9892 #ifdef TARGET_NR_fchown32
9893 case TARGET_NR_fchown32:
9894 ret = get_errno(fchown(arg1, arg2, arg3));
9895 break;
9896 #endif
9897 #ifdef TARGET_NR_setresuid32
9898 case TARGET_NR_setresuid32:
9899 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
9900 break;
9901 #endif
9902 #ifdef TARGET_NR_getresuid32
9903 case TARGET_NR_getresuid32:
9905 uid_t ruid, euid, suid;
9906 ret = get_errno(getresuid(&ruid, &euid, &suid));
9907 if (!is_error(ret)) {
9908 if (put_user_u32(ruid, arg1)
9909 || put_user_u32(euid, arg2)
9910 || put_user_u32(suid, arg3))
9911 goto efault;
9914 break;
9915 #endif
9916 #ifdef TARGET_NR_setresgid32
9917 case TARGET_NR_setresgid32:
9918 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
9919 break;
9920 #endif
9921 #ifdef TARGET_NR_getresgid32
9922 case TARGET_NR_getresgid32:
9924 gid_t rgid, egid, sgid;
9925 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9926 if (!is_error(ret)) {
9927 if (put_user_u32(rgid, arg1)
9928 || put_user_u32(egid, arg2)
9929 || put_user_u32(sgid, arg3))
9930 goto efault;
9933 break;
9934 #endif
9935 #ifdef TARGET_NR_chown32
9936 case TARGET_NR_chown32:
9937 if (!(p = lock_user_string(arg1)))
9938 goto efault;
9939 ret = get_errno(chown(p, arg2, arg3));
9940 unlock_user(p, arg1, 0);
9941 break;
9942 #endif
9943 #ifdef TARGET_NR_setuid32
9944 case TARGET_NR_setuid32:
9945 ret = get_errno(sys_setuid(arg1));
9946 break;
9947 #endif
9948 #ifdef TARGET_NR_setgid32
9949 case TARGET_NR_setgid32:
9950 ret = get_errno(sys_setgid(arg1));
9951 break;
9952 #endif
9953 #ifdef TARGET_NR_setfsuid32
9954 case TARGET_NR_setfsuid32:
9955 ret = get_errno(setfsuid(arg1));
9956 break;
9957 #endif
9958 #ifdef TARGET_NR_setfsgid32
9959 case TARGET_NR_setfsgid32:
9960 ret = get_errno(setfsgid(arg1));
9961 break;
9962 #endif
9964 case TARGET_NR_pivot_root:
9965 goto unimplemented;
9966 #ifdef TARGET_NR_mincore
/* mincore: lock both the probed address range (arg1/arg2) and the result
 * vector (arg3); the mincore_fail label unwinds the first lock when the
 * second fails. */
9967 case TARGET_NR_mincore:
9969 void *a;
9970 ret = -TARGET_EFAULT;
9971 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
9972 goto efault;
9973 if (!(p = lock_user_string(arg3)))
9974 goto mincore_fail;
9975 ret = get_errno(mincore(a, arg2, p));
9976 unlock_user(p, arg3, ret);
9977 mincore_fail:
9978 unlock_user(a, arg1, 0);
9980 break;
9981 #endif
9982 #ifdef TARGET_NR_arm_fadvise64_64
9983 case TARGET_NR_arm_fadvise64_64:
9984 /* arm_fadvise64_64 looks like fadvise64_64 but
9985 * with different argument order: fd, advice, offset, len
9986 * rather than the usual fd, offset, len, advice.
9987 * Note that offset and len are both 64-bit so appear as
9988 * pairs of 32-bit registers.
/* posix_fadvise returns the error directly (it does not set errno),
 * hence the -host_to_target_errno(ret) conversion instead of get_errno. */
9990 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
9991 target_offset64(arg5, arg6), arg2);
9992 ret = -host_to_target_errno(ret);
9993 break;
9994 #endif
9996 #if TARGET_ABI_BITS == 32
/* 32-bit ABI fadvise variants: 64-bit offset/len arrive as 32-bit
 * register pairs, possibly shifted up one slot on pair-aligned ABIs. */
9998 #ifdef TARGET_NR_fadvise64_64
9999 case TARGET_NR_fadvise64_64:
10000 /* 6 args: fd, offset (high, low), len (high, low), advice */
10001 if (regpairs_aligned(cpu_env)) {
10002 /* offset is in (3,4), len in (5,6) and advice in 7 */
10003 arg2 = arg3;
10004 arg3 = arg4;
10005 arg4 = arg5;
10006 arg5 = arg6;
10007 arg6 = arg7;
10009 ret = -host_to_target_errno(posix_fadvise(arg1,
10010 target_offset64(arg2, arg3),
10011 target_offset64(arg4, arg5),
10012 arg6));
10013 break;
10014 #endif
10016 #ifdef TARGET_NR_fadvise64
10017 case TARGET_NR_fadvise64:
10018 /* 5 args: fd, offset (high, low), len, advice */
10019 if (regpairs_aligned(cpu_env)) {
10020 /* offset is in (3,4), len in 5 and advice in 6 */
10021 arg2 = arg3;
10022 arg3 = arg4;
10023 arg4 = arg5;
10024 arg5 = arg6;
10026 ret = -host_to_target_errno(posix_fadvise(arg1,
10027 target_offset64(arg2, arg3),
10028 arg4, arg5));
10029 break;
10030 #endif
10032 #else /* not a 32-bit ABI */
10033 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10034 #ifdef TARGET_NR_fadvise64_64
10035 case TARGET_NR_fadvise64_64:
10036 #endif
10037 #ifdef TARGET_NR_fadvise64
10038 case TARGET_NR_fadvise64:
10039 #endif
10040 #ifdef TARGET_S390X
/* s390x renumbers the POSIX_FADV_* advice constants; remap them (and map
 * the holes to deliberately-invalid values so the host rejects them). */
10041 switch (arg4) {
10042 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10043 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10044 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10045 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10046 default: break;
10048 #endif
10049 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10050 break;
10051 #endif
10052 #endif /* end of 64-bit ABI fadvise handling */
10054 #ifdef TARGET_NR_madvise
10055 case TARGET_NR_madvise:
10056 /* A straight passthrough may not be safe because qemu sometimes
10057 turns private file-backed mappings into anonymous mappings.
10058 This will break MADV_DONTNEED.
10059 This is a hint, so ignoring and returning success is ok. */
10060 ret = get_errno(0);
10061 break;
10062 #endif
10063 #if TARGET_ABI_BITS == 32
/* fcntl64: only the 64-bit lock commands need struct translation; ARM
 * EABI guests use a differently-padded flock64 layout (target_eabi_flock64)
 * selected at runtime via the eabi flag. */
10064 case TARGET_NR_fcntl64:
10066 int cmd;
10067 struct flock64 fl;
10068 struct target_flock64 *target_fl;
10069 #ifdef TARGET_ARM
10070 struct target_eabi_flock64 *target_efl;
10071 #endif
10073 cmd = target_to_host_fcntl_cmd(arg2);
10074 if (cmd == -TARGET_EINVAL) {
10075 ret = cmd;
10076 break;
10079 switch(arg2) {
10080 case TARGET_F_GETLK64:
10081 #ifdef TARGET_ARM
10082 if (((CPUARMState *)cpu_env)->eabi) {
10083 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
10084 goto efault;
10085 fl.l_type = tswap16(target_efl->l_type);
10086 fl.l_whence = tswap16(target_efl->l_whence);
10087 fl.l_start = tswap64(target_efl->l_start);
10088 fl.l_len = tswap64(target_efl->l_len);
10089 fl.l_pid = tswap32(target_efl->l_pid);
10090 unlock_user_struct(target_efl, arg3, 0);
10091 } else
10092 #endif
10094 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
10095 goto efault;
10096 fl.l_type = tswap16(target_fl->l_type);
10097 fl.l_whence = tswap16(target_fl->l_whence);
10098 fl.l_start = tswap64(target_fl->l_start);
10099 fl.l_len = tswap64(target_fl->l_len);
10100 fl.l_pid = tswap32(target_fl->l_pid);
10101 unlock_user_struct(target_fl, arg3, 0);
10103 ret = get_errno(fcntl(arg1, cmd, &fl));
/* F_GETLK64 writes the (possibly updated) lock description back. */
10104 if (ret == 0) {
10105 #ifdef TARGET_ARM
10106 if (((CPUARMState *)cpu_env)->eabi) {
10107 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
10108 goto efault;
10109 target_efl->l_type = tswap16(fl.l_type);
10110 target_efl->l_whence = tswap16(fl.l_whence);
10111 target_efl->l_start = tswap64(fl.l_start);
10112 target_efl->l_len = tswap64(fl.l_len);
10113 target_efl->l_pid = tswap32(fl.l_pid);
10114 unlock_user_struct(target_efl, arg3, 1);
10115 } else
10116 #endif
10118 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
10119 goto efault;
10120 target_fl->l_type = tswap16(fl.l_type);
10121 target_fl->l_whence = tswap16(fl.l_whence);
10122 target_fl->l_start = tswap64(fl.l_start);
10123 target_fl->l_len = tswap64(fl.l_len);
10124 target_fl->l_pid = tswap32(fl.l_pid);
10125 unlock_user_struct(target_fl, arg3, 1);
10128 break;
10130 case TARGET_F_SETLK64:
10131 case TARGET_F_SETLKW64:
/* SETLK64/SETLKW64 only read the lock description; nothing is copied
 * back to the guest afterwards. */
10132 #ifdef TARGET_ARM
10133 if (((CPUARMState *)cpu_env)->eabi) {
10134 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
10135 goto efault;
10136 fl.l_type = tswap16(target_efl->l_type);
10137 fl.l_whence = tswap16(target_efl->l_whence);
10138 fl.l_start = tswap64(target_efl->l_start);
10139 fl.l_len = tswap64(target_efl->l_len);
10140 fl.l_pid = tswap32(target_efl->l_pid);
10141 unlock_user_struct(target_efl, arg3, 0);
10142 } else
10143 #endif
10145 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
10146 goto efault;
10147 fl.l_type = tswap16(target_fl->l_type);
10148 fl.l_whence = tswap16(target_fl->l_whence);
10149 fl.l_start = tswap64(target_fl->l_start);
10150 fl.l_len = tswap64(target_fl->l_len);
10151 fl.l_pid = tswap32(target_fl->l_pid);
10152 unlock_user_struct(target_fl, arg3, 0);
10154 ret = get_errno(fcntl(arg1, cmd, &fl));
10155 break;
10156 default:
/* Non-64-bit-lock commands share the plain fcntl handler. */
10157 ret = do_fcntl(arg1, arg2, arg3);
10158 break;
10160 break;
10162 #endif
10163 #ifdef TARGET_NR_cacheflush
10164 case TARGET_NR_cacheflush:
10165 /* self-modifying code is handled automatically, so nothing needed */
10166 ret = 0;
10167 break;
10168 #endif
10169 #ifdef TARGET_NR_security
10170 case TARGET_NR_security:
10171 goto unimplemented;
10172 #endif
10173 #ifdef TARGET_NR_getpagesize
10174 case TARGET_NR_getpagesize:
10175 ret = TARGET_PAGE_SIZE;
10176 break;
10177 #endif
10178 case TARGET_NR_gettid:
10179 ret = get_errno(gettid());
10180 break;
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        /* The 64-bit file offset arrives split across two 32-bit
         * registers; ABIs that align register pairs shift every
         * argument up by one slot first. */
        if (regpairs_aligned(cpu_env)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        break;
#endif
10195 #ifdef CONFIG_ATTR
10196 #ifdef TARGET_NR_setxattr
10197 case TARGET_NR_listxattr:
10198 case TARGET_NR_llistxattr:
10200 void *p, *b = 0;
10201 if (arg2) {
10202 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10203 if (!b) {
10204 ret = -TARGET_EFAULT;
10205 break;
10208 p = lock_user_string(arg1);
10209 if (p) {
10210 if (num == TARGET_NR_listxattr) {
10211 ret = get_errno(listxattr(p, b, arg3));
10212 } else {
10213 ret = get_errno(llistxattr(p, b, arg3));
10215 } else {
10216 ret = -TARGET_EFAULT;
10218 unlock_user(p, arg1, 0);
10219 unlock_user(b, arg2, arg3);
10220 break;
10222 case TARGET_NR_flistxattr:
10224 void *b = 0;
10225 if (arg2) {
10226 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10227 if (!b) {
10228 ret = -TARGET_EFAULT;
10229 break;
10232 ret = get_errno(flistxattr(arg1, b, arg3));
10233 unlock_user(b, arg2, arg3);
10234 break;
10236 case TARGET_NR_setxattr:
10237 case TARGET_NR_lsetxattr:
10239 void *p, *n, *v = 0;
10240 if (arg3) {
10241 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10242 if (!v) {
10243 ret = -TARGET_EFAULT;
10244 break;
10247 p = lock_user_string(arg1);
10248 n = lock_user_string(arg2);
10249 if (p && n) {
10250 if (num == TARGET_NR_setxattr) {
10251 ret = get_errno(setxattr(p, n, v, arg4, arg5));
10252 } else {
10253 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10255 } else {
10256 ret = -TARGET_EFAULT;
10258 unlock_user(p, arg1, 0);
10259 unlock_user(n, arg2, 0);
10260 unlock_user(v, arg3, 0);
10262 break;
10263 case TARGET_NR_fsetxattr:
10265 void *n, *v = 0;
10266 if (arg3) {
10267 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10268 if (!v) {
10269 ret = -TARGET_EFAULT;
10270 break;
10273 n = lock_user_string(arg2);
10274 if (n) {
10275 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10276 } else {
10277 ret = -TARGET_EFAULT;
10279 unlock_user(n, arg2, 0);
10280 unlock_user(v, arg3, 0);
10282 break;
10283 case TARGET_NR_getxattr:
10284 case TARGET_NR_lgetxattr:
10286 void *p, *n, *v = 0;
10287 if (arg3) {
10288 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10289 if (!v) {
10290 ret = -TARGET_EFAULT;
10291 break;
10294 p = lock_user_string(arg1);
10295 n = lock_user_string(arg2);
10296 if (p && n) {
10297 if (num == TARGET_NR_getxattr) {
10298 ret = get_errno(getxattr(p, n, v, arg4));
10299 } else {
10300 ret = get_errno(lgetxattr(p, n, v, arg4));
10302 } else {
10303 ret = -TARGET_EFAULT;
10305 unlock_user(p, arg1, 0);
10306 unlock_user(n, arg2, 0);
10307 unlock_user(v, arg3, arg4);
10309 break;
10310 case TARGET_NR_fgetxattr:
10312 void *n, *v = 0;
10313 if (arg3) {
10314 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10315 if (!v) {
10316 ret = -TARGET_EFAULT;
10317 break;
10320 n = lock_user_string(arg2);
10321 if (n) {
10322 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10323 } else {
10324 ret = -TARGET_EFAULT;
10326 unlock_user(n, arg2, 0);
10327 unlock_user(v, arg3, arg4);
10329 break;
10330 case TARGET_NR_removexattr:
10331 case TARGET_NR_lremovexattr:
10333 void *p, *n;
10334 p = lock_user_string(arg1);
10335 n = lock_user_string(arg2);
10336 if (p && n) {
10337 if (num == TARGET_NR_removexattr) {
10338 ret = get_errno(removexattr(p, n));
10339 } else {
10340 ret = get_errno(lremovexattr(p, n));
10342 } else {
10343 ret = -TARGET_EFAULT;
10345 unlock_user(p, arg1, 0);
10346 unlock_user(n, arg2, 0);
10348 break;
10349 case TARGET_NR_fremovexattr:
10351 void *n;
10352 n = lock_user_string(arg2);
10353 if (n) {
10354 ret = get_errno(fremovexattr(arg1, n));
10355 } else {
10356 ret = -TARGET_EFAULT;
10358 unlock_user(n, arg2, 0);
10360 break;
10361 #endif
10362 #endif /* CONFIG_ATTR */
10363 #ifdef TARGET_NR_set_thread_area
10364 case TARGET_NR_set_thread_area:
10365 #if defined(TARGET_MIPS)
10366 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10367 ret = 0;
10368 break;
10369 #elif defined(TARGET_CRIS)
10370 if (arg1 & 0xff)
10371 ret = -TARGET_EINVAL;
10372 else {
10373 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10374 ret = 0;
10376 break;
10377 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10378 ret = do_set_thread_area(cpu_env, arg1);
10379 break;
10380 #elif defined(TARGET_M68K)
10382 TaskState *ts = cpu->opaque;
10383 ts->tp_value = arg1;
10384 ret = 0;
10385 break;
10387 #else
10388 goto unimplemented_nowarn;
10389 #endif
10390 #endif
10391 #ifdef TARGET_NR_get_thread_area
10392 case TARGET_NR_get_thread_area:
10393 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10394 ret = do_get_thread_area(cpu_env, arg1);
10395 break;
10396 #elif defined(TARGET_M68K)
10398 TaskState *ts = cpu->opaque;
10399 ret = ts->tp_value;
10400 break;
10402 #else
10403 goto unimplemented_nowarn;
10404 #endif
10405 #endif
10406 #ifdef TARGET_NR_getdomainname
10407 case TARGET_NR_getdomainname:
10408 goto unimplemented_nowarn;
10409 #endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;

        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            /* Fix: the conversion's failure was previously ignored, so a
             * bad guest pointer returned success with no data written.
             * Propagate EFAULT instead. */
            if (host_to_target_timespec(arg2, &ts)) {
                goto efault;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;

        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* Fix: report a fault writing the result back to the guest
             * (previously silently ignored), matching clock_gettime. */
            if (host_to_target_timespec(arg2, &ts)) {
                goto efault;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;

        /* Fix: the conversion's return value was previously ignored, so a
         * bad guest pointer led to sleeping on uninitialized data. */
        if (target_to_host_timespec(&ts, arg3)) {
            goto efault;
        }
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        if (arg4) {
            /* Best-effort copy-back of the remaining time, as before;
             * NOTE(review): a fault here is still ignored — confirm
             * whether it should become EFAULT too. */
            host_to_target_timespec(arg4, &ts);
        }

#if defined(TARGET_PPC)
        /* clock_nanosleep is odd in that it returns positive errno values.
         * On PPC, CR0 bit 3 should be set in such a situation. */
        if (ret) {
            ((CPUPPCState *)cpu_env)->crf[0] |= 1;
        }
#endif
        break;
    }
#endif
10453 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10454 case TARGET_NR_set_tid_address:
10455 ret = get_errno(set_tid_address((int *)g2h(arg1)));
10456 break;
10457 #endif
10459 case TARGET_NR_tkill:
10460 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10461 break;
10463 case TARGET_NR_tgkill:
10464 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
10465 target_to_host_signal(arg3)));
10466 break;
10468 #ifdef TARGET_NR_set_robust_list
10469 case TARGET_NR_set_robust_list:
10470 case TARGET_NR_get_robust_list:
10471 /* The ABI for supporting robust futexes has userspace pass
10472 * the kernel a pointer to a linked list which is updated by
10473 * userspace after the syscall; the list is walked by the kernel
10474 * when the thread exits. Since the linked list in QEMU guest
10475 * memory isn't a valid linked list for the host and we have
10476 * no way to reliably intercept the thread-death event, we can't
10477 * support these. Silently return ENOSYS so that guest userspace
10478 * falls back to a non-robust futex implementation (which should
10479 * be OK except in the corner case of the guest crashing while
10480 * holding a mutex that is shared with another process via
10481 * shared memory).
10483 goto unimplemented_nowarn;
10484 #endif
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
    {
        struct timespec *tsp, ts[2];

        if (!arg3) {
            /* NULL times means "set both timestamps to now". */
            tsp = NULL;
        } else {
            /* arg3 points at two target timespecs ([0] = atime,
             * [1] = mtime). Fix: both conversions' return values were
             * previously ignored, so a bad guest pointer silently used
             * garbage times. */
            if (target_to_host_timespec(ts, arg3) ||
                target_to_host_timespec(ts + 1,
                                        arg3 + sizeof(struct target_timespec))) {
                goto efault;
            }
            tsp = ts;
        }
        if (!arg2) {
            ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
        } else {
            if (!(p = lock_user_string(arg2))) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
            unlock_user(p, arg2, 0);
        }
        break;
    }
#endif
10510 case TARGET_NR_futex:
10511 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10512 break;
10513 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10514 case TARGET_NR_inotify_init:
10515 ret = get_errno(sys_inotify_init());
10516 break;
10517 #endif
10518 #ifdef CONFIG_INOTIFY1
10519 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10520 case TARGET_NR_inotify_init1:
10521 ret = get_errno(sys_inotify_init1(arg1));
10522 break;
10523 #endif
10524 #endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* Fix: lock_user_string() can fail; the original passed the
         * resulting NULL pathname straight into path() and the host
         * syscall. */
        p = lock_user_string(arg2);
        if (!p) {
            goto efault;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
10532 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10533 case TARGET_NR_inotify_rm_watch:
10534 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
10535 break;
10536 #endif
10538 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10539 case TARGET_NR_mq_open:
10541 struct mq_attr posix_mq_attr, *attrp;
10543 p = lock_user_string(arg1 - 1);
10544 if (arg4 != 0) {
10545 copy_from_user_mq_attr (&posix_mq_attr, arg4);
10546 attrp = &posix_mq_attr;
10547 } else {
10548 attrp = 0;
10550 ret = get_errno(mq_open(p, arg2, arg3, attrp));
10551 unlock_user (p, arg1, 0);
10553 break;
10555 case TARGET_NR_mq_unlink:
10556 p = lock_user_string(arg1 - 1);
10557 ret = get_errno(mq_unlink(p));
10558 unlock_user (p, arg1, 0);
10559 break;
10561 case TARGET_NR_mq_timedsend:
10563 struct timespec ts;
10565 p = lock_user (VERIFY_READ, arg2, arg3, 1);
10566 if (arg5 != 0) {
10567 target_to_host_timespec(&ts, arg5);
10568 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
10569 host_to_target_timespec(arg5, &ts);
10571 else
10572 ret = get_errno(mq_send(arg1, p, arg3, arg4));
10573 unlock_user (p, arg2, arg3);
10575 break;
10577 case TARGET_NR_mq_timedreceive:
10579 struct timespec ts;
10580 unsigned int prio;
10582 p = lock_user (VERIFY_READ, arg2, arg3, 1);
10583 if (arg5 != 0) {
10584 target_to_host_timespec(&ts, arg5);
10585 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
10586 host_to_target_timespec(arg5, &ts);
10588 else
10589 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
10590 unlock_user (p, arg2, arg3);
10591 if (arg4 != 0)
10592 put_user_u32(prio, arg4);
10594 break;
10596 /* Not implemented for now... */
10597 /* case TARGET_NR_mq_notify: */
10598 /* break; */
10600 case TARGET_NR_mq_getsetattr:
10602 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
10603 ret = 0;
10604 if (arg3 != 0) {
10605 ret = mq_getattr(arg1, &posix_mq_attr_out);
10606 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
10608 if (arg2 != 0) {
10609 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
10610 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
10614 break;
10615 #endif
10617 #ifdef CONFIG_SPLICE
10618 #ifdef TARGET_NR_tee
10619 case TARGET_NR_tee:
10621 ret = get_errno(tee(arg1,arg2,arg3,arg4));
10623 break;
10624 #endif
10625 #ifdef TARGET_NR_splice
10626 case TARGET_NR_splice:
10628 loff_t loff_in, loff_out;
10629 loff_t *ploff_in = NULL, *ploff_out = NULL;
10630 if (arg2) {
10631 if (get_user_u64(loff_in, arg2)) {
10632 goto efault;
10634 ploff_in = &loff_in;
10636 if (arg4) {
10637 if (get_user_u64(loff_out, arg4)) {
10638 goto efault;
10640 ploff_out = &loff_out;
10642 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
10643 if (arg2) {
10644 if (put_user_u64(loff_in, arg2)) {
10645 goto efault;
10648 if (arg4) {
10649 if (put_user_u64(loff_out, arg4)) {
10650 goto efault;
10654 break;
10655 #endif
10656 #ifdef TARGET_NR_vmsplice
10657 case TARGET_NR_vmsplice:
10659 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10660 if (vec != NULL) {
10661 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
10662 unlock_iovec(vec, arg2, arg3, 0);
10663 } else {
10664 ret = -host_to_target_errno(errno);
10667 break;
10668 #endif
10669 #endif /* CONFIG_SPLICE */
10670 #ifdef CONFIG_EVENTFD
10671 #if defined(TARGET_NR_eventfd)
10672 case TARGET_NR_eventfd:
10673 ret = get_errno(eventfd(arg1, 0));
10674 fd_trans_unregister(ret);
10675 break;
10676 #endif
10677 #if defined(TARGET_NR_eventfd2)
10678 case TARGET_NR_eventfd2:
10680 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
10681 if (arg2 & TARGET_O_NONBLOCK) {
10682 host_flags |= O_NONBLOCK;
10684 if (arg2 & TARGET_O_CLOEXEC) {
10685 host_flags |= O_CLOEXEC;
10687 ret = get_errno(eventfd(arg1, host_flags));
10688 fd_trans_unregister(ret);
10689 break;
10691 #endif
10692 #endif /* CONFIG_EVENTFD */
10693 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10694 case TARGET_NR_fallocate:
10695 #if TARGET_ABI_BITS == 32
10696 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
10697 target_offset64(arg5, arg6)));
10698 #else
10699 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
10700 #endif
10701 break;
10702 #endif
10703 #if defined(CONFIG_SYNC_FILE_RANGE)
10704 #if defined(TARGET_NR_sync_file_range)
10705 case TARGET_NR_sync_file_range:
10706 #if TARGET_ABI_BITS == 32
10707 #if defined(TARGET_MIPS)
10708 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10709 target_offset64(arg5, arg6), arg7));
10710 #else
10711 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
10712 target_offset64(arg4, arg5), arg6));
10713 #endif /* !TARGET_MIPS */
10714 #else
10715 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
10716 #endif
10717 break;
10718 #endif
10719 #if defined(TARGET_NR_sync_file_range2)
10720 case TARGET_NR_sync_file_range2:
10721 /* This is like sync_file_range but the arguments are reordered */
10722 #if TARGET_ABI_BITS == 32
10723 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10724 target_offset64(arg5, arg6), arg2));
10725 #else
10726 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
10727 #endif
10728 break;
10729 #endif
10730 #endif
10731 #if defined(TARGET_NR_signalfd4)
10732 case TARGET_NR_signalfd4:
10733 ret = do_signalfd4(arg1, arg2, arg4);
10734 break;
10735 #endif
10736 #if defined(TARGET_NR_signalfd)
10737 case TARGET_NR_signalfd:
10738 ret = do_signalfd4(arg1, arg2, 0);
10739 break;
10740 #endif
10741 #if defined(CONFIG_EPOLL)
10742 #if defined(TARGET_NR_epoll_create)
10743 case TARGET_NR_epoll_create:
10744 ret = get_errno(epoll_create(arg1));
10745 break;
10746 #endif
10747 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10748 case TARGET_NR_epoll_create1:
10749 ret = get_errno(epoll_create1(arg1));
10750 break;
10751 #endif
10752 #if defined(TARGET_NR_epoll_ctl)
10753 case TARGET_NR_epoll_ctl:
10755 struct epoll_event ep;
10756 struct epoll_event *epp = 0;
10757 if (arg4) {
10758 struct target_epoll_event *target_ep;
10759 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
10760 goto efault;
10762 ep.events = tswap32(target_ep->events);
10763 /* The epoll_data_t union is just opaque data to the kernel,
10764 * so we transfer all 64 bits across and need not worry what
10765 * actual data type it is.
10767 ep.data.u64 = tswap64(target_ep->data.u64);
10768 unlock_user_struct(target_ep, arg4, 0);
10769 epp = &ep;
10771 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
10772 break;
10774 #endif
10776 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
10777 #define IMPLEMENT_EPOLL_PWAIT
10778 #endif
10779 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
10780 #if defined(TARGET_NR_epoll_wait)
10781 case TARGET_NR_epoll_wait:
10782 #endif
10783 #if defined(IMPLEMENT_EPOLL_PWAIT)
10784 case TARGET_NR_epoll_pwait:
10785 #endif
10787 struct target_epoll_event *target_ep;
10788 struct epoll_event *ep;
10789 int epfd = arg1;
10790 int maxevents = arg3;
10791 int timeout = arg4;
10793 target_ep = lock_user(VERIFY_WRITE, arg2,
10794 maxevents * sizeof(struct target_epoll_event), 1);
10795 if (!target_ep) {
10796 goto efault;
10799 ep = alloca(maxevents * sizeof(struct epoll_event));
10801 switch (num) {
10802 #if defined(IMPLEMENT_EPOLL_PWAIT)
10803 case TARGET_NR_epoll_pwait:
10805 target_sigset_t *target_set;
10806 sigset_t _set, *set = &_set;
10808 if (arg5) {
10809 target_set = lock_user(VERIFY_READ, arg5,
10810 sizeof(target_sigset_t), 1);
10811 if (!target_set) {
10812 unlock_user(target_ep, arg2, 0);
10813 goto efault;
10815 target_to_host_sigset(set, target_set);
10816 unlock_user(target_set, arg5, 0);
10817 } else {
10818 set = NULL;
10821 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
10822 break;
10824 #endif
10825 #if defined(TARGET_NR_epoll_wait)
10826 case TARGET_NR_epoll_wait:
10827 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
10828 break;
10829 #endif
10830 default:
10831 ret = -TARGET_ENOSYS;
10833 if (!is_error(ret)) {
10834 int i;
10835 for (i = 0; i < ret; i++) {
10836 target_ep[i].events = tswap32(ep[i].events);
10837 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
10840 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
10841 break;
10843 #endif
10844 #endif
10845 #ifdef TARGET_NR_prlimit64
10846 case TARGET_NR_prlimit64:
10848 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10849 struct target_rlimit64 *target_rnew, *target_rold;
10850 struct host_rlimit64 rnew, rold, *rnewp = 0;
10851 int resource = target_to_host_resource(arg2);
10852 if (arg3) {
10853 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
10854 goto efault;
10856 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
10857 rnew.rlim_max = tswap64(target_rnew->rlim_max);
10858 unlock_user_struct(target_rnew, arg3, 0);
10859 rnewp = &rnew;
10862 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
10863 if (!is_error(ret) && arg4) {
10864 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
10865 goto efault;
10867 target_rold->rlim_cur = tswap64(rold.rlim_cur);
10868 target_rold->rlim_max = tswap64(rold.rlim_max);
10869 unlock_user_struct(target_rold, arg4, 1);
10871 break;
10873 #endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* Write the host's hostname directly into the guest buffer. */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);

        if (!name) {
            ret = -TARGET_EFAULT;
        } else {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        }
        break;
    }
#endif
10887 #ifdef TARGET_NR_atomic_cmpxchg_32
10888 case TARGET_NR_atomic_cmpxchg_32:
10890 /* should use start_exclusive from main.c */
10891 abi_ulong mem_value;
10892 if (get_user_u32(mem_value, arg6)) {
10893 target_siginfo_t info;
10894 info.si_signo = SIGSEGV;
10895 info.si_errno = 0;
10896 info.si_code = TARGET_SEGV_MAPERR;
10897 info._sifields._sigfault._addr = arg6;
10898 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10899 ret = 0xdeadbeef;
10902 if (mem_value == arg2)
10903 put_user_u32(arg1, arg6);
10904 ret = mem_value;
10905 break;
10907 #endif
10908 #ifdef TARGET_NR_atomic_barrier
10909 case TARGET_NR_atomic_barrier:
10911 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10912 ret = 0;
10913 break;
10915 #endif
10917 #ifdef TARGET_NR_timer_create
10918 case TARGET_NR_timer_create:
10920 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10922 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
10924 int clkid = arg1;
10925 int timer_index = next_free_host_timer();
10927 if (timer_index < 0) {
10928 ret = -TARGET_EAGAIN;
10929 } else {
10930 timer_t *phtimer = g_posix_timers + timer_index;
10932 if (arg2) {
10933 phost_sevp = &host_sevp;
10934 ret = target_to_host_sigevent(phost_sevp, arg2);
10935 if (ret != 0) {
10936 break;
10940 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
10941 if (ret) {
10942 phtimer = NULL;
10943 } else {
10944 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
10945 goto efault;
10949 break;
10951 #endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            target_to_host_itimerspec(&hspec_new, arg3);
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Fix: the previous setting was copied back to arg2 (the
             * flags word!) instead of the old_value pointer in arg4, and
             * unconditionally — the guest may pass old_value == NULL. */
            if (arg4) {
                host_to_target_itimerspec(arg4, &hspec_old);
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            struct itimerspec hspec;
            timer_t htimer = g_posix_timers[timerid];

            ret = get_errno(timer_gettime(htimer, &hspec));
            /* A fault copying the result back overrides the host result. */
            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];

            ret = get_errno(timer_getoverrun(htimer));
        }
        /* NOTE(review): ret is an overrun count, not a file descriptor;
         * if it happens to equal a registered fd number this call drops
         * that fd's translator. Preserved as-is — confirm whether this
         * fd_trans_unregister() is intentional. */
        fd_trans_unregister(ret);
        break;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];

            ret = get_errno(timer_delete(htimer));
            /* Free the slot so next_free_host_timer() can reuse it. */
            g_posix_timers[timerid] = 0;
        }
        break;
    }
#endif
11034 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11035 case TARGET_NR_timerfd_create:
11036 ret = get_errno(timerfd_create(arg1,
11037 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11038 break;
11039 #endif
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
    {
        struct itimerspec its_curr;

        ret = get_errno(timerfd_gettime(arg1, &its_curr));

        /* Copy the current setting out to the guest, if requested. */
        if (arg2) {
            if (host_to_target_itimerspec(arg2, &its_curr)) {
                goto efault;
            }
        }
        break;
    }
#endif
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
    {
        struct itimerspec its_new, its_old;
        struct itimerspec *p_new = NULL;

        /* A NULL new_value is passed through as-is. */
        if (arg3) {
            if (target_to_host_itimerspec(&its_new, arg3)) {
                goto efault;
            }
            p_new = &its_new;
        }

        ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

        /* Copy the previous setting back, if the guest asked for it. */
        if (arg4) {
            if (host_to_target_itimerspec(arg4, &its_old)) {
                goto efault;
            }
        }
        break;
    }
#endif
11078 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11079 case TARGET_NR_ioprio_get:
11080 ret = get_errno(ioprio_get(arg1, arg2));
11081 break;
11082 #endif
11084 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11085 case TARGET_NR_ioprio_set:
11086 ret = get_errno(ioprio_set(arg1, arg2, arg3));
11087 break;
11088 #endif
11090 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11091 case TARGET_NR_setns:
11092 ret = get_errno(setns(arg1, arg2));
11093 break;
11094 #endif
11095 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11096 case TARGET_NR_unshare:
11097 ret = get_errno(unshare(arg1));
11098 break;
11099 #endif
11101 default:
11102 unimplemented:
11103 gemu_log("qemu: Unsupported syscall: %d\n", num);
11104 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11105 unimplemented_nowarn:
11106 #endif
11107 ret = -TARGET_ENOSYS;
11108 break;
11110 fail:
11111 #ifdef DEBUG
11112 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
11113 #endif
11114 if(do_strace)
11115 print_syscall_ret(num, ret);
11116 return ret;
11117 efault:
11118 ret = -TARGET_EFAULT;
11119 goto fail;