/*
 * linux-user/syscall.c
 * (extracted from QEMU commit "linux-user: Use safe_syscall wrapper for
 * mq_timedsend and mq_timedreceive", blob 294e5ee23660056ef137652e4e3faa6ebeae87ef)
 */
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/mman.h>
36 #include <sys/swap.h>
37 #include <linux/capability.h>
38 #include <sched.h>
39 #ifdef __ia64__
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
42 #endif
43 #include <sys/socket.h>
44 #include <sys/un.h>
45 #include <sys/uio.h>
46 #include <sys/poll.h>
47 #include <sys/times.h>
48 #include <sys/shm.h>
49 #include <sys/sem.h>
50 #include <sys/statfs.h>
51 #include <utime.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
60 #ifdef CONFIG_TIMERFD
61 #include <sys/timerfd.h>
62 #endif
63 #ifdef TARGET_GPROF
64 #include <sys/gmon.h>
65 #endif
66 #ifdef CONFIG_EVENTFD
67 #include <sys/eventfd.h>
68 #endif
69 #ifdef CONFIG_EPOLL
70 #include <sys/epoll.h>
71 #endif
72 #ifdef CONFIG_ATTR
73 #include "qemu/xattr.h"
74 #endif
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
77 #endif
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
91 #include <linux/kd.h>
92 #include <linux/mtio.h>
93 #include <linux/fs.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
96 #endif
97 #include <linux/fb.h>
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #endif
108 #include <linux/audit.h>
109 #include "linux_loop.h"
110 #include "uname.h"
112 #include "qemu.h"
/* Clone flags managed by the NPTL emulation layer. */
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)

//#define DEBUG
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])

/* This is the size of the host kernel's sigset_t, needed where we make
 * direct system calls that take a sigset_t pointer and a size.
 */
#define SIGSET_T_SIZE (_NSIG / 8)

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

/* Helpers that define a static host-syscall wrapper function for each
 * arity; the generated function invokes the raw host syscall number
 * directly (bypassing any glibc wrapper) and returns its raw result.
 */
#define _syscall0(type,name)                                            \
static type name (void)                                                 \
{                                                                       \
    return syscall(__NR_##name);                                        \
}

#define _syscall1(type,name,type1,arg1)                                 \
static type name (type1 arg1)                                           \
{                                                                       \
    return syscall(__NR_##name, arg1);                                  \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)                      \
static type name (type1 arg1,type2 arg2)                                \
{                                                                       \
    return syscall(__NR_##name, arg1, arg2);                            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)           \
static type name (type1 arg1,type2 arg2,type3 arg3)                     \
{                                                                       \
    return syscall(__NR_##name, arg1, arg2, arg3);                      \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)          \
{                                                                       \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5)                                           \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                       \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);          \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6)                                \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                           \
{                                                                       \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);    \
}
187 #define __NR_sys_uname __NR_uname
188 #define __NR_sys_getcwd1 __NR_getcwd
189 #define __NR_sys_getdents __NR_getdents
190 #define __NR_sys_getdents64 __NR_getdents64
191 #define __NR_sys_getpriority __NR_getpriority
192 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
193 #define __NR_sys_syslog __NR_syslog
194 #define __NR_sys_futex __NR_futex
195 #define __NR_sys_inotify_init __NR_inotify_init
196 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
197 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
199 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
200 defined(__s390x__)
201 #define __NR__llseek __NR_lseek
202 #endif
204 /* Newer kernel ports have llseek() instead of _llseek() */
205 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
206 #define TARGET_NR__llseek TARGET_NR_llseek
207 #endif
209 #ifdef __NR_gettid
210 _syscall0(int, gettid)
211 #else
212 /* This is a replacement for the host gettid() and must return a host
213 errno. */
214 static int gettid(void) {
215 return -ENOSYS;
217 #endif
218 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
219 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
220 #endif
221 #if !defined(__NR_getdents) || \
222 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
223 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
224 #endif
225 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
226 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
227 loff_t *, res, uint, wh);
228 #endif
229 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
230 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group,int,error_code)
233 #endif
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address,int *,tidptr)
236 #endif
237 #if defined(TARGET_NR_futex) && defined(__NR_futex)
238 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
239 const struct timespec *,timeout,int *,uaddr2,int,val3)
240 #endif
241 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
242 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
243 unsigned long *, user_mask_ptr);
244 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
245 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
246 unsigned long *, user_mask_ptr);
247 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
248 void *, arg);
249 _syscall2(int, capget, struct __user_cap_header_struct *, header,
250 struct __user_cap_data_struct *, data);
251 _syscall2(int, capset, struct __user_cap_header_struct *, header,
252 struct __user_cap_data_struct *, data);
253 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
254 _syscall2(int, ioprio_get, int, which, int, who)
255 #endif
256 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
257 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
258 #endif
259 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
260 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
261 #endif
263 static bitmask_transtbl fcntl_flags_tbl[] = {
264 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
265 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
266 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
267 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
268 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
269 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
270 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
271 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
272 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
273 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
274 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
275 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
276 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
277 #if defined(O_DIRECT)
278 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
279 #endif
280 #if defined(O_NOATIME)
281 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
282 #endif
283 #if defined(O_CLOEXEC)
284 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
285 #endif
286 #if defined(O_PATH)
287 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
288 #endif
289 /* Don't terminate the list prematurely on 64-bit host+guest. */
290 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
291 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
292 #endif
293 { 0, 0, 0, 0 }
296 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
297 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
298 typedef struct TargetFdTrans {
299 TargetFdDataFunc host_to_target_data;
300 TargetFdDataFunc target_to_host_data;
301 TargetFdAddrFunc target_to_host_addr;
302 } TargetFdTrans;
304 static TargetFdTrans **target_fd_trans;
306 static unsigned int target_fd_max;
308 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
310 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
311 return target_fd_trans[fd]->target_to_host_data;
313 return NULL;
316 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
318 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
319 return target_fd_trans[fd]->host_to_target_data;
321 return NULL;
324 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
326 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
327 return target_fd_trans[fd]->target_to_host_addr;
329 return NULL;
332 static void fd_trans_register(int fd, TargetFdTrans *trans)
334 unsigned int oldmax;
336 if (fd >= target_fd_max) {
337 oldmax = target_fd_max;
338 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
339 target_fd_trans = g_renew(TargetFdTrans *,
340 target_fd_trans, target_fd_max);
341 memset((void *)(target_fd_trans + oldmax), 0,
342 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
344 target_fd_trans[fd] = trans;
347 static void fd_trans_unregister(int fd)
349 if (fd >= 0 && fd < target_fd_max) {
350 target_fd_trans[fd] = NULL;
354 static void fd_trans_dup(int oldfd, int newfd)
356 fd_trans_unregister(newfd);
357 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
358 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() helper with kernel-style return semantics: on success the
 * length of the path *including* the trailing NUL, on failure -1 with
 * errno set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Host libc has utimensat()/futimens(): dispatch on whether a pathname
 * was supplied (NULL pathname means "operate on dirfd itself").
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL) {
        return futimens(dirfd, times);
    }
    return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
/* No libc wrapper, but the kernel supports it: call the raw syscall. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int, sys_utimensat, int, dirfd, const char *, pathname,
          const struct timespec *, tsp, int, flags)
#else
/* Neither libc nor kernel support: fail with ENOSYS like the kernel would. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin sys_* shims over the libc inotify API so the syscall dispatch
 * code below can call them uniformly.
 */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return inotify_init();
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_ppoll)
/* Define a dummy (failing) syscall number when the host lacks ppoll so
 * the wrapper still compiles; the call will then return ENOSYS.
 */
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, };

/* Claim the first unused slot in g_posix_timers and return its index,
 * or -1 if all slots are in use.  The slot is marked busy by storing a
 * non-zero placeholder until the real timer id replaces it.
 */
static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    /* Only EABI guests align 64-bit arguments to even register pairs. */
    return ((((CPUARMState *)cpu_env)->eabi) == 1);
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
492 #define ERRNO_TABLE_SIZE 1200
494 /* target_to_host_errno_table[] is initialized from
495 * host_to_target_errno_table[] in syscall_init(). */
496 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
500 * This list is the union of errno values overridden in asm-<arch>/errno.h
501 * minus the errnos that are not actually generic to all archs.
503 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
504 [EAGAIN] = TARGET_EAGAIN,
505 [EIDRM] = TARGET_EIDRM,
506 [ECHRNG] = TARGET_ECHRNG,
507 [EL2NSYNC] = TARGET_EL2NSYNC,
508 [EL3HLT] = TARGET_EL3HLT,
509 [EL3RST] = TARGET_EL3RST,
510 [ELNRNG] = TARGET_ELNRNG,
511 [EUNATCH] = TARGET_EUNATCH,
512 [ENOCSI] = TARGET_ENOCSI,
513 [EL2HLT] = TARGET_EL2HLT,
514 [EDEADLK] = TARGET_EDEADLK,
515 [ENOLCK] = TARGET_ENOLCK,
516 [EBADE] = TARGET_EBADE,
517 [EBADR] = TARGET_EBADR,
518 [EXFULL] = TARGET_EXFULL,
519 [ENOANO] = TARGET_ENOANO,
520 [EBADRQC] = TARGET_EBADRQC,
521 [EBADSLT] = TARGET_EBADSLT,
522 [EBFONT] = TARGET_EBFONT,
523 [ENOSTR] = TARGET_ENOSTR,
524 [ENODATA] = TARGET_ENODATA,
525 [ETIME] = TARGET_ETIME,
526 [ENOSR] = TARGET_ENOSR,
527 [ENONET] = TARGET_ENONET,
528 [ENOPKG] = TARGET_ENOPKG,
529 [EREMOTE] = TARGET_EREMOTE,
530 [ENOLINK] = TARGET_ENOLINK,
531 [EADV] = TARGET_EADV,
532 [ESRMNT] = TARGET_ESRMNT,
533 [ECOMM] = TARGET_ECOMM,
534 [EPROTO] = TARGET_EPROTO,
535 [EDOTDOT] = TARGET_EDOTDOT,
536 [EMULTIHOP] = TARGET_EMULTIHOP,
537 [EBADMSG] = TARGET_EBADMSG,
538 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
539 [EOVERFLOW] = TARGET_EOVERFLOW,
540 [ENOTUNIQ] = TARGET_ENOTUNIQ,
541 [EBADFD] = TARGET_EBADFD,
542 [EREMCHG] = TARGET_EREMCHG,
543 [ELIBACC] = TARGET_ELIBACC,
544 [ELIBBAD] = TARGET_ELIBBAD,
545 [ELIBSCN] = TARGET_ELIBSCN,
546 [ELIBMAX] = TARGET_ELIBMAX,
547 [ELIBEXEC] = TARGET_ELIBEXEC,
548 [EILSEQ] = TARGET_EILSEQ,
549 [ENOSYS] = TARGET_ENOSYS,
550 [ELOOP] = TARGET_ELOOP,
551 [ERESTART] = TARGET_ERESTART,
552 [ESTRPIPE] = TARGET_ESTRPIPE,
553 [ENOTEMPTY] = TARGET_ENOTEMPTY,
554 [EUSERS] = TARGET_EUSERS,
555 [ENOTSOCK] = TARGET_ENOTSOCK,
556 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
557 [EMSGSIZE] = TARGET_EMSGSIZE,
558 [EPROTOTYPE] = TARGET_EPROTOTYPE,
559 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
560 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
561 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
562 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
563 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
564 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
565 [EADDRINUSE] = TARGET_EADDRINUSE,
566 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
567 [ENETDOWN] = TARGET_ENETDOWN,
568 [ENETUNREACH] = TARGET_ENETUNREACH,
569 [ENETRESET] = TARGET_ENETRESET,
570 [ECONNABORTED] = TARGET_ECONNABORTED,
571 [ECONNRESET] = TARGET_ECONNRESET,
572 [ENOBUFS] = TARGET_ENOBUFS,
573 [EISCONN] = TARGET_EISCONN,
574 [ENOTCONN] = TARGET_ENOTCONN,
575 [EUCLEAN] = TARGET_EUCLEAN,
576 [ENOTNAM] = TARGET_ENOTNAM,
577 [ENAVAIL] = TARGET_ENAVAIL,
578 [EISNAM] = TARGET_EISNAM,
579 [EREMOTEIO] = TARGET_EREMOTEIO,
580 [ESHUTDOWN] = TARGET_ESHUTDOWN,
581 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
582 [ETIMEDOUT] = TARGET_ETIMEDOUT,
583 [ECONNREFUSED] = TARGET_ECONNREFUSED,
584 [EHOSTDOWN] = TARGET_EHOSTDOWN,
585 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
586 [EALREADY] = TARGET_EALREADY,
587 [EINPROGRESS] = TARGET_EINPROGRESS,
588 [ESTALE] = TARGET_ESTALE,
589 [ECANCELED] = TARGET_ECANCELED,
590 [ENOMEDIUM] = TARGET_ENOMEDIUM,
591 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
592 #ifdef ENOKEY
593 [ENOKEY] = TARGET_ENOKEY,
594 #endif
595 #ifdef EKEYEXPIRED
596 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
597 #endif
598 #ifdef EKEYREVOKED
599 [EKEYREVOKED] = TARGET_EKEYREVOKED,
600 #endif
601 #ifdef EKEYREJECTED
602 [EKEYREJECTED] = TARGET_EKEYREJECTED,
603 #endif
604 #ifdef EOWNERDEAD
605 [EOWNERDEAD] = TARGET_EOWNERDEAD,
606 #endif
607 #ifdef ENOTRECOVERABLE
608 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
609 #endif
612 static inline int host_to_target_errno(int err)
614 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
615 host_to_target_errno_table[err]) {
616 return host_to_target_errno_table[err];
618 return err;
621 static inline int target_to_host_errno(int err)
623 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
624 target_to_host_errno_table[err]) {
625 return target_to_host_errno_table[err];
627 return err;
630 static inline abi_long get_errno(abi_long ret)
632 if (ret == -1)
633 return -host_to_target_errno(errno);
634 else
635 return ret;
638 static inline int is_error(abi_long ret)
640 return (abi_ulong)ret >= (abi_ulong)(-4096);
643 char *target_strerror(int err)
645 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
646 return NULL;
648 return strerror(target_to_host_errno(err));
/* Generators for safe_<name>() wrappers: like the _syscall* macros
 * above, but routed through safe_syscall(), which guarantees that a
 * guest signal arriving just before the host syscall causes -ERESTARTSYS
 * rather than being lost (see safe-syscall.S).
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
698 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
699 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
700 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
701 int, flags, mode_t, mode)
702 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
703 struct rusage *, rusage)
704 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
705 int, options, struct rusage *, rusage)
706 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
707 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
708 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
709 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
710 const struct timespec *,timeout,int *,uaddr2,int,val3)
711 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
712 safe_syscall2(int, kill, pid_t, pid, int, sig)
713 safe_syscall2(int, tkill, int, tid, int, sig)
714 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
715 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
716 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
717 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
718 socklen_t, addrlen)
719 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
720 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
721 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
722 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
723 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
724 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
725 #ifdef __NR_msgsnd
726 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
727 int, flags)
728 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
729 long, msgtype, int, flags)
730 #else
731 /* This host kernel architecture uses a single ipc syscall; fake up
732 * wrappers for the sub-operations to hide this implementation detail.
733 * Annoyingly we can't include linux/ipc.h to get the constant definitions
734 * for the call parameter because some structs in there conflict with the
735 * sys/ipc.h ones. So we just define them here, and rely on them being
736 * the same for all host architectures.
738 #define Q_MSGSND 11
739 #define Q_MSGRCV 12
740 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
742 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
743 void *, ptr, long, fifth)
744 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
746 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
748 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
750 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
752 #endif
753 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
754 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
755 size_t, len, unsigned, prio, const struct timespec *, timeout)
756 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
757 size_t, len, unsigned *, prio, const struct timespec *, timeout)
758 #endif
760 static inline int host_to_target_sock_type(int host_type)
762 int target_type;
764 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
765 case SOCK_DGRAM:
766 target_type = TARGET_SOCK_DGRAM;
767 break;
768 case SOCK_STREAM:
769 target_type = TARGET_SOCK_STREAM;
770 break;
771 default:
772 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
773 break;
776 #if defined(SOCK_CLOEXEC)
777 if (host_type & SOCK_CLOEXEC) {
778 target_type |= TARGET_SOCK_CLOEXEC;
780 #endif
782 #if defined(SOCK_NONBLOCK)
783 if (host_type & SOCK_NONBLOCK) {
784 target_type |= TARGET_SOCK_NONBLOCK;
786 #endif
788 return target_type;
791 static abi_ulong target_brk;
792 static abi_ulong target_original_brk;
793 static abi_ulong brk_page;
795 void target_set_brk(abi_ulong new_brk)
797 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
798 brk_page = HOST_PAGE_ALIGN(target_brk);
801 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
802 #define DEBUGF_BRK(message, args...)
804 /* do_brk() must return target values and target errnos. */
805 abi_long do_brk(abi_ulong new_brk)
807 abi_long mapped_addr;
808 int new_alloc_size;
810 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
812 if (!new_brk) {
813 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
814 return target_brk;
816 if (new_brk < target_original_brk) {
817 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
818 target_brk);
819 return target_brk;
822 /* If the new brk is less than the highest page reserved to the
823 * target heap allocation, set it and we're almost done... */
824 if (new_brk <= brk_page) {
825 /* Heap contents are initialized to zero, as for anonymous
826 * mapped pages. */
827 if (new_brk > target_brk) {
828 memset(g2h(target_brk), 0, new_brk - target_brk);
830 target_brk = new_brk;
831 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
832 return target_brk;
835 /* We need to allocate more memory after the brk... Note that
836 * we don't use MAP_FIXED because that will map over the top of
837 * any existing mapping (like the one with the host libc or qemu
838 * itself); instead we treat "mapped but at wrong address" as
839 * a failure and unmap again.
841 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
842 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
843 PROT_READ|PROT_WRITE,
844 MAP_ANON|MAP_PRIVATE, 0, 0));
846 if (mapped_addr == brk_page) {
847 /* Heap contents are initialized to zero, as for anonymous
848 * mapped pages. Technically the new pages are already
849 * initialized to zero since they *are* anonymous mapped
850 * pages, however we have to take care with the contents that
851 * come from the remaining part of the previous page: it may
852 * contains garbage data due to a previous heap usage (grown
853 * then shrunken). */
854 memset(g2h(target_brk), 0, brk_page - target_brk);
856 target_brk = new_brk;
857 brk_page = HOST_PAGE_ALIGN(target_brk);
858 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
859 target_brk);
860 return target_brk;
861 } else if (mapped_addr != -1) {
862 /* Mapped but at wrong address, meaning there wasn't actually
863 * enough space for this brk.
865 target_munmap(mapped_addr, new_alloc_size);
866 mapped_addr = -1;
867 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
869 else {
870 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
873 #if defined(TARGET_ALPHA)
874 /* We (partially) emulate OSF/1 on Alpha, which requires we
875 return a proper errno, not an unchanged brk value. */
876 return -TARGET_ENOMEM;
877 #endif
878 /* For everything else, return the previous break. */
879 return target_brk;
882 static inline abi_long copy_from_user_fdset(fd_set *fds,
883 abi_ulong target_fds_addr,
884 int n)
886 int i, nw, j, k;
887 abi_ulong b, *target_fds;
889 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
890 if (!(target_fds = lock_user(VERIFY_READ,
891 target_fds_addr,
892 sizeof(abi_ulong) * nw,
893 1)))
894 return -TARGET_EFAULT;
896 FD_ZERO(fds);
897 k = 0;
898 for (i = 0; i < nw; i++) {
899 /* grab the abi_ulong */
900 __get_user(b, &target_fds[i]);
901 for (j = 0; j < TARGET_ABI_BITS; j++) {
902 /* check the bit inside the abi_ulong */
903 if ((b >> j) & 1)
904 FD_SET(k, fds);
905 k++;
909 unlock_user(target_fds, target_fds_addr, 0);
911 return 0;
914 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
915 abi_ulong target_fds_addr,
916 int n)
918 if (target_fds_addr) {
919 if (copy_from_user_fdset(fds, target_fds_addr, n))
920 return -TARGET_EFAULT;
921 *fds_ptr = fds;
922 } else {
923 *fds_ptr = NULL;
925 return 0;
928 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
929 const fd_set *fds,
930 int n)
932 int i, nw, j, k;
933 abi_long v;
934 abi_ulong *target_fds;
936 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
937 if (!(target_fds = lock_user(VERIFY_WRITE,
938 target_fds_addr,
939 sizeof(abi_ulong) * nw,
940 0)))
941 return -TARGET_EFAULT;
943 k = 0;
944 for (i = 0; i < nw; i++) {
945 v = 0;
946 for (j = 0; j < TARGET_ABI_BITS; j++) {
947 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
948 k++;
950 __put_user(v, &target_fds[i]);
953 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
955 return 0;
958 #if defined(__alpha__)
959 #define HOST_HZ 1024
960 #else
961 #define HOST_HZ 100
962 #endif
964 static inline abi_long host_to_target_clock_t(long ticks)
966 #if HOST_HZ == TARGET_HZ
967 return ticks;
968 #else
969 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
970 #endif
973 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
974 const struct rusage *rusage)
976 struct target_rusage *target_rusage;
978 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
979 return -TARGET_EFAULT;
980 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
981 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
982 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
983 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
984 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
985 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
986 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
987 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
988 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
989 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
990 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
991 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
992 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
993 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
994 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
995 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
996 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
997 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
998 unlock_user_struct(target_rusage, target_addr, 1);
1000 return 0;
1003 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1005 abi_ulong target_rlim_swap;
1006 rlim_t result;
1008 target_rlim_swap = tswapal(target_rlim);
1009 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1010 return RLIM_INFINITY;
1012 result = target_rlim_swap;
1013 if (target_rlim_swap != (rlim_t)result)
1014 return RLIM_INFINITY;
1016 return result;
1019 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1021 abi_ulong target_rlim_swap;
1022 abi_ulong result;
1024 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1025 target_rlim_swap = TARGET_RLIM_INFINITY;
1026 else
1027 target_rlim_swap = rlim;
1028 result = tswapal(target_rlim_swap);
1030 return result;
/* Map a TARGET_RLIMIT_* resource code to the host RLIMIT_* value.
 * Codes we don't know about are passed through unchanged and left
 * for the host syscall to accept or reject.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
1071 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1072 abi_ulong target_tv_addr)
1074 struct target_timeval *target_tv;
1076 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1077 return -TARGET_EFAULT;
1079 __get_user(tv->tv_sec, &target_tv->tv_sec);
1080 __get_user(tv->tv_usec, &target_tv->tv_usec);
1082 unlock_user_struct(target_tv, target_tv_addr, 0);
1084 return 0;
1087 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1088 const struct timeval *tv)
1090 struct target_timeval *target_tv;
1092 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1093 return -TARGET_EFAULT;
1095 __put_user(tv->tv_sec, &target_tv->tv_sec);
1096 __put_user(tv->tv_usec, &target_tv->tv_usec);
1098 unlock_user_struct(target_tv, target_tv_addr, 1);
1100 return 0;
1103 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1104 abi_ulong target_tz_addr)
1106 struct target_timezone *target_tz;
1108 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1109 return -TARGET_EFAULT;
1112 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1113 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1115 unlock_user_struct(target_tz, target_tz_addr, 0);
1117 return 0;
1120 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1121 #include <mqueue.h>
1123 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1124 abi_ulong target_mq_attr_addr)
1126 struct target_mq_attr *target_mq_attr;
1128 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1129 target_mq_attr_addr, 1))
1130 return -TARGET_EFAULT;
1132 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1133 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1134 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1135 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1137 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1139 return 0;
1142 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1143 const struct mq_attr *attr)
1145 struct target_mq_attr *target_mq_attr;
1147 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1148 target_mq_attr_addr, 0))
1149 return -TARGET_EFAULT;
1151 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1152 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1153 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1154 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1156 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1158 return 0;
1160 #endif
1162 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1163 /* do_select() must return target values and target errnos. */
/* Emulate select(2) for the guest.
 * Copies the three guest fd_sets and optional timeout in, performs the
 * wait via safe_pselect6() (interruptible by guest signals), and copies
 * the surviving fd_sets and remaining time back out.
 * Must return target values and target errnos.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* A zero guest address yields a NULL fd_set pointer, matching the
     * "don't care" semantics of select(2). */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* pselect6 takes a timespec; convert the guest's timeval. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Write back the (possibly kernel-updated) remaining timeout. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1219 #endif
/* Create a pipe with flags via host pipe2(2), when the host supports it.
 * Returns the raw host result (or -ENOSYS); the caller converts errnos.
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Common implementation for the guest pipe and pipe2 syscalls.
 * Creates the host pipe, then returns the descriptors to the guest
 * either via per-target registers (legacy pipe convention on some
 * architectures) or by writing them to the guest array at pipedes.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second descriptor goes in a side register; first is returned. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default convention: store both descriptors into the guest array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1264 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1265 abi_ulong target_addr,
1266 socklen_t len)
1268 struct target_ip_mreqn *target_smreqn;
1270 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1271 if (!target_smreqn)
1272 return -TARGET_EFAULT;
1273 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1274 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1275 if (len == sizeof(struct target_ip_mreqn))
1276 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1277 unlock_user(target_smreqn, target_addr, 0);
1279 return 0;
/* Convert a guest sockaddr at target_addr into a host struct sockaddr.
 * Handles per-fd address translators, the AF_UNIX sun_path termination
 * quirk, and byte-swapping of AF_NETLINK/AF_PACKET scalar fields.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    /* Some fds (e.g. netlink sockets) install their own translator. */
    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Include the terminating NUL if one follows the last byte. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1339 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1340 struct sockaddr *addr,
1341 socklen_t len)
1343 struct target_sockaddr *target_saddr;
1345 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1346 if (!target_saddr)
1347 return -TARGET_EFAULT;
1348 memcpy(target_saddr, addr, len);
1349 target_saddr->sa_family = tswap16(addr->sa_family);
1350 if (addr->sa_family == AF_NETLINK) {
1351 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1352 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1353 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1355 unlock_user(target_saddr, target_addr, len);
1357 return 0;
/* Convert the ancillary-data (cmsg) chain of a guest msghdr into the
 * host msghdr's control buffer, converting SCM_RIGHTS fd arrays and
 * SCM_CREDENTIALS; other payload types are copied verbatim with a log
 * message.  On exit msgh->msg_controllen holds the converted length.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length of this guest cmsg (header excluded). */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* File descriptors were already mapped; just copy the array. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload: pass the raw bytes through unchanged. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the ancillary-data (cmsg) chain of a host msghdr back into
 * the guest msghdr's control buffer.  Handles SCM_RIGHTS, SO_TIMESTAMP
 * and SCM_CREDENTIALS specially; anything else is byte-copied with a
 * log message.  Truncation due to a too-small guest buffer is reported
 * via MSG_CTRUNC, mirroring the kernel's put_cmsg() behaviour.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length of this host cmsg (header excluded). */
        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            /* fallthrough to the outer default */
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                /* Zero-fill the tail so no host memory leaks to the guest. */
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* Byte-swap every field of a netlink message header in place.
 * Used for both directions since the swap is symmetric.
 */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
/* Walk a buffer of host-byte-order netlink messages and convert each
 * one to target byte order, calling host_to_target_nlmsg() for the
 * protocol-specific payload.  The header is swapped *after* the
 * callback, which therefore sees host byte order.
 */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                                       (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            /* End of a multipart message: swap the header and stop. */
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
1640 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
1641 size_t len,
1642 abi_long (*target_to_host_nlmsg)
1643 (struct nlmsghdr *))
1645 int ret;
1647 while (len > sizeof(struct nlmsghdr)) {
1648 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
1649 tswap32(nlh->nlmsg_len) > len) {
1650 break;
1652 tswap_nlmsghdr(nlh);
1653 switch (nlh->nlmsg_type) {
1654 case NLMSG_DONE:
1655 return 0;
1656 case NLMSG_NOOP:
1657 break;
1658 case NLMSG_ERROR:
1660 struct nlmsgerr *e = NLMSG_DATA(nlh);
1661 e->error = tswap32(e->error);
1662 tswap_nlmsghdr(&e->msg);
1664 default:
1665 ret = target_to_host_nlmsg(nlh);
1666 if (ret < 0) {
1667 return ret;
1670 len -= NLMSG_ALIGN(nlh->nlmsg_len);
1671 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
1673 return 0;
1676 #ifdef CONFIG_RTNETLINK
1677 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
1678 size_t len,
1679 abi_long (*host_to_target_rtattr)
1680 (struct rtattr *))
1682 unsigned short rta_len;
1683 abi_long ret;
1685 while (len > sizeof(struct rtattr)) {
1686 rta_len = rtattr->rta_len;
1687 if (rta_len < sizeof(struct rtattr) ||
1688 rta_len > len) {
1689 break;
1691 ret = host_to_target_rtattr(rtattr);
1692 rtattr->rta_len = tswap16(rtattr->rta_len);
1693 rtattr->rta_type = tswap16(rtattr->rta_type);
1694 if (ret < 0) {
1695 return ret;
1697 len -= RTA_ALIGN(rta_len);
1698 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
1700 return 0;
/* Byte-swap the payload of one link-level (IFLA_*) rtattr from host to
 * target byte order.  The rtattr header itself is swapped by the
 * caller, host_to_target_for_each_rtattr().
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;

    switch (rtattr->rta_type) {
    /* binary stream */
    case IFLA_ADDRESS:
    case IFLA_BROADCAST:
    /* string */
    case IFLA_IFNAME:
    case IFLA_QDISC:
        break;
    /* uint8_t */
    case IFLA_OPERSTATE:
    case IFLA_LINKMODE:
    case IFLA_CARRIER:
    case IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case IFLA_MTU:
    case IFLA_LINK:
    case IFLA_WEIGHT:
    case IFLA_TXQLEN:
    case IFLA_CARRIER_CHANGES:
    case IFLA_NUM_RX_QUEUES:
    case IFLA_NUM_TX_QUEUES:
    case IFLA_PROMISCUITY:
    case IFLA_EXT_MASK:
    case IFLA_LINK_NETNSID:
    case IFLA_GROUP:
    case IFLA_MASTER:
    case IFLA_NUM_VF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case IFLA_AF_SPEC:
    case IFLA_LINKINFO:
        /* FIXME: implement nested type */
        gemu_log("Unimplemented nested type %d\n", rtattr->rta_type);
        break;
    default:
        gemu_log("Unknown host IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Byte-swap the payload of one address-level (IFA_*) rtattr from host
 * to target byte order.
 */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        /* NOTE(review): IFA_BROADCAST normally carries an address, which
         * (like IFA_ADDRESS above) stays in network byte order; swapping
         * it as a u32 looks suspicious — confirm against the kernel's
         * rtnetlink usage before relying on this. */
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
1862 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
1864 uint32_t *u32;
1865 switch (rtattr->rta_type) {
1866 /* binary: depends on family type */
1867 case RTA_GATEWAY:
1868 case RTA_DST:
1869 case RTA_PREFSRC:
1870 break;
1871 /* u32 */
1872 case RTA_PRIORITY:
1873 case RTA_TABLE:
1874 case RTA_OIF:
1875 u32 = RTA_DATA(rtattr);
1876 *u32 = tswap32(*u32);
1877 break;
1878 default:
1879 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
1880 break;
1882 return 0;
/* Convert all link-level (IFLA_*) rtattrs in a buffer to target order. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}
/* Convert all address-level (IFA_*) rtattrs in a buffer to target order. */
static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}
/* Convert all route-level (RTA_*) rtattrs in a buffer to target order. */
static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}
/* Convert the payload of one rtnetlink message (link/addr/route family)
 * from host to target byte order, including its trailing rtattr chain.
 * Called per-message by host_to_target_for_each_nlmsg() with the header
 * still in host byte order.  Returns -TARGET_EINVAL for message types
 * we don't handle.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        ifi = NLMSG_DATA(nlh);
        ifi->ifi_type = tswap16(ifi->ifi_type);
        ifi->ifi_index = tswap32(ifi->ifi_index);
        ifi->ifi_flags = tswap32(ifi->ifi_flags);
        ifi->ifi_change = tswap32(ifi->ifi_change);
        host_to_target_link_rtattr(IFLA_RTA(ifi),
                                   nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        ifa = NLMSG_DATA(nlh);
        ifa->ifa_index = tswap32(ifa->ifa_index);
        host_to_target_addr_rtattr(IFA_RTA(ifa),
                                   nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        rtm = NLMSG_DATA(nlh);
        rtm->rtm_flags = tswap32(rtm->rtm_flags);
        host_to_target_route_rtattr(RTM_RTA(rtm),
                                    nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
/* Convert a buffer of rtnetlink replies from host to target byte order. */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
1954 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
1955 size_t len,
1956 abi_long (*target_to_host_rtattr)
1957 (struct rtattr *))
1959 abi_long ret;
1961 while (len >= sizeof(struct rtattr)) {
1962 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
1963 tswap16(rtattr->rta_len) > len) {
1964 break;
1966 rtattr->rta_len = tswap16(rtattr->rta_len);
1967 rtattr->rta_type = tswap16(rtattr->rta_type);
1968 ret = target_to_host_rtattr(rtattr);
1969 if (ret < 0) {
1970 return ret;
1972 len -= RTA_ALIGN(rtattr->rta_len);
1973 rtattr = (struct rtattr *)(((char *)rtattr) +
1974 RTA_ALIGN(rtattr->rta_len));
1976 return 0;
/* Convert one link-level (IFLA_*) rtattr payload from target to host
 * byte order.  No IFLA attribute types are handled yet; everything is
 * logged and passed through unchanged.
 */
static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    default:
        gemu_log("Unknown target IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Convert one address-level (IFA_*) rtattr payload from target to host
 * byte order.  Address payloads stay in network byte order, so only
 * unknown types need flagging.
 */
static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_LOCAL:
    case IFA_ADDRESS:
        break;
    default:
        gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
2003 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2005 uint32_t *u32;
2006 switch (rtattr->rta_type) {
2007 /* binary: depends on family type */
2008 case RTA_DST:
2009 case RTA_SRC:
2010 case RTA_GATEWAY:
2011 break;
2012 /* u32 */
2013 case RTA_OIF:
2014 u32 = RTA_DATA(rtattr);
2015 *u32 = tswap32(*u32);
2016 break;
2017 default:
2018 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2019 break;
2021 return 0;
/* Convert all link-level (IFLA_*) rtattrs in a buffer to host order. */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}
/* Convert all address-level (IFA_*) rtattrs in a buffer to host order. */
static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}
/* Convert all route-level (RTA_*) rtattrs in a buffer to host order. */
static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}
/* Convert the payload of one guest rtnetlink request (link/addr/route
 * family) to host byte order, including its trailing rtattr chain.
 * The RTM_GET* dump requests carry no payload needing conversion.
 * Returns -TARGET_EOPNOTSUPP for message types we don't handle.
 */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        ifi = NLMSG_DATA(nlh);
        ifi->ifi_type = tswap16(ifi->ifi_type);
        ifi->ifi_index = tswap32(ifi->ifi_index);
        ifi->ifi_flags = tswap32(ifi->ifi_flags);
        ifi->ifi_change = tswap32(ifi->ifi_change);
        target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                   NLMSG_LENGTH(sizeof(*ifi)));
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        ifa = NLMSG_DATA(nlh);
        ifa->ifa_index = tswap32(ifa->ifa_index);
        target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                   NLMSG_LENGTH(sizeof(*ifa)));
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        rtm = NLMSG_DATA(nlh);
        rtm->rtm_flags = tswap32(rtm->rtm_flags);
        target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                    NLMSG_LENGTH(sizeof(*rtm)));
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
/* Convert a buffer of guest rtnetlink requests to host byte order. */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
2091 #endif /* CONFIG_RTNETLINK */
/* Convert the payload of one audit-netlink reply from host to target
 * byte order.  No message types are handled yet; everything is logged
 * and rejected with -TARGET_EINVAL.
 */
static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    default:
        gemu_log("Unknown host audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }
    return 0;
}
/* Convert a buffer of audit-netlink replies from host to target order. */
static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}
/* Convert the payload of one guest audit-netlink request to host byte
 * order.  User messages carry free-form data and need no conversion;
 * anything else is logged and rejected with -TARGET_EINVAL.
 */
static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    case AUDIT_USER:
    case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
    case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
        break;
    default:
        gemu_log("Unknown target audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }

    return 0;
}
/* Convert a buffer of guest audit-netlink requests to host byte order. */
static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}
2131 /* do_setsockopt() Must return target values and target errnos. */
2132 static abi_long do_setsockopt(int sockfd, int level, int optname,
2133 abi_ulong optval_addr, socklen_t optlen)
2135 abi_long ret;
2136 int val;
2137 struct ip_mreqn *ip_mreq;
2138 struct ip_mreq_source *ip_mreq_source;
2140 switch(level) {
2141 case SOL_TCP:
2142 /* TCP options all take an 'int' value. */
2143 if (optlen < sizeof(uint32_t))
2144 return -TARGET_EINVAL;
2146 if (get_user_u32(val, optval_addr))
2147 return -TARGET_EFAULT;
2148 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2149 break;
2150 case SOL_IP:
2151 switch(optname) {
2152 case IP_TOS:
2153 case IP_TTL:
2154 case IP_HDRINCL:
2155 case IP_ROUTER_ALERT:
2156 case IP_RECVOPTS:
2157 case IP_RETOPTS:
2158 case IP_PKTINFO:
2159 case IP_MTU_DISCOVER:
2160 case IP_RECVERR:
2161 case IP_RECVTOS:
2162 #ifdef IP_FREEBIND
2163 case IP_FREEBIND:
2164 #endif
2165 case IP_MULTICAST_TTL:
2166 case IP_MULTICAST_LOOP:
2167 val = 0;
2168 if (optlen >= sizeof(uint32_t)) {
2169 if (get_user_u32(val, optval_addr))
2170 return -TARGET_EFAULT;
2171 } else if (optlen >= 1) {
2172 if (get_user_u8(val, optval_addr))
2173 return -TARGET_EFAULT;
2175 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2176 break;
2177 case IP_ADD_MEMBERSHIP:
2178 case IP_DROP_MEMBERSHIP:
2179 if (optlen < sizeof (struct target_ip_mreq) ||
2180 optlen > sizeof (struct target_ip_mreqn))
2181 return -TARGET_EINVAL;
2183 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2184 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2185 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2186 break;
2188 case IP_BLOCK_SOURCE:
2189 case IP_UNBLOCK_SOURCE:
2190 case IP_ADD_SOURCE_MEMBERSHIP:
2191 case IP_DROP_SOURCE_MEMBERSHIP:
2192 if (optlen != sizeof (struct target_ip_mreq_source))
2193 return -TARGET_EINVAL;
2195 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2196 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2197 unlock_user (ip_mreq_source, optval_addr, 0);
2198 break;
2200 default:
2201 goto unimplemented;
2203 break;
2204 case SOL_IPV6:
2205 switch (optname) {
2206 case IPV6_MTU_DISCOVER:
2207 case IPV6_MTU:
2208 case IPV6_V6ONLY:
2209 case IPV6_RECVPKTINFO:
2210 val = 0;
2211 if (optlen < sizeof(uint32_t)) {
2212 return -TARGET_EINVAL;
2214 if (get_user_u32(val, optval_addr)) {
2215 return -TARGET_EFAULT;
2217 ret = get_errno(setsockopt(sockfd, level, optname,
2218 &val, sizeof(val)));
2219 break;
2220 default:
2221 goto unimplemented;
2223 break;
2224 case SOL_RAW:
2225 switch (optname) {
2226 case ICMP_FILTER:
2227 /* struct icmp_filter takes an u32 value */
2228 if (optlen < sizeof(uint32_t)) {
2229 return -TARGET_EINVAL;
2232 if (get_user_u32(val, optval_addr)) {
2233 return -TARGET_EFAULT;
2235 ret = get_errno(setsockopt(sockfd, level, optname,
2236 &val, sizeof(val)));
2237 break;
2239 default:
2240 goto unimplemented;
2242 break;
2243 case TARGET_SOL_SOCKET:
2244 switch (optname) {
2245 case TARGET_SO_RCVTIMEO:
2247 struct timeval tv;
2249 optname = SO_RCVTIMEO;
2251 set_timeout:
2252 if (optlen != sizeof(struct target_timeval)) {
2253 return -TARGET_EINVAL;
2256 if (copy_from_user_timeval(&tv, optval_addr)) {
2257 return -TARGET_EFAULT;
2260 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2261 &tv, sizeof(tv)));
2262 return ret;
2264 case TARGET_SO_SNDTIMEO:
2265 optname = SO_SNDTIMEO;
2266 goto set_timeout;
2267 case TARGET_SO_ATTACH_FILTER:
2269 struct target_sock_fprog *tfprog;
2270 struct target_sock_filter *tfilter;
2271 struct sock_fprog fprog;
2272 struct sock_filter *filter;
2273 int i;
2275 if (optlen != sizeof(*tfprog)) {
2276 return -TARGET_EINVAL;
2278 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2279 return -TARGET_EFAULT;
2281 if (!lock_user_struct(VERIFY_READ, tfilter,
2282 tswapal(tfprog->filter), 0)) {
2283 unlock_user_struct(tfprog, optval_addr, 1);
2284 return -TARGET_EFAULT;
2287 fprog.len = tswap16(tfprog->len);
2288 filter = g_try_new(struct sock_filter, fprog.len);
2289 if (filter == NULL) {
2290 unlock_user_struct(tfilter, tfprog->filter, 1);
2291 unlock_user_struct(tfprog, optval_addr, 1);
2292 return -TARGET_ENOMEM;
2294 for (i = 0; i < fprog.len; i++) {
2295 filter[i].code = tswap16(tfilter[i].code);
2296 filter[i].jt = tfilter[i].jt;
2297 filter[i].jf = tfilter[i].jf;
2298 filter[i].k = tswap32(tfilter[i].k);
2300 fprog.filter = filter;
2302 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2303 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2304 g_free(filter);
2306 unlock_user_struct(tfilter, tfprog->filter, 1);
2307 unlock_user_struct(tfprog, optval_addr, 1);
2308 return ret;
2310 case TARGET_SO_BINDTODEVICE:
2312 char *dev_ifname, *addr_ifname;
2314 if (optlen > IFNAMSIZ - 1) {
2315 optlen = IFNAMSIZ - 1;
2317 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2318 if (!dev_ifname) {
2319 return -TARGET_EFAULT;
2321 optname = SO_BINDTODEVICE;
2322 addr_ifname = alloca(IFNAMSIZ);
2323 memcpy(addr_ifname, dev_ifname, optlen);
2324 addr_ifname[optlen] = 0;
2325 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2326 addr_ifname, optlen));
2327 unlock_user (dev_ifname, optval_addr, 0);
2328 return ret;
2330 /* Options with 'int' argument. */
2331 case TARGET_SO_DEBUG:
2332 optname = SO_DEBUG;
2333 break;
2334 case TARGET_SO_REUSEADDR:
2335 optname = SO_REUSEADDR;
2336 break;
2337 case TARGET_SO_TYPE:
2338 optname = SO_TYPE;
2339 break;
2340 case TARGET_SO_ERROR:
2341 optname = SO_ERROR;
2342 break;
2343 case TARGET_SO_DONTROUTE:
2344 optname = SO_DONTROUTE;
2345 break;
2346 case TARGET_SO_BROADCAST:
2347 optname = SO_BROADCAST;
2348 break;
2349 case TARGET_SO_SNDBUF:
2350 optname = SO_SNDBUF;
2351 break;
2352 case TARGET_SO_SNDBUFFORCE:
2353 optname = SO_SNDBUFFORCE;
2354 break;
2355 case TARGET_SO_RCVBUF:
2356 optname = SO_RCVBUF;
2357 break;
2358 case TARGET_SO_RCVBUFFORCE:
2359 optname = SO_RCVBUFFORCE;
2360 break;
2361 case TARGET_SO_KEEPALIVE:
2362 optname = SO_KEEPALIVE;
2363 break;
2364 case TARGET_SO_OOBINLINE:
2365 optname = SO_OOBINLINE;
2366 break;
2367 case TARGET_SO_NO_CHECK:
2368 optname = SO_NO_CHECK;
2369 break;
2370 case TARGET_SO_PRIORITY:
2371 optname = SO_PRIORITY;
2372 break;
2373 #ifdef SO_BSDCOMPAT
2374 case TARGET_SO_BSDCOMPAT:
2375 optname = SO_BSDCOMPAT;
2376 break;
2377 #endif
2378 case TARGET_SO_PASSCRED:
2379 optname = SO_PASSCRED;
2380 break;
2381 case TARGET_SO_PASSSEC:
2382 optname = SO_PASSSEC;
2383 break;
2384 case TARGET_SO_TIMESTAMP:
2385 optname = SO_TIMESTAMP;
2386 break;
2387 case TARGET_SO_RCVLOWAT:
2388 optname = SO_RCVLOWAT;
2389 break;
2390 break;
2391 default:
2392 goto unimplemented;
2394 if (optlen < sizeof(uint32_t))
2395 return -TARGET_EINVAL;
2397 if (get_user_u32(val, optval_addr))
2398 return -TARGET_EFAULT;
2399 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2400 break;
2401 default:
2402 unimplemented:
2403 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2404 ret = -TARGET_ENOPROTOOPT;
2406 return ret;
/* do_getsockopt() Must return target values and target errnos.
 *
 * Reads the host option value and writes it back to the guest in
 * target byte order, updating *optlen.  Options whose value is not a
 * plain int (SO_LINGER, the timeouts, SO_PEERNAME) are unimplemented;
 * SO_PEERCRED is handled specially because it returns a struct.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Copy back at most as many bytes as the guest asked for. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): lv is initialised to sizeof(lv) (socklen_t),
         * not sizeof(val) (int) -- both are 4 bytes on supported
         * hosts so this behaves as intended; confirm before changing. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* Small value and a short guest buffer: write one byte. */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Convert a guest iovec array at target_addr (count entries) into a
 * host struct iovec array with each buffer locked into host memory.
 * On success returns the array (caller frees via unlock_iovec); on
 * failure returns NULL with errno set.  A bad buffer pointer after the
 * first entry is not an error: the remaining entries get zero length
 * so the syscall performs a partial transfer, matching Linux. */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every buffer locked so far (discarding writes). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
/* Release an iovec array produced by lock_iovec(): unlock each guest
 * buffer (copying data back to the guest when copy != 0) and free the
 * host array.  Stops at the first negative length, mirroring the
 * validation order of lock_iovec(). */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         int count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
/* Translate a guest socket type (base type plus TARGET_SOCK_CLOEXEC /
 * TARGET_SOCK_NONBLOCK flags) into the host's encoding, in place.
 * Returns 0 on success or -TARGET_EINVAL when a requested flag cannot
 * be represented or emulated on this host. */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        /* Other types share the same numbering on target and host. */
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        /* Without SOCK_NONBLOCK, sock_flags_fixup() emulates via
         * O_NONBLOCK; if neither exists we must refuse. */
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
2742 /* Try to emulate socket type flags after socket creation. */
2743 static int sock_flags_fixup(int fd, int target_type)
2745 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2746 if (target_type & TARGET_SOCK_NONBLOCK) {
2747 int flags = fcntl(fd, F_GETFL);
2748 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2749 close(fd);
2750 return -TARGET_EINVAL;
2753 #endif
2754 return fd;
/* Convert a guest sockaddr for an AF_PACKET/SOCK_PACKET socket into
 * host form: raw copy plus a byteswap of sa_family only (the packet
 * protocol field is already big-endian on the wire). */
static abi_long packet_target_to_host_sockaddr(void *host_addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    struct sockaddr *addr = host_addr;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr) {
        return -TARGET_EFAULT;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = tswap16(target_saddr->sa_family);
    /* spkt_protocol is big-endian */

    unlock_user(target_saddr, target_addr, 0);
    return 0;
}
/* fd translator for SOCK_PACKET sockets: only the sockaddr needs
 * converting; the data payload passes through untouched. */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
2781 #ifdef CONFIG_RTNETLINK
/* fd-translator hook: byteswap guest->host NETLINK_ROUTE traffic. */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    return target_to_host_nlmsg_route(buf, len);
}
/* fd-translator hook: byteswap host->guest NETLINK_ROUTE traffic. */
static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    return host_to_target_nlmsg_route(buf, len);
}
/* fd translator for NETLINK_ROUTE sockets: data in both directions
 * must be byteswapped message-by-message. */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
2796 #endif /* CONFIG_RTNETLINK */
/* fd-translator hook: validate guest->host NETLINK_AUDIT traffic. */
static abi_long netlink_audit_target_to_host(void *buf, size_t len)
{
    return target_to_host_nlmsg_audit(buf, len);
}
/* fd-translator hook: convert host->guest NETLINK_AUDIT traffic. */
static abi_long netlink_audit_host_to_target(void *buf, size_t len)
{
    return host_to_target_nlmsg_audit(buf, len);
}
/* fd translator for NETLINK_AUDIT sockets. */
static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};
2813 /* do_socket() Must return target values and target errnos. */
2814 static abi_long do_socket(int domain, int type, int protocol)
2816 int target_type = type;
2817 int ret;
2819 ret = target_to_host_sock_type(&type);
2820 if (ret) {
2821 return ret;
2824 if (domain == PF_NETLINK && !(
2825 #ifdef CONFIG_RTNETLINK
2826 protocol == NETLINK_ROUTE ||
2827 #endif
2828 protocol == NETLINK_KOBJECT_UEVENT ||
2829 protocol == NETLINK_AUDIT)) {
2830 return -EPFNOSUPPORT;
2833 if (domain == AF_PACKET ||
2834 (domain == AF_INET && type == SOCK_PACKET)) {
2835 protocol = tswap16(protocol);
2838 ret = get_errno(socket(domain, type, protocol));
2839 if (ret >= 0) {
2840 ret = sock_flags_fixup(ret, target_type);
2841 if (type == SOCK_PACKET) {
2842 /* Manage an obsolete case :
2843 * if socket type is SOCK_PACKET, bind by name
2845 fd_trans_register(ret, &target_packet_trans);
2846 } else if (domain == PF_NETLINK) {
2847 switch (protocol) {
2848 #ifdef CONFIG_RTNETLINK
2849 case NETLINK_ROUTE:
2850 fd_trans_register(ret, &target_netlink_route_trans);
2851 break;
2852 #endif
2853 case NETLINK_KOBJECT_UEVENT:
2854 /* nothing to do: messages are strings */
2855 break;
2856 case NETLINK_AUDIT:
2857 fd_trans_register(ret, &target_netlink_audit_trans);
2858 break;
2859 default:
2860 g_assert_not_reached();
2864 return ret;
2867 /* do_bind() Must return target values and target errnos. */
2868 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2869 socklen_t addrlen)
2871 void *addr;
2872 abi_long ret;
2874 if ((int)addrlen < 0) {
2875 return -TARGET_EINVAL;
2878 addr = alloca(addrlen+1);
2880 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2881 if (ret)
2882 return ret;
2884 return get_errno(bind(sockfd, addr, addrlen));
2887 /* do_connect() Must return target values and target errnos. */
2888 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2889 socklen_t addrlen)
2891 void *addr;
2892 abi_long ret;
2894 if ((int)addrlen < 0) {
2895 return -TARGET_EINVAL;
2898 addr = alloca(addrlen+1);
2900 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2901 if (ret)
2902 return ret;
2904 return get_errno(safe_connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation, operating on an already-locked
 * target_msghdr.  Converts name, iovec and control data between guest
 * and host representations, performs the host syscall, and (for
 * receive) converts the results back.  Returns bytes transferred or a
 * negative target errno. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Host cmsgs can be larger than the target's (different padding /
     * payload sizes), so over-allocate the control buffer. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        /* An fd translator (e.g. netlink) converts payload data;
         * otherwise only the control messages need conversion. */
        if (fd_trans_target_to_host_data(fd)) {
            ret = fd_trans_target_to_host_data(fd)(msg.msg_iov->iov_base,
                                                   msg.msg_iov->iov_len);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
        }
        if (ret == 0) {
            ret = get_errno(safe_sendmsg(fd, &msg, flags));
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;  /* preserve byte count across the conversions */
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       msg.msg_iov->iov_len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                ret = len;
            }
        }
    }

out:
    /* Copy receive buffers back to the guest (copy = !send). */
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
2986 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2987 int flags, int send)
2989 abi_long ret;
2990 struct target_msghdr *msgp;
2992 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2993 msgp,
2994 target_msg,
2995 send ? 1 : 0)) {
2996 return -TARGET_EFAULT;
2998 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2999 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3000 return ret;
3003 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3004 * so it might not have this *mmsg-specific flag either.
3006 #ifndef MSG_WAITFORONE
3007 #define MSG_WAITFORONE 0x10000
3008 #endif
/* Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked() over
 * the guest's mmsghdr vector.  Like the kernel, returns the number of
 * datagrams processed if any succeeded, else the first error. */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* The kernel silently clamps the vector length too. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Only the i entries actually processed need copying back. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
3050 /* If we don't have a system accept4() then just call accept.
3051 * The callsites to do_accept4() will ensure that they don't
3052 * pass a non-zero flags argument in this config.
3054 #ifndef CONFIG_ACCEPT4
/* Fallback for hosts without accept4(): plain accept(), valid only
 * because callers guarantee flags == 0 in this configuration. */
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
3061 #endif
/* do_accept4() Must return target values and target errnos.
 *
 * target_addr == 0 means the guest passed a NULL sockaddr, which is
 * forwarded as-is.  Note the EINVAL (not EFAULT) returns below: they
 * deliberately mirror Linux's accept() behaviour for bad addrlen /
 * address pointers. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        return get_errno(accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
3100 /* do_getpeername() Must return target values and target errnos. */
3101 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3102 abi_ulong target_addrlen_addr)
3104 socklen_t addrlen;
3105 void *addr;
3106 abi_long ret;
3108 if (get_user_u32(addrlen, target_addrlen_addr))
3109 return -TARGET_EFAULT;
3111 if ((int)addrlen < 0) {
3112 return -TARGET_EINVAL;
3115 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3116 return -TARGET_EFAULT;
3118 addr = alloca(addrlen);
3120 ret = get_errno(getpeername(fd, addr, &addrlen));
3121 if (!is_error(ret)) {
3122 host_to_target_sockaddr(target_addr, addr, addrlen);
3123 if (put_user_u32(addrlen, target_addrlen_addr))
3124 ret = -TARGET_EFAULT;
3126 return ret;
3129 /* do_getsockname() Must return target values and target errnos. */
3130 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3131 abi_ulong target_addrlen_addr)
3133 socklen_t addrlen;
3134 void *addr;
3135 abi_long ret;
3137 if (get_user_u32(addrlen, target_addrlen_addr))
3138 return -TARGET_EFAULT;
3140 if ((int)addrlen < 0) {
3141 return -TARGET_EINVAL;
3144 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3145 return -TARGET_EFAULT;
3147 addr = alloca(addrlen);
3149 ret = get_errno(getsockname(fd, addr, &addrlen));
3150 if (!is_error(ret)) {
3151 host_to_target_sockaddr(target_addr, addr, addrlen);
3152 if (put_user_u32(addrlen, target_addrlen_addr))
3153 ret = -TARGET_EFAULT;
3155 return ret;
3158 /* do_socketpair() Must return target values and target errnos. */
3159 static abi_long do_socketpair(int domain, int type, int protocol,
3160 abi_ulong target_tab_addr)
3162 int tab[2];
3163 abi_long ret;
3165 target_to_host_sock_type(&type);
3167 ret = get_errno(socketpair(domain, type, protocol, tab));
3168 if (!is_error(ret)) {
3169 if (put_user_s32(tab[0], target_tab_addr)
3170 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3171 ret = -TARGET_EFAULT;
3173 return ret;
/* do_sendto() Must return target values and target errnos.
 *
 * Also backs plain send(): callers pass target_addr == 0 for the
 * address-less variant. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    /* fd translators (e.g. netlink) byteswap the payload in place
     * before it reaches the host kernel. */
    if (fd_trans_target_to_host_data(fd)) {
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
    }
    if (target_addr) {
        /* NOTE(review): +1 presumably for the sockaddr conversion
         * helper's AF_UNIX handling -- confirm. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos.
 *
 * Also backs plain recv(): callers pass target_addr == 0 when no
 * source address is wanted. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            /* Copy the sender's address and updated length back. */
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: commit the received bytes to guest memory. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3258 #ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 *
 * Emulates the multiplexed socketcall(2) syscall used on targets
 * without separate socket syscalls: fetches the per-call argument
 * count's worth of abi_longs from guest memory at vptr, then
 * dispatches to the matching do_* helper. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
3344 #endif
/* Maximum number of simultaneously tracked guest shmat() attachments. */
#define N_SHM_REGIONS 32

/* Bookkeeping for one guest shared-memory attachment, used by do_shmat()
 * and do_shmdt() to set/clear guest page flags for the mapped range. */
static struct shm_region {
    abi_ulong start;  /* guest address the segment is attached at */
    abi_ulong size;   /* size of the segment in bytes (shm_segsz) */
    bool in_use;      /* slot occupied? */
} shm_regions[N_SHM_REGIONS];
/* Guest-ABI layout of struct semid_ds; padding words differ per target. */
struct target_semid_ds
{
    struct target_ipc_perm sem_perm;  /* ownership and permissions */
    abi_ulong sem_otime;              /* last semop() time */
#if !defined(TARGET_PPC64)
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;              /* last change time */
#if !defined(TARGET_PPC64)
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;              /* number of semaphores in the set */
    abi_ulong __unused3;
    abi_ulong __unused4;
};
/* Copy the guest struct ipc_perm (embedded at the head of a guest
 * semid_ds at target_addr) into the host representation.
 * The mode/__seq field widths vary per target ABI, hence the #ifdefs.
 * Returns 0 on success, -TARGET_EFAULT if the guest address is bad. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Copy a host struct ipc_perm back into the guest semid_ds at
 * target_addr (inverse of target_to_host_ipc_perm).
 * Returns 0 on success, -TARGET_EFAULT if the guest address is bad. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3426 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3427 abi_ulong target_addr)
3429 struct target_semid_ds *target_sd;
3431 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3432 return -TARGET_EFAULT;
3433 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3434 return -TARGET_EFAULT;
3435 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3436 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3437 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3438 unlock_user_struct(target_sd, target_addr, 0);
3439 return 0;
3442 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3443 struct semid_ds *host_sd)
3445 struct target_semid_ds *target_sd;
3447 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3448 return -TARGET_EFAULT;
3449 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3450 return -TARGET_EFAULT;
3451 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3452 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3453 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3454 unlock_user_struct(target_sd, target_addr, 1);
3455 return 0;
/* Guest-ABI layout of struct seminfo (semctl IPC_INFO/SEM_INFO result). */
struct target_seminfo {
    int semmap;
    int semmni;   /* max number of semaphore sets */
    int semmns;   /* max semaphores system-wide */
    int semmnu;
    int semmsl;   /* max semaphores per set */
    int semopm;   /* max ops per semop() call */
    int semume;
    int semusz;
    int semvmx;   /* max semaphore value */
    int semaem;
};
3471 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3472 struct seminfo *host_seminfo)
3474 struct target_seminfo *target_seminfo;
3475 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3476 return -TARGET_EFAULT;
3477 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3478 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3479 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3480 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3481 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3482 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3483 __put_user(host_seminfo->semume, &target_seminfo->semume);
3484 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3485 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3486 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3487 unlock_user_struct(target_seminfo, target_addr, 1);
3488 return 0;
/* Host-side semctl() argument union (glibc does not always declare it). */
union semun {
    int val;                 /* SETVAL */
    struct semid_ds *buf;    /* IPC_STAT / IPC_SET */
    unsigned short *array;   /* GETALL / SETALL */
    struct seminfo *__buf;   /* IPC_INFO / SEM_INFO */
};
/* Guest-side view of the semun union: all pointer members are guest
 * addresses (abi_ulong), not host pointers. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
3505 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3506 abi_ulong target_addr)
3508 int nsems;
3509 unsigned short *array;
3510 union semun semun;
3511 struct semid_ds semid_ds;
3512 int i, ret;
3514 semun.buf = &semid_ds;
3516 ret = semctl(semid, 0, IPC_STAT, semun);
3517 if (ret == -1)
3518 return get_errno(ret);
3520 nsems = semid_ds.sem_nsems;
3522 *host_array = g_try_new(unsigned short, nsems);
3523 if (!*host_array) {
3524 return -TARGET_ENOMEM;
3526 array = lock_user(VERIFY_READ, target_addr,
3527 nsems*sizeof(unsigned short), 1);
3528 if (!array) {
3529 g_free(*host_array);
3530 return -TARGET_EFAULT;
3533 for(i=0; i<nsems; i++) {
3534 __get_user((*host_array)[i], &array[i]);
3536 unlock_user(array, target_addr, 0);
3538 return 0;
3541 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3542 unsigned short **host_array)
3544 int nsems;
3545 unsigned short *array;
3546 union semun semun;
3547 struct semid_ds semid_ds;
3548 int i, ret;
3550 semun.buf = &semid_ds;
3552 ret = semctl(semid, 0, IPC_STAT, semun);
3553 if (ret == -1)
3554 return get_errno(ret);
3556 nsems = semid_ds.sem_nsems;
3558 array = lock_user(VERIFY_WRITE, target_addr,
3559 nsems*sizeof(unsigned short), 0);
3560 if (!array)
3561 return -TARGET_EFAULT;
3563 for(i=0; i<nsems; i++) {
3564 __put_user((*host_array)[i], &array[i]);
3566 g_free(*host_array);
3567 unlock_user(array, target_addr, 1);
3569 return 0;
/* Emulate semctl(2): convert the guest's semun argument per command,
 * issue the host call, and copy results back.
 * Returns a host result or -TARGET_xxx errno. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_long target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Strip IPC_64 and friends; host headers define the plain values. */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* array is allocated here and freed by host_to_target_semarray(). */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        /* Write-back happens even for IPC_SET; harmless round-trip. */
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These commands ignore the semun argument entirely. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest-ABI layout of struct sembuf (one semop() operation). */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in the set */
    short sem_op;            /* operation: add/subtract/wait-for-zero */
    short sem_flg;           /* IPC_NOWAIT / SEM_UNDO */
};
3648 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3649 abi_ulong target_addr,
3650 unsigned nsops)
3652 struct target_sembuf *target_sembuf;
3653 int i;
3655 target_sembuf = lock_user(VERIFY_READ, target_addr,
3656 nsops*sizeof(struct target_sembuf), 1);
3657 if (!target_sembuf)
3658 return -TARGET_EFAULT;
3660 for(i=0; i<nsops; i++) {
3661 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3662 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3663 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3666 unlock_user(target_sembuf, target_addr, 0);
3668 return 0;
/* Emulate semop(2): convert the guest op array and issue the host call.
 * NOTE(review): sops is a VLA sized by the guest-supplied nsops, so a
 * hostile guest can demand an arbitrarily large stack allocation here;
 * consider capping at the kernel's SEMOPM limit — verify against callers. */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(semop(semid, sops, nsops));
}
/* Guest-ABI layout of struct msqid_ds; 32-bit ABIs carry padding words
 * after each time field. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;  /* ownership and permissions */
    abi_ulong msg_stime;              /* last msgsnd() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;              /* last msgrcv() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;           /* current bytes on queue */
    abi_ulong msg_qnum;               /* current messages on queue */
    abi_ulong msg_qbytes;             /* max bytes allowed on queue */
    abi_ulong msg_lspid;              /* pid of last msgsnd() */
    abi_ulong msg_lrpid;              /* pid of last msgrcv() */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
3705 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3706 abi_ulong target_addr)
3708 struct target_msqid_ds *target_md;
3710 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3711 return -TARGET_EFAULT;
3712 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3713 return -TARGET_EFAULT;
3714 host_md->msg_stime = tswapal(target_md->msg_stime);
3715 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3716 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3717 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3718 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3719 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3720 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3721 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3722 unlock_user_struct(target_md, target_addr, 0);
3723 return 0;
3726 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3727 struct msqid_ds *host_md)
3729 struct target_msqid_ds *target_md;
3731 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3732 return -TARGET_EFAULT;
3733 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3734 return -TARGET_EFAULT;
3735 target_md->msg_stime = tswapal(host_md->msg_stime);
3736 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3737 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3738 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3739 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3740 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3741 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3742 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3743 unlock_user_struct(target_md, target_addr, 1);
3744 return 0;
/* Guest-ABI layout of struct msginfo (msgctl IPC_INFO/MSG_INFO result). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;   /* max message size in bytes */
    int msgmnb;   /* default max bytes per queue */
    int msgmni;   /* max number of message queues */
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
3758 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3759 struct msginfo *host_msginfo)
3761 struct target_msginfo *target_msginfo;
3762 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3763 return -TARGET_EFAULT;
3764 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3765 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3766 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3767 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3768 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3769 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3770 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3771 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3772 unlock_user_struct(target_msginfo, target_addr, 1);
3773 return 0;
/* Emulate msgctl(2): convert the guest buffer per command, issue the
 * host call, and copy results back.
 * Returns a host result or -TARGET_xxx errno. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 and friends; host headers define the plain values. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        /* Write-back happens even for IPC_SET; harmless round-trip. */
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel returns a struct msginfo through the msqid_ds arg. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-ABI layout of struct msgbuf: a message type word followed by
 * the variable-length message text. */
struct target_msgbuf {
    abi_long mtype;   /* message type, must be > 0 */
    char mtext[1];    /* message payload (actually msgsz bytes) */
};
/* Emulate msgsnd(2): copy the guest message into a host msgbuf (with
 * mtype byteswapped) and send via safe_msgsnd so a guest signal can
 * interrupt the blocking call.
 * NOTE(review): lock_user_struct only covers sizeof(target_msgbuf) but
 * memcpy reads msgsz bytes of mtext; this relies on the linear guest
 * mapping noted elsewhere in this file — confirm for softmmu setups. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host buffer: long mtype header plus msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/* Emulate msgrcv(2): receive into a host msgbuf via safe_msgrcv, then
 * copy the payload and the byteswapped mtype back to the guest.
 * NOTE(review): target_mb->mtype is written from host_mb->mtype even
 * when the host call failed (host_mb then uninitialized) — benign but
 * writes garbage to the guest's mtype field on error. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host buffer: long mtype header plus msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* Copy only the 'ret' bytes actually received. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
3883 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3884 abi_ulong target_addr)
3886 struct target_shmid_ds *target_sd;
3888 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3889 return -TARGET_EFAULT;
3890 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3891 return -TARGET_EFAULT;
3892 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3893 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3894 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3895 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3896 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3897 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3898 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3899 unlock_user_struct(target_sd, target_addr, 0);
3900 return 0;
3903 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3904 struct shmid_ds *host_sd)
3906 struct target_shmid_ds *target_sd;
3908 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3909 return -TARGET_EFAULT;
3910 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3911 return -TARGET_EFAULT;
3912 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3913 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3914 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3915 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3916 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3917 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3918 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3919 unlock_user_struct(target_sd, target_addr, 1);
3920 return 0;
/* Guest-ABI layout of struct shminfo (shmctl IPC_INFO result). */
struct target_shminfo {
    abi_ulong shmmax;  /* max segment size in bytes */
    abi_ulong shmmin;  /* min segment size in bytes */
    abi_ulong shmmni;  /* max number of segments */
    abi_ulong shmseg;  /* max segments per process */
    abi_ulong shmall;  /* max total shared memory in pages */
};
3931 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3932 struct shminfo *host_shminfo)
3934 struct target_shminfo *target_shminfo;
3935 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3936 return -TARGET_EFAULT;
3937 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3938 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3939 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3940 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3941 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3942 unlock_user_struct(target_shminfo, target_addr, 1);
3943 return 0;
/* Guest-ABI layout of struct shm_info (shmctl SHM_INFO result). */
struct target_shm_info {
    int used_ids;              /* number of active segments */
    abi_ulong shm_tot;         /* total allocated shm pages */
    abi_ulong shm_rss;         /* resident shm pages */
    abi_ulong shm_swp;         /* swapped shm pages */
    abi_ulong swap_attempts;   /* historical, unused by modern kernels */
    abi_ulong swap_successes;  /* historical, unused by modern kernels */
};
3955 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3956 struct shm_info *host_shm_info)
3958 struct target_shm_info *target_shm_info;
3959 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3960 return -TARGET_EFAULT;
3961 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3962 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3963 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3964 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3965 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3966 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3967 unlock_user_struct(target_shm_info, target_addr, 1);
3968 return 0;
/* Emulate shmctl(2): convert the guest buffer per command, issue the
 * host call, and copy results back.
 * Returns a host result or -TARGET_xxx errno. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 and friends; host headers define the plain values. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        /* Write-back happens even for IPC_SET; harmless round-trip. */
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The kernel returns a struct shminfo through the shmid_ds arg. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Likewise, a struct shm_info is returned through the arg. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* These commands ignore the buffer argument entirely. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
/* Emulate shmat(2): attach the segment at the guest-requested address,
 * or pick a free region via mmap_find_vma() when shmaddr is 0.  On
 * success, mark the guest pages valid/readable (and writable unless
 * SHM_RDONLY) and record the attachment for do_shmdt().
 * Returns the guest attach address or a -TARGET_xxx errno. */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    /* Serialize against concurrent changes to the guest address space. */
    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: the chosen range is already reserved host-side. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember this attachment so do_shmdt() can undo the page flags.
     * If all slots are busy the attach still succeeds, untracked. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
4064 static inline abi_long do_shmdt(abi_ulong shmaddr)
4066 int i;
4068 for (i = 0; i < N_SHM_REGIONS; ++i) {
4069 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4070 shm_regions[i].in_use = false;
4071 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4072 break;
4076 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* Dispatch the multiplexed SysV ipc(2) syscall to the individual
 * sem/msg/shm emulation helpers.  The high 16 bits of 'call' carry an
 * ABI version number; the low 16 bits select the operation.
 * do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        if (get_user_ual(atptr, ptr)) {
            /* Fix: report the bad guest pointer instead of silently
             * passing a zeroed argument on to do_semctl(). */
            ret = -TARGET_EFAULT;
            break;
        }
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-ABI callers pass a kludge struct bundling the
                 * buffer pointer and the requested message type. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

    /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

/* First pass over syscall_types.h: generate one STRUCT_<name> enum
 * constant per described kernel structure. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second pass: emit a thunk type-description array for each structure.
 * "Special" structures get no array; they are converted by hand. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Signature of a custom ioctl handler for entries whose arguments
 * cannot be converted generically by the thunk machinery. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One entry in the guest->host ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;            /* ioctl request number in the guest ABI */
    unsigned int host_cmd;     /* corresponding host request number */
    const char *name;          /* for logging / strace output */
    int access;                /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;     /* custom handler, or NULL for generic thunk */
    const argtype arg_type[5]; /* thunk description of the argument */
};

/* Argument data flows: read from / written to guest memory. */
#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the fixed scratch buffer used for ioctl argument conversion. */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/* Custom handler for FS_IOC_FIEMAP: the argument is a variable-length
 * structure, so the generic thunk conversion cannot be used.
 * Returns the host ioctl result or a -TARGET_xxx errno. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the fixed struct fiemap header from guest to host. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4311 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4312 int fd, int cmd, abi_long arg)
4314 const argtype *arg_type = ie->arg_type;
4315 int target_size;
4316 void *argptr;
4317 int ret;
4318 struct ifconf *host_ifconf;
4319 uint32_t outbufsz;
4320 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4321 int target_ifreq_size;
4322 int nb_ifreq;
4323 int free_buf = 0;
4324 int i;
4325 int target_ifc_len;
4326 abi_long target_ifc_buf;
4327 int host_ifc_len;
4328 char *host_ifc_buf;
4330 assert(arg_type[0] == TYPE_PTR);
4331 assert(ie->access == IOC_RW);
4333 arg_type++;
4334 target_size = thunk_type_size(arg_type, 0);
4336 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4337 if (!argptr)
4338 return -TARGET_EFAULT;
4339 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4340 unlock_user(argptr, arg, 0);
4342 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4343 target_ifc_len = host_ifconf->ifc_len;
4344 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4346 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4347 nb_ifreq = target_ifc_len / target_ifreq_size;
4348 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4350 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4351 if (outbufsz > MAX_STRUCT_SIZE) {
4352 /* We can't fit all the extents into the fixed size buffer.
4353 * Allocate one that is large enough and use it instead.
4355 host_ifconf = malloc(outbufsz);
4356 if (!host_ifconf) {
4357 return -TARGET_ENOMEM;
4359 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4360 free_buf = 1;
4362 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4364 host_ifconf->ifc_len = host_ifc_len;
4365 host_ifconf->ifc_buf = host_ifc_buf;
4367 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
4368 if (!is_error(ret)) {
4369 /* convert host ifc_len to target ifc_len */
4371 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4372 target_ifc_len = nb_ifreq * target_ifreq_size;
4373 host_ifconf->ifc_len = target_ifc_len;
4375 /* restore target ifc_buf */
4377 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4379 /* copy struct ifconf to target user */
4381 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4382 if (!argptr)
4383 return -TARGET_EFAULT;
4384 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4385 unlock_user(argptr, arg, target_size);
4387 /* copy ifreq[] to target user */
4389 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4390 for (i = 0; i < nb_ifreq ; i++) {
4391 thunk_convert(argptr + i * target_ifreq_size,
4392 host_ifc_buf + i * sizeof(struct ifreq),
4393 ifreq_arg_type, THUNK_TARGET);
4395 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4398 if (free_buf) {
4399 free(host_ifconf);
4402 return ret;
4405 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4406 int cmd, abi_long arg)
4408 void *argptr;
4409 struct dm_ioctl *host_dm;
4410 abi_long guest_data;
4411 uint32_t guest_data_size;
4412 int target_size;
4413 const argtype *arg_type = ie->arg_type;
4414 abi_long ret;
4415 void *big_buf = NULL;
4416 char *host_data;
4418 arg_type++;
4419 target_size = thunk_type_size(arg_type, 0);
4420 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4421 if (!argptr) {
4422 ret = -TARGET_EFAULT;
4423 goto out;
4425 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4426 unlock_user(argptr, arg, 0);
4428 /* buf_temp is too small, so fetch things into a bigger buffer */
4429 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4430 memcpy(big_buf, buf_temp, target_size);
4431 buf_temp = big_buf;
4432 host_dm = big_buf;
4434 guest_data = arg + host_dm->data_start;
4435 if ((guest_data - arg) < 0) {
4436 ret = -EINVAL;
4437 goto out;
4439 guest_data_size = host_dm->data_size - host_dm->data_start;
4440 host_data = (char*)host_dm + host_dm->data_start;
4442 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4443 switch (ie->host_cmd) {
4444 case DM_REMOVE_ALL:
4445 case DM_LIST_DEVICES:
4446 case DM_DEV_CREATE:
4447 case DM_DEV_REMOVE:
4448 case DM_DEV_SUSPEND:
4449 case DM_DEV_STATUS:
4450 case DM_DEV_WAIT:
4451 case DM_TABLE_STATUS:
4452 case DM_TABLE_CLEAR:
4453 case DM_TABLE_DEPS:
4454 case DM_LIST_VERSIONS:
4455 /* no input data */
4456 break;
4457 case DM_DEV_RENAME:
4458 case DM_DEV_SET_GEOMETRY:
4459 /* data contains only strings */
4460 memcpy(host_data, argptr, guest_data_size);
4461 break;
4462 case DM_TARGET_MSG:
4463 memcpy(host_data, argptr, guest_data_size);
4464 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4465 break;
4466 case DM_TABLE_LOAD:
4468 void *gspec = argptr;
4469 void *cur_data = host_data;
4470 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4471 int spec_size = thunk_type_size(arg_type, 0);
4472 int i;
4474 for (i = 0; i < host_dm->target_count; i++) {
4475 struct dm_target_spec *spec = cur_data;
4476 uint32_t next;
4477 int slen;
4479 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4480 slen = strlen((char*)gspec + spec_size) + 1;
4481 next = spec->next;
4482 spec->next = sizeof(*spec) + slen;
4483 strcpy((char*)&spec[1], gspec + spec_size);
4484 gspec += next;
4485 cur_data += spec->next;
4487 break;
4489 default:
4490 ret = -TARGET_EINVAL;
4491 unlock_user(argptr, guest_data, 0);
4492 goto out;
4494 unlock_user(argptr, guest_data, 0);
4496 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4497 if (!is_error(ret)) {
4498 guest_data = arg + host_dm->data_start;
4499 guest_data_size = host_dm->data_size - host_dm->data_start;
4500 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4501 switch (ie->host_cmd) {
4502 case DM_REMOVE_ALL:
4503 case DM_DEV_CREATE:
4504 case DM_DEV_REMOVE:
4505 case DM_DEV_RENAME:
4506 case DM_DEV_SUSPEND:
4507 case DM_DEV_STATUS:
4508 case DM_TABLE_LOAD:
4509 case DM_TABLE_CLEAR:
4510 case DM_TARGET_MSG:
4511 case DM_DEV_SET_GEOMETRY:
4512 /* no return data */
4513 break;
4514 case DM_LIST_DEVICES:
4516 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4517 uint32_t remaining_data = guest_data_size;
4518 void *cur_data = argptr;
4519 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4520 int nl_size = 12; /* can't use thunk_size due to alignment */
4522 while (1) {
4523 uint32_t next = nl->next;
4524 if (next) {
4525 nl->next = nl_size + (strlen(nl->name) + 1);
4527 if (remaining_data < nl->next) {
4528 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4529 break;
4531 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4532 strcpy(cur_data + nl_size, nl->name);
4533 cur_data += nl->next;
4534 remaining_data -= nl->next;
4535 if (!next) {
4536 break;
4538 nl = (void*)nl + next;
4540 break;
4542 case DM_DEV_WAIT:
4543 case DM_TABLE_STATUS:
4545 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4546 void *cur_data = argptr;
4547 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4548 int spec_size = thunk_type_size(arg_type, 0);
4549 int i;
4551 for (i = 0; i < host_dm->target_count; i++) {
4552 uint32_t next = spec->next;
4553 int slen = strlen((char*)&spec[1]) + 1;
4554 spec->next = (cur_data - argptr) + spec_size + slen;
4555 if (guest_data_size < spec->next) {
4556 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4557 break;
4559 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4560 strcpy(cur_data + spec_size, (char*)&spec[1]);
4561 cur_data = argptr + spec->next;
4562 spec = (void*)host_dm + host_dm->data_start + next;
4564 break;
4566 case DM_TABLE_DEPS:
4568 void *hdata = (void*)host_dm + host_dm->data_start;
4569 int count = *(uint32_t*)hdata;
4570 uint64_t *hdev = hdata + 8;
4571 uint64_t *gdev = argptr + 8;
4572 int i;
4574 *(uint32_t*)argptr = tswap32(count);
4575 for (i = 0; i < count; i++) {
4576 *gdev = tswap64(*hdev);
4577 gdev++;
4578 hdev++;
4580 break;
4582 case DM_LIST_VERSIONS:
4584 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4585 uint32_t remaining_data = guest_data_size;
4586 void *cur_data = argptr;
4587 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4588 int vers_size = thunk_type_size(arg_type, 0);
4590 while (1) {
4591 uint32_t next = vers->next;
4592 if (next) {
4593 vers->next = vers_size + (strlen(vers->name) + 1);
4595 if (remaining_data < vers->next) {
4596 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4597 break;
4599 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4600 strcpy(cur_data + vers_size, vers->name);
4601 cur_data += vers->next;
4602 remaining_data -= vers->next;
4603 if (!next) {
4604 break;
4606 vers = (void*)vers + next;
4608 break;
4610 default:
4611 unlock_user(argptr, guest_data, 0);
4612 ret = -TARGET_EINVAL;
4613 goto out;
4615 unlock_user(argptr, guest_data, guest_data_size);
4617 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4618 if (!argptr) {
4619 ret = -TARGET_EFAULT;
4620 goto out;
4622 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4623 unlock_user(argptr, arg, target_size);
4625 out:
4626 g_free(big_buf);
4627 return ret;
4630 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4631 int cmd, abi_long arg)
4633 void *argptr;
4634 int target_size;
4635 const argtype *arg_type = ie->arg_type;
4636 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4637 abi_long ret;
4639 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4640 struct blkpg_partition host_part;
4642 /* Read and convert blkpg */
4643 arg_type++;
4644 target_size = thunk_type_size(arg_type, 0);
4645 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4646 if (!argptr) {
4647 ret = -TARGET_EFAULT;
4648 goto out;
4650 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4651 unlock_user(argptr, arg, 0);
4653 switch (host_blkpg->op) {
4654 case BLKPG_ADD_PARTITION:
4655 case BLKPG_DEL_PARTITION:
4656 /* payload is struct blkpg_partition */
4657 break;
4658 default:
4659 /* Unknown opcode */
4660 ret = -TARGET_EINVAL;
4661 goto out;
4664 /* Read and convert blkpg->data */
4665 arg = (abi_long)(uintptr_t)host_blkpg->data;
4666 target_size = thunk_type_size(part_arg_type, 0);
4667 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4668 if (!argptr) {
4669 ret = -TARGET_EFAULT;
4670 goto out;
4672 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4673 unlock_user(argptr, arg, 0);
4675 /* Swizzle the data pointer to our local copy and call! */
4676 host_blkpg->data = &host_part;
4677 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
4679 out:
4680 return ret;
4683 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4684 int fd, int cmd, abi_long arg)
4686 const argtype *arg_type = ie->arg_type;
4687 const StructEntry *se;
4688 const argtype *field_types;
4689 const int *dst_offsets, *src_offsets;
4690 int target_size;
4691 void *argptr;
4692 abi_ulong *target_rt_dev_ptr;
4693 unsigned long *host_rt_dev_ptr;
4694 abi_long ret;
4695 int i;
4697 assert(ie->access == IOC_W);
4698 assert(*arg_type == TYPE_PTR);
4699 arg_type++;
4700 assert(*arg_type == TYPE_STRUCT);
4701 target_size = thunk_type_size(arg_type, 0);
4702 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4703 if (!argptr) {
4704 return -TARGET_EFAULT;
4706 arg_type++;
4707 assert(*arg_type == (int)STRUCT_rtentry);
4708 se = struct_entries + *arg_type++;
4709 assert(se->convert[0] == NULL);
4710 /* convert struct here to be able to catch rt_dev string */
4711 field_types = se->field_types;
4712 dst_offsets = se->field_offsets[THUNK_HOST];
4713 src_offsets = se->field_offsets[THUNK_TARGET];
4714 for (i = 0; i < se->nb_fields; i++) {
4715 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4716 assert(*field_types == TYPE_PTRVOID);
4717 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4718 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4719 if (*target_rt_dev_ptr != 0) {
4720 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4721 tswapal(*target_rt_dev_ptr));
4722 if (!*host_rt_dev_ptr) {
4723 unlock_user(argptr, arg, 0);
4724 return -TARGET_EFAULT;
4726 } else {
4727 *host_rt_dev_ptr = 0;
4729 field_types++;
4730 continue;
4732 field_types = thunk_convert(buf_temp + dst_offsets[i],
4733 argptr + src_offsets[i],
4734 field_types, THUNK_HOST);
4736 unlock_user(argptr, arg, 0);
4738 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4739 if (*host_rt_dev_ptr != 0) {
4740 unlock_user((void *)*host_rt_dev_ptr,
4741 *target_rt_dev_ptr, 0);
4743 return ret;
4746 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4747 int fd, int cmd, abi_long arg)
4749 int sig = target_to_host_signal(arg);
4750 return get_errno(ioctl(fd, ie->host_cmd, sig));
/* Table of all emulated ioctls, expanded from ioctls.h.  Each entry maps
 * a target ioctl number to the host number plus access mode, an optional
 * special-case handler, and argument type descriptors; the table ends
 * with an all-zero sentinel entry (checked for in do_ioctl()). */
4753 static IOCTLEntry ioctl_entries[] = {
/* IOCTL(): plain entry, argument converted generically by the thunks. */
4754 #define IOCTL(cmd, access, ...) \
4755 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
/* IOCTL_SPECIAL(): entry dispatched to a dedicated do_ioctl_*() handler. */
4756 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4757 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4758 #include "ioctls.h"
4759 { 0, 0, },
4762 /* ??? Implement proper locking for ioctls. */
4763 /* do_ioctl() Must return target values and target errnos. */
/* Generic ioctl dispatcher: looks the command up in ioctl_entries[],
 * delegates to a special-case handler if one is registered, otherwise
 * converts the argument according to the entry's type descriptor and
 * access mode (IOC_R / IOC_W / IOC_RW) around the host ioctl. */
4764 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4766 const IOCTLEntry *ie;
4767 const argtype *arg_type;
4768 abi_long ret;
4769 uint8_t buf_temp[MAX_STRUCT_SIZE];
4770 int target_size;
4771 void *argptr;
/* Linear scan of the table; the all-zero sentinel terminates it. */
4773 ie = ioctl_entries;
4774 for(;;) {
4775 if (ie->target_cmd == 0) {
4776 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4777 return -TARGET_ENOSYS;
4779 if (ie->target_cmd == cmd)
4780 break;
4781 ie++;
4783 arg_type = ie->arg_type;
4784 #if defined(DEBUG)
4785 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
4786 #endif
/* Commands with bespoke conversion needs bypass the generic path. */
4787 if (ie->do_ioctl) {
4788 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4791 switch(arg_type[0]) {
4792 case TYPE_NULL:
4793 /* no argument */
4794 ret = get_errno(ioctl(fd, ie->host_cmd));
4795 break;
4796 case TYPE_PTRVOID:
4797 case TYPE_INT:
/* Pass-through: integer argument or opaque pointer value. */
4798 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
4799 break;
4800 case TYPE_PTR:
4801 arg_type++;
4802 target_size = thunk_type_size(arg_type, 0);
4803 switch(ie->access) {
4804 case IOC_R:
/* Kernel writes: run the ioctl into buf_temp, then convert the
 * result out to the guest buffer. */
4805 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4806 if (!is_error(ret)) {
4807 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4808 if (!argptr)
4809 return -TARGET_EFAULT;
4810 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4811 unlock_user(argptr, arg, target_size);
4813 break;
4814 case IOC_W:
/* Kernel reads: convert the guest struct into buf_temp first. */
4815 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4816 if (!argptr)
4817 return -TARGET_EFAULT;
4818 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4819 unlock_user(argptr, arg, 0);
4820 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp))
4821 break;
/* Unknown access modes deliberately fall into the read-write case. */
4822 default:
4823 case IOC_RW:
4824 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4825 if (!argptr)
4826 return -TARGET_EFAULT;
4827 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4828 unlock_user(argptr, arg, 0);
4829 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4830 if (!is_error(ret)) {
4831 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4832 if (!argptr)
4833 return -TARGET_EFAULT;
4834 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4835 unlock_user(argptr, arg, target_size);
4837 break;
4839 break;
4840 default:
4841 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4842 (long)cmd, arg_type[0]);
4843 ret = -TARGET_ENOSYS;
4844 break;
4846 return ret;
/* Target <-> host translation table for termios c_iflag (input-mode)
 * bits; terminated by an all-zero entry. */
4849 static const bitmask_transtbl iflag_tbl[] = {
4850 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4851 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4852 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4853 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4854 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4855 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4856 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4857 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4858 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4859 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4860 { TARGET_IXON, TARGET_IXON, IXON, IXON },
4861 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4862 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4863 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4864 { 0, 0, 0, 0 }
/* Target <-> host translation table for termios c_oflag (output-mode)
 * bits; multi-bit delay fields (NLDLY, CRDLY, ...) get one entry per
 * possible value.  Terminated by an all-zero entry. */
4867 static const bitmask_transtbl oflag_tbl[] = {
4868 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4869 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4870 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4871 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4872 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4873 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4874 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4875 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4876 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4877 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4878 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4879 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4880 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4881 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4882 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4883 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4884 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4885 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4886 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4887 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4888 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4889 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4890 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4891 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4892 { 0, 0, 0, 0 }
/* Target <-> host translation table for termios c_cflag (control-mode)
 * bits; the CBAUD and CSIZE fields get one entry per possible value.
 * Terminated by an all-zero entry. */
4895 static const bitmask_transtbl cflag_tbl[] = {
4896 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4897 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4898 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4899 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4900 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4901 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4902 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4903 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4904 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4905 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4906 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4907 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4908 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4909 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4910 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4911 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4912 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4913 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4914 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4915 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4916 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4917 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4918 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4919 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4920 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4921 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4922 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4923 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4924 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4925 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4926 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4927 { 0, 0, 0, 0 }
/* Target <-> host translation table for termios c_lflag (local-mode)
 * bits; terminated by an all-zero entry. */
4930 static const bitmask_transtbl lflag_tbl[] = {
4931 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4932 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4933 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4934 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4935 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4936 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4937 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4938 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4939 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4940 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4941 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4942 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4943 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4944 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4945 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4946 { 0, 0, 0, 0 }
/* Convert a guest struct target_termios into a host struct
 * host_termios: the four flag words are byte-swapped and translated
 * bit-by-bit through the *_tbl tables above, and the control-character
 * array is remapped slot by slot (host slots with no target equivalent
 * stay zeroed by the memset). */
4949 static void target_to_host_termios (void *dst, const void *src)
4951 struct host_termios *host = dst;
4952 const struct target_termios *target = src;
4954 host->c_iflag =
4955 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4956 host->c_oflag =
4957 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4958 host->c_cflag =
4959 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4960 host->c_lflag =
4961 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4962 host->c_line = target->c_line;
4964 memset(host->c_cc, 0, sizeof(host->c_cc));
4965 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4966 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4967 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4968 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4969 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4970 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4971 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4972 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4973 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4974 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4975 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4976 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4977 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4978 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4979 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4980 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4981 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/* Inverse of target_to_host_termios(): convert a host struct
 * host_termios back into the guest's struct target_termios layout and
 * byte order. */
4984 static void host_to_target_termios (void *dst, const void *src)
4986 struct target_termios *target = dst;
4987 const struct host_termios *host = src;
4989 target->c_iflag =
4990 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4991 target->c_oflag =
4992 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4993 target->c_cflag =
4994 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4995 target->c_lflag =
4996 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4997 target->c_line = host->c_line;
4999 memset(target->c_cc, 0, sizeof(target->c_cc));
5000 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5001 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5002 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5003 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5004 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5005 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5006 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5007 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5008 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5009 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5010 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5011 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5012 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5013 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5014 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5015 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5016 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor for termios conversion: custom converters in both
 * directions plus the size/alignment of the target and host layouts. */
5019 static const StructEntry struct_termios_def = {
5020 .convert = { host_to_target_termios, target_to_host_termios },
5021 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5022 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5025 static bitmask_transtbl mmap_flags_tbl[] = {
5026 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5027 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5028 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5029 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5030 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5031 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5032 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5033 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5034 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5035 MAP_NORESERVE },
5036 { 0, 0, 0, 0 }
5039 #if defined(TARGET_I386)
5041 /* NOTE: there is really one LDT for all the threads */
5042 static uint8_t *ldt_table;
5044 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5046 int size;
5047 void *p;
5049 if (!ldt_table)
5050 return 0;
5051 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5052 if (size > bytecount)
5053 size = bytecount;
5054 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5055 if (!p)
5056 return -TARGET_EFAULT;
5057 /* ??? Should this by byteswapped? */
5058 memcpy(p, ldt_table, size);
5059 unlock_user(p, ptr, size);
5060 return size;
5063 /* XXX: add locking support */
/* Install or clear one LDT entry from a guest modify_ldt() call.
 * 'oldmode' selects the legacy modify_ldt(1) semantics (no 'useable'
 * bit, no 64-bit segments).  The LDT itself is emulated: it lives in a
 * guest-visible anonymous mapping, lazily allocated on first write, and
 * env->ldt is pointed at it.  Returns 0 or a -TARGET_E* errno. */
5064 static abi_long write_ldt(CPUX86State *env,
5065 abi_ulong ptr, unsigned long bytecount, int oldmode)
5067 struct target_modify_ldt_ldt_s ldt_info;
5068 struct target_modify_ldt_ldt_s *target_ldt_info;
5069 int seg_32bit, contents, read_exec_only, limit_in_pages;
5070 int seg_not_present, useable, lm;
5071 uint32_t *lp, entry_1, entry_2;
5073 if (bytecount != sizeof(ldt_info))
5074 return -TARGET_EINVAL;
5075 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5076 return -TARGET_EFAULT;
5077 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5078 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5079 ldt_info.limit = tswap32(target_ldt_info->limit);
5080 ldt_info.flags = tswap32(target_ldt_info->flags);
5081 unlock_user_struct(target_ldt_info, ptr, 0)
5083 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5084 return -TARGET_EINVAL;
/* Unpack the user_desc flag bits (same encoding as the kernel ABI). */
5085 seg_32bit = ldt_info.flags & 1;
5086 contents = (ldt_info.flags >> 1) & 3;
5087 read_exec_only = (ldt_info.flags >> 3) & 1;
5088 limit_in_pages = (ldt_info.flags >> 4) & 1;
5089 seg_not_present = (ldt_info.flags >> 5) & 1;
5090 useable = (ldt_info.flags >> 6) & 1;
5091 #ifdef TARGET_ABI32
5092 lm = 0;
5093 #else
5094 lm = (ldt_info.flags >> 7) & 1;
5095 #endif
5096 if (contents == 3) {
5097 if (oldmode)
5098 return -TARGET_EINVAL;
5099 if (seg_not_present == 0)
5100 return -TARGET_EINVAL;
5102 /* allocate the LDT */
5103 if (!ldt_table) {
5104 env->ldt.base = target_mmap(0,
5105 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5106 PROT_READ|PROT_WRITE,
5107 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5108 if (env->ldt.base == -1)
5109 return -TARGET_ENOMEM;
5110 memset(g2h(env->ldt.base), 0,
5111 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5112 env->ldt.limit = 0xffff;
5113 ldt_table = g2h(env->ldt.base);
5116 /* NOTE: same code as Linux kernel */
5117 /* Allow LDTs to be cleared by the user. */
5118 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5119 if (oldmode ||
5120 (contents == 0 &&
5121 read_exec_only == 1 &&
5122 seg_32bit == 0 &&
5123 limit_in_pages == 0 &&
5124 seg_not_present == 1 &&
5125 useable == 0 )) {
5126 entry_1 = 0;
5127 entry_2 = 0;
5128 goto install;
/* Pack base/limit/flags into the two 32-bit descriptor words. */
5132 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5133 (ldt_info.limit & 0x0ffff);
5134 entry_2 = (ldt_info.base_addr & 0xff000000) |
5135 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5136 (ldt_info.limit & 0xf0000) |
5137 ((read_exec_only ^ 1) << 9) |
5138 (contents << 10) |
5139 ((seg_not_present ^ 1) << 15) |
5140 (seg_32bit << 22) |
5141 (limit_in_pages << 23) |
5142 (lm << 21) |
5143 0x7000;
5144 if (!oldmode)
5145 entry_2 |= (useable << 20);
5147 /* Install the new entry ... */
5148 install:
5149 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5150 lp[0] = tswap32(entry_1);
5151 lp[1] = tswap32(entry_2);
5152 return 0;
5155 /* specific and weird i386 syscalls */
5156 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5157 unsigned long bytecount)
5159 abi_long ret;
5161 switch (func) {
5162 case 0:
5163 ret = read_ldt(ptr, bytecount);
5164 break;
5165 case 1:
5166 ret = write_ldt(env, ptr, bytecount, 1);
5167 break;
5168 case 0x11:
5169 ret = write_ldt(env, ptr, bytecount, 0);
5170 break;
5171 default:
5172 ret = -TARGET_ENOSYS;
5173 break;
5175 return ret;
5178 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Emulate set_thread_area(): install a TLS descriptor in the emulated
 * GDT.  When the guest passes entry_number == -1, the first free GDT
 * TLS slot is chosen and written back into the guest user_desc.
 * Descriptor packing mirrors write_ldt() above. */
5179 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5181 uint64_t *gdt_table = g2h(env->gdt.base);
5182 struct target_modify_ldt_ldt_s ldt_info;
5183 struct target_modify_ldt_ldt_s *target_ldt_info;
5184 int seg_32bit, contents, read_exec_only, limit_in_pages;
5185 int seg_not_present, useable, lm;
5186 uint32_t *lp, entry_1, entry_2;
5187 int i;
5189 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5190 if (!target_ldt_info)
5191 return -TARGET_EFAULT;
5192 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5193 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5194 ldt_info.limit = tswap32(target_ldt_info->limit);
5195 ldt_info.flags = tswap32(target_ldt_info->flags);
/* entry_number == -1 asks us to allocate a free TLS slot. */
5196 if (ldt_info.entry_number == -1) {
5197 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5198 if (gdt_table[i] == 0) {
5199 ldt_info.entry_number = i;
5200 target_ldt_info->entry_number = tswap32(i);
5201 break;
5205 unlock_user_struct(target_ldt_info, ptr, 1);
5207 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5208 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5209 return -TARGET_EINVAL;
/* Unpack the user_desc flag bits (same encoding as the kernel ABI). */
5210 seg_32bit = ldt_info.flags & 1;
5211 contents = (ldt_info.flags >> 1) & 3;
5212 read_exec_only = (ldt_info.flags >> 3) & 1;
5213 limit_in_pages = (ldt_info.flags >> 4) & 1;
5214 seg_not_present = (ldt_info.flags >> 5) & 1;
5215 useable = (ldt_info.flags >> 6) & 1;
5216 #ifdef TARGET_ABI32
5217 lm = 0;
5218 #else
5219 lm = (ldt_info.flags >> 7) & 1;
5220 #endif
5222 if (contents == 3) {
5223 if (seg_not_present == 0)
5224 return -TARGET_EINVAL;
5227 /* NOTE: same code as Linux kernel */
5228 /* Allow LDTs to be cleared by the user. */
5229 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5230 if ((contents == 0 &&
5231 read_exec_only == 1 &&
5232 seg_32bit == 0 &&
5233 limit_in_pages == 0 &&
5234 seg_not_present == 1 &&
5235 useable == 0 )) {
5236 entry_1 = 0;
5237 entry_2 = 0;
5238 goto install;
/* Pack base/limit/flags into the two 32-bit descriptor words. */
5242 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5243 (ldt_info.limit & 0x0ffff);
5244 entry_2 = (ldt_info.base_addr & 0xff000000) |
5245 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5246 (ldt_info.limit & 0xf0000) |
5247 ((read_exec_only ^ 1) << 9) |
5248 (contents << 10) |
5249 ((seg_not_present ^ 1) << 15) |
5250 (seg_32bit << 22) |
5251 (limit_in_pages << 23) |
5252 (useable << 20) |
5253 (lm << 21) |
5254 0x7000;
5256 /* Install the new entry ... */
5257 install:
5258 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5259 lp[0] = tswap32(entry_1);
5260 lp[1] = tswap32(entry_2);
5261 return 0;
/* Emulate get_thread_area(): decode the GDT TLS descriptor selected by
 * the guest's entry_number back into user_desc form (inverse of the
 * packing done in do_set_thread_area()). */
5264 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5266 struct target_modify_ldt_ldt_s *target_ldt_info;
5267 uint64_t *gdt_table = g2h(env->gdt.base);
5268 uint32_t base_addr, limit, flags;
5269 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5270 int seg_not_present, useable, lm;
5271 uint32_t *lp, entry_1, entry_2;
5273 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5274 if (!target_ldt_info)
5275 return -TARGET_EFAULT;
5276 idx = tswap32(target_ldt_info->entry_number);
5277 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5278 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5279 unlock_user_struct(target_ldt_info, ptr, 1);
5280 return -TARGET_EINVAL;
5282 lp = (uint32_t *)(gdt_table + idx);
5283 entry_1 = tswap32(lp[0]);
5284 entry_2 = tswap32(lp[1]);
/* Extract the individual flag bits from the descriptor words. */
5286 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5287 contents = (entry_2 >> 10) & 3;
5288 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5289 seg_32bit = (entry_2 >> 22) & 1;
5290 limit_in_pages = (entry_2 >> 23) & 1;
5291 useable = (entry_2 >> 20) & 1;
5292 #ifdef TARGET_ABI32
5293 lm = 0;
5294 #else
5295 lm = (entry_2 >> 21) & 1;
5296 #endif
5297 flags = (seg_32bit << 0) | (contents << 1) |
5298 (read_exec_only << 3) | (limit_in_pages << 4) |
5299 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5300 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5301 base_addr = (entry_1 >> 16) |
5302 (entry_2 & 0xff000000) |
5303 ((entry_2 & 0xff) << 16);
5304 target_ldt_info->base_addr = tswapal(base_addr);
5305 target_ldt_info->limit = tswap32(limit);
5306 target_ldt_info->flags = tswap32(flags);
5307 unlock_user_struct(target_ldt_info, ptr, 1);
5308 return 0;
5310 #endif /* TARGET_I386 && TARGET_ABI32 */
5312 #ifndef TARGET_ABI32
5313 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5315 abi_long ret = 0;
5316 abi_ulong val;
5317 int idx;
5319 switch(code) {
5320 case TARGET_ARCH_SET_GS:
5321 case TARGET_ARCH_SET_FS:
5322 if (code == TARGET_ARCH_SET_GS)
5323 idx = R_GS;
5324 else
5325 idx = R_FS;
5326 cpu_x86_load_seg(env, idx, 0);
5327 env->segs[idx].base = addr;
5328 break;
5329 case TARGET_ARCH_GET_GS:
5330 case TARGET_ARCH_GET_FS:
5331 if (code == TARGET_ARCH_GET_GS)
5332 idx = R_GS;
5333 else
5334 idx = R_FS;
5335 val = env->segs[idx].base;
5336 if (put_user(val, addr, abi_ulong))
5337 ret = -TARGET_EFAULT;
5338 break;
5339 default:
5340 ret = -TARGET_EINVAL;
5341 break;
5343 return ret;
5345 #endif
5347 #endif /* defined(TARGET_I386) */
5349 #define NEW_STACK_SIZE 0x40000
/* Serializes thread creation so the child's TLS setup appears atomic
 * (the child blocks on this lock in clone_func() until the parent has
 * finished in do_fork()). */
5352 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Startup handshake data passed from do_fork() to clone_func().  The
 * mutex/cond pair signals the parent once the child is initialized;
 * tid is filled in by the child; *_tidptr and sigmask carry the
 * clone() arguments and the parent's pre-block signal mask. */
5353 typedef struct {
5354 CPUArchState *env;
5355 pthread_mutex_t mutex;
5356 pthread_cond_t cond;
5357 pthread_t thread;
5358 uint32_t tid;
5359 abi_ulong child_tidptr;
5360 abi_ulong parent_tidptr;
5361 sigset_t sigmask;
5362 } new_thread_info;
/* Host-thread entry point for a guest thread created with
 * clone(CLONE_VM).  Publishes the new tid, re-enables signals,
 * signals readiness to the parent via info->cond, waits for the
 * parent to finish TLS setup (clone_lock), then enters the CPU loop.
 * Never returns. */
5364 static void *clone_func(void *arg)
5366 new_thread_info *info = arg;
5367 CPUArchState *env;
5368 CPUState *cpu;
5369 TaskState *ts;
5371 rcu_register_thread();
5372 env = info->env;
5373 cpu = ENV_GET_CPU(env);
5374 thread_cpu = cpu;
5375 ts = (TaskState *)cpu->opaque;
5376 info->tid = gettid();
5377 cpu->host_tid = info->tid;
5378 task_settid(ts);
5379 if (info->child_tidptr)
5380 put_user_u32(info->tid, info->child_tidptr);
5381 if (info->parent_tidptr)
5382 put_user_u32(info->tid, info->parent_tidptr);
5383 /* Enable signals. */
5384 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5385 /* Signal to the parent that we're ready. */
5386 pthread_mutex_lock(&info->mutex);
5387 pthread_cond_broadcast(&info->cond);
5388 pthread_mutex_unlock(&info->mutex);
5389 /* Wait until the parent has finshed initializing the tls state. */
5390 pthread_mutex_lock(&clone_lock);
5391 pthread_mutex_unlock(&clone_lock);
5392 cpu_loop(env);
5393 /* never exits */
5394 return NULL;
5397 /* do_fork() Must return host values and target errnos (unlike most
5398 do_*() functions). */
/* Emulate clone()/fork()/vfork(): with CLONE_VM a new host thread is
 * created running a copy of the guest CPU state; without it the whole
 * process is fork()ed.  vfork() is emulated as a plain fork(). */
5399 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5400 abi_ulong parent_tidptr, target_ulong newtls,
5401 abi_ulong child_tidptr)
5403 CPUState *cpu = ENV_GET_CPU(env);
5404 int ret;
5405 TaskState *ts;
5406 CPUState *new_cpu;
5407 CPUArchState *new_env;
5408 unsigned int nptl_flags;
5409 sigset_t sigmask;
5411 /* Emulate vfork() with fork() */
5412 if (flags & CLONE_VFORK)
5413 flags &= ~(CLONE_VFORK | CLONE_VM);
5415 if (flags & CLONE_VM) {
5416 TaskState *parent_ts = (TaskState *)cpu->opaque;
5417 new_thread_info info;
5418 pthread_attr_t attr;
5420 ts = g_new0(TaskState, 1);
5421 init_task_state(ts);
5422 /* we create a new CPU instance. */
5423 new_env = cpu_copy(env);
5424 /* Init regs that differ from the parent. */
5425 cpu_clone_regs(new_env, newsp);
5426 new_cpu = ENV_GET_CPU(new_env);
5427 new_cpu->opaque = ts;
5428 ts->bprm = parent_ts->bprm;
5429 ts->info = parent_ts->info;
5430 ts->signal_mask = parent_ts->signal_mask;
/* The NPTL-specific flags are handled here, not by the host clone. */
5431 nptl_flags = flags;
5432 flags &= ~CLONE_NPTL_FLAGS2;
5434 if (nptl_flags & CLONE_CHILD_CLEARTID) {
5435 ts->child_tidptr = child_tidptr;
5438 if (nptl_flags & CLONE_SETTLS)
5439 cpu_set_tls (new_env, newtls);
5441 /* Grab a mutex so that thread setup appears atomic. */
5442 pthread_mutex_lock(&clone_lock);
5444 memset(&info, 0, sizeof(info));
5445 pthread_mutex_init(&info.mutex, NULL);
5446 pthread_mutex_lock(&info.mutex);
5447 pthread_cond_init(&info.cond, NULL);
5448 info.env = new_env;
5449 if (nptl_flags & CLONE_CHILD_SETTID)
5450 info.child_tidptr = child_tidptr;
5451 if (nptl_flags & CLONE_PARENT_SETTID)
5452 info.parent_tidptr = parent_tidptr;
5454 ret = pthread_attr_init(&attr);
5455 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5456 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5457 /* It is not safe to deliver signals until the child has finished
5458 initializing, so temporarily block all signals. */
5459 sigfillset(&sigmask);
5460 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5462 ret = pthread_create(&info.thread, &attr, clone_func, &info);
5463 /* TODO: Free new CPU state if thread creation failed. */
5465 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5466 pthread_attr_destroy(&attr);
5467 if (ret == 0) {
5468 /* Wait for the child to initialize. */
5469 pthread_cond_wait(&info.cond, &info.mutex);
5470 ret = info.tid;
5471 if (flags & CLONE_PARENT_SETTID)
5472 put_user_u32(ret, parent_tidptr);
5473 } else {
5474 ret = -1;
5476 pthread_mutex_unlock(&info.mutex);
5477 pthread_cond_destroy(&info.cond);
5478 pthread_mutex_destroy(&info.mutex);
/* Releasing clone_lock lets the child proceed past its handshake. */
5479 pthread_mutex_unlock(&clone_lock);
5480 } else {
5481 /* if no CLONE_VM, we consider it is a fork */
5482 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
5483 return -TARGET_EINVAL;
5486 if (block_signals()) {
5487 return -TARGET_ERESTARTSYS;
5490 fork_start();
5491 ret = fork();
5492 if (ret == 0) {
5493 /* Child Process. */
5494 rcu_after_fork();
5495 cpu_clone_regs(env, newsp);
5496 fork_end(1);
5497 /* There is a race condition here. The parent process could
5498 theoretically read the TID in the child process before the child
5499 tid is set. This would require using either ptrace
5500 (not implemented) or having *_tidptr to point at a shared memory
5501 mapping. We can't repeat the spinlock hack used above because
5502 the child process gets its own copy of the lock. */
5503 if (flags & CLONE_CHILD_SETTID)
5504 put_user_u32(gettid(), child_tidptr);
5505 if (flags & CLONE_PARENT_SETTID)
5506 put_user_u32(gettid(), parent_tidptr);
5507 ts = (TaskState *)cpu->opaque;
5508 if (flags & CLONE_SETTLS)
5509 cpu_set_tls (env, newtls);
5510 if (flags & CLONE_CHILD_CLEARTID)
5511 ts->child_tidptr = child_tidptr;
5512 } else {
5513 fork_end(0);
5516 return ret;
/* warning : doesn't handle linux specific flags... */
/* Translate a guest fcntl(2) command number into the host command number.
 * Commands with identical numbering on both sides are passed through;
 * unknown commands yield -TARGET_EINVAL so the caller can bail out early.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
        /* These five are assumed to share their numeric values between
         * target and host ABIs, so they pass through unchanged. */
	case TARGET_F_DUPFD:
	case TARGET_F_GETFD:
	case TARGET_F_SETFD:
        case TARGET_F_GETFL:
        case TARGET_F_SETFL:
            return cmd;
        case TARGET_F_GETLK:
	    return F_GETLK;
	case TARGET_F_SETLK:
	    return F_SETLK;
	case TARGET_F_SETLKW:
	    return F_SETLKW;
	case TARGET_F_GETOWN:
	    return F_GETOWN;
	case TARGET_F_SETOWN:
	    return F_SETOWN;
	case TARGET_F_GETSIG:
	    return F_GETSIG;
	case TARGET_F_SETSIG:
	    return F_SETSIG;
#if TARGET_ABI_BITS == 32
        /* The *64 lock commands only exist for 32-bit target ABIs; on a
         * 64-bit ABI plain F_GETLK et al. are already 64-bit capable. */
        case TARGET_F_GETLK64:
	    return F_GETLK64;
	case TARGET_F_SETLK64:
	    return F_SETLK64;
	case TARGET_F_SETLKW64:
	    return F_SETLKW64;
#endif
        case TARGET_F_SETLEASE:
            return F_SETLEASE;
        case TARGET_F_GETLEASE:
            return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
        case TARGET_F_DUPFD_CLOEXEC:
            return F_DUPFD_CLOEXEC;
#endif
        case TARGET_F_NOTIFY:
            return F_NOTIFY;
#ifdef F_GETOWN_EX
        case TARGET_F_GETOWN_EX:
            return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
        case TARGET_F_SETOWN_EX:
            return F_SETOWN_EX;
#endif
        default:
            return -TARGET_EINVAL;
    }
    /* Defensive: not reachable, every path above returns. */
    return -TARGET_EINVAL;
}
/* Bitmask translation table for flock l_type values.  The -1 masks mean
 * "match all bits", i.e. each entry converts the exact TARGET_* value to
 * the corresponding host value (and back, via host_to_target_bitmask).
 */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }  /* terminator */
};
/* Emulate fcntl(2) for the guest.
 *
 * cmd is the *target* command number; it is first mapped to the host
 * command.  Commands that pass a struct through arg (flock, flock64,
 * f_owner_ex) are marshalled between guest and host layouts with the
 * appropriate byte swapping; simple integer commands pass arg through
 * (with flag-bit translation where needed).  Returns the host result or
 * -TARGET_E* on marshalling failure.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
	return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        /* Copy the guest flock in, query the host, and if the query
         * succeeded copy the (possibly updated) description back out. */
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
                  target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type =
                          host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        /* Copy-in only: SETLK/SETLKW do not write back through arg. */
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
                  target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        /* 64-bit flock variant.  NOTE(review): the ">> 1" applied to the
         * converted l_type looks tied to the target_flock64 l_type
         * encoding on some ABIs -- confirm before touching it. */
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
           target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type =
                   host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
           target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* The returned open-file flags need host-to-target translation. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        /* Plain integer argument; no translation needed. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Pass unknown-but-mapped commands straight through with the
         * original target command number. */
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

/* On targets with 16-bit uid_t/gid_t ABIs (USE_UID16), clamp 32-bit host
 * IDs into the 16-bit range; 65534 ("nobody") stands in for any value
 * that does not fit, mirroring the kernel's overflowuid behavior. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widen a 16-bit guest ID; -1 must stay -1 (it means "unchanged" to the
 * set*id syscalls) instead of becoming 0xffff. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

/* Byte-swap an ID at the width the target ABI uses (16 bits here). */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit ID ABIs: all conversions are identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw-syscall wrappers (bypass libc) for the reasons explained above. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time initialization for the syscall layer: registers the struct
 * marshalling thunks, builds the target->host errno table, patches ioctl
 * size fields, and (same-arch builds only) sanity-checks that target and
 * host ioctl numbers agree.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

/* Register every struct layout listed in syscall_types.h with the thunk
 * machinery, then drop the helper macros again. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                /* Size patching only makes sense for pointer arguments;
                 * anything else in the table is a build-time error. */
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit offset passed by a 32-bit guest as two registers;
 * which register holds the high half depends on guest endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset arrives whole in the first argument. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64 for 32-bit guests: the 64-bit length is split across two
 * registers; on ABIs that require even/odd register pairs the pair is
 * shifted up by one argument slot, hence the regpairs_aligned() shuffle. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64 counterpart of target_truncate64 above: same register-pair
 * alignment handling for the split 64-bit length. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
/* Copy a struct timespec from guest memory at target_addr into *host_ts,
 * byte-swapping as needed.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
/* Copy *host_ts out to guest memory at target_addr, byte-swapping as
 * needed.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
5950 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5951 abi_ulong target_addr)
5953 struct target_itimerspec *target_itspec;
5955 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5956 return -TARGET_EFAULT;
5959 host_itspec->it_interval.tv_sec =
5960 tswapal(target_itspec->it_interval.tv_sec);
5961 host_itspec->it_interval.tv_nsec =
5962 tswapal(target_itspec->it_interval.tv_nsec);
5963 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5964 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5966 unlock_user_struct(target_itspec, target_addr, 1);
5967 return 0;
/* Copy *host_its out to the guest struct itimerspec at target_addr,
 * byte-swapping each field.  Returns 0 or -TARGET_EFAULT.
 * NOTE(review): the final unlock passes 0 (no copy-back) which looks
 * inverted for a VERIFY_WRITE lock -- on debug/softmmu paths this may
 * discard the data just written; confirm lock_user_struct semantics. */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
    target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);

    target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
    target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);

    unlock_user_struct(target_itspec, target_addr, 0);
    return 0;
}
5989 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5990 abi_ulong target_addr)
5992 struct target_sigevent *target_sevp;
5994 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
5995 return -TARGET_EFAULT;
5998 /* This union is awkward on 64 bit systems because it has a 32 bit
5999 * integer and a pointer in it; we follow the conversion approach
6000 * used for handling sigval types in signal.c so the guest should get
6001 * the correct value back even if we did a 64 bit byteswap and it's
6002 * using the 32 bit integer.
6004 host_sevp->sigev_value.sival_ptr =
6005 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6006 host_sevp->sigev_signo =
6007 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6008 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6009 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6011 unlock_user_struct(target_sevp, target_addr, 1);
6012 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate guest mlockall(2) flag bits into the host MCL_* encoding.
 * Only MCL_CURRENT and MCL_FUTURE are handled; unknown bits are dropped.
 */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_flags = (arg & TARGET_MLOCKALL_MCL_CURRENT) ? MCL_CURRENT : 0;

    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        host_flags |= MCL_FUTURE;
    }
    return host_flags;
}
#endif
/* Write a host struct stat out to the guest's stat64-family buffer at
 * target_addr, choosing the right guest layout: the ARM EABI variant
 * when applicable, otherwise target_stat64 (or plain target_stat if the
 * target has no 64-bit layout).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some layouts carry the inode in a second field as well. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things. However implementing
   futexes locally would make futexes shared between multiple processes
   tricky. However they're probably useless because guest atomic
   operations won't work either. */
/* Emulate the futex(2) syscall by forwarding to the host futex on the
 * host address backing the guest uaddr.  Only the listed base operations
 * are supported; everything else returns -TARGET_ENOSYS. */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        /* val is compared against guest memory, so it must be swapped to
         * guest byte order; timeout is converted if present. */
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, val3));
    case FUTEX_WAKE:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(safe_futex(g2h(uaddr), op, val, pts,
                                    g2h(uaddr2),
                                    (base_op == FUTEX_CMP_REQUEUE
                                     ? tswap32(val3)
                                     : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): the guest supplies a file_handle buffer
 * whose handle_bytes field gives its capacity; we run the host syscall
 * into a scratch handle of that size and copy the result back, byte
 * swapping the two header fields.  Returns the syscall result or
 * -TARGET_EFAULT on guest-memory access failure. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of struct file_handle, so reading
     * an s32 at `handle` fetches the guest-provided capacity. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): duplicate the guest's file_handle into a
 * host copy (fixing the byte order of the two header fields), translate
 * the open flags, and run the host syscall. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes (first field) gives the variable-length payload size. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
6232 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/* Convert a host signalfd_siginfo record into guest representation.
 * NOTE: the only caller invokes this with tinfo == info (in-place
 * conversion), so all host fields must be read from `info` *before* the
 * corresponding tinfo field is overwritten.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Check the *host* values (from info, before any field is swapped);
     * the old code read them through tinfo, which only worked because
     * the two pointers alias in the sole caller. */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Fixed: read ssi_errno from the source record, not the destination. */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6274 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
6276 int i;
6278 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
6279 host_to_target_signalfd_siginfo(buf + i, buf + i);
6282 return len;
/* fd translation hooks installed on every fd returned by signalfd():
 * data read from it is converted by host_to_target_data_signalfd. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
/* Emulate signalfd4(2): convert the guest sigset and flag bits to host
 * form, create the host signalfd, and register the fd-translation hook
 * so records read from it get converted back to guest layout. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only NONBLOCK and CLOEXEC are valid signalfd4 flags. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    int result = status;

    if (WIFSIGNALED(status)) {
        /* Low 7 bits hold the terminating signal; translate just those. */
        int tsig = host_to_target_signal(WTERMSIG(status));
        result = tsig | (status & ~0x7f);
    } else if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15. */
        int tsig = host_to_target_signal(WSTOPSIG(status));
        result = (tsig << 8) | (status & 0xff);
    }

    return result;
}
/* Back /proc/self/cmdline for the guest: stream the host's cmdline into
 * fd, skipping the first NUL-terminated string (the path of the qemu
 * binary) so the guest sees its own argv.  Returns 0 on success, the
 * failing fd on open error, or -1 (with errno set) on read/write error.
 * cpu_env is unused but required by the fake_open fill() signature.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            /* Only search the bytes actually read; scanning sizeof(buf)
             * would examine uninitialized bytes after a short read. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
6382 static int open_self_maps(void *cpu_env, int fd)
6384 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6385 TaskState *ts = cpu->opaque;
6386 FILE *fp;
6387 char *line = NULL;
6388 size_t len = 0;
6389 ssize_t read;
6391 fp = fopen("/proc/self/maps", "r");
6392 if (fp == NULL) {
6393 return -1;
6396 while ((read = getline(&line, &len, fp)) != -1) {
6397 int fields, dev_maj, dev_min, inode;
6398 uint64_t min, max, offset;
6399 char flag_r, flag_w, flag_x, flag_p;
6400 char path[512] = "";
6401 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6402 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6403 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6405 if ((fields < 10) || (fields > 11)) {
6406 continue;
6408 if (h2g_valid(min)) {
6409 int flags = page_get_flags(h2g(min));
6410 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
6411 if (page_check_range(h2g(min), max - min, flags) == -1) {
6412 continue;
6414 if (h2g(min) == ts->info->stack_limit) {
6415 pstrcpy(path, sizeof(path), " [stack]");
6417 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
6418 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6419 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6420 flag_x, flag_p, offset, dev_maj, dev_min, inode,
6421 path[0] ? " " : "", path);
6425 free(line);
6426 fclose(fp);
6428 return 0;
/* Back /proc/self/stat for the guest: emit the 44 space-separated stat
 * fields.  Only pid (0), comm (1) and startstack (27) carry real values;
 * every other field is reported as 0.  Returns 0 on success, -1 on a
 * short write.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
/* Back /proc/self/auxv for the guest: copy the saved guest auxiliary
 * vector (stored on the target stack at exec time) into fd, then rewind
 * fd to offset 0.  Always returns 0, even if the auxv could not be
 * locked or the write was short (best-effort).
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr/len were advanced by the loop, so this
         * unlock sees a moved pointer and the residual length -- it
         * works with the current lock_user implementation but is worth
         * confirming. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/* Return 1 if filename names `entry` inside this process's own /proc
 * directory -- i.e. "/proc/self/<entry>" or "/proc/<own-pid>/<entry>" --
 * and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    /* Must live under /proc/ at all. */
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* A numeric directory only matches our own pid. */
        char myself[80];
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
6522 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Comparator used by the fake_open table for absolute /proc paths:
 * a simple exact string match (unlike is_proc_myself, no pid logic). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
6528 static int open_net_route(void *cpu_env, int fd)
6530 FILE *fp;
6531 char *line = NULL;
6532 size_t len = 0;
6533 ssize_t read;
6535 fp = fopen("/proc/net/route", "r");
6536 if (fp == NULL) {
6537 return -1;
6540 /* read header */
6542 read = getline(&line, &len, fp);
6543 dprintf(fd, "%s", line);
6545 /* read routes */
6547 while ((read = getline(&line, &len, fp)) != -1) {
6548 char iface[16];
6549 uint32_t dest, gw, mask;
6550 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6551 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6552 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6553 &mask, &mtu, &window, &irtt);
6554 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6555 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6556 metric, tswap32(mask), mtu, window, irtt);
6559 free(line);
6560 fclose(fp);
6562 return 0;
6564 #endif
/* Emulate openat(2) for the guest, intercepting the /proc entries that
 * must be faked (maps, stat, auxv, cmdline, and on cross-endian builds
 * /proc/net/route): those are synthesized into an unlinked temp file
 * whose fd is returned.  "/proc/self/exe" resolves to the guest binary.
 * Everything else falls through to the host openat.
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);          /* writes fake contents into fd */
        int (*cmp)(const char *s1, const char *s2);  /* path matcher */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* Prefer the fd the loader gave us; fall back to the saved path. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the anonymous fd is all the guest needs. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
6626 #define TIMER_MAGIC 0x0caf0000
6627 #define TIMER_MAGIC_MASK 0xffff0000
6629 /* Convert QEMU provided timer ID back to internal 16bit index format */
6630 static target_timer_t get_timer_id(abi_long arg)
6632 target_timer_t timerid = arg;
6634 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6635 return -TARGET_EINVAL;
6638 timerid &= 0xffff;
6640 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6641 return -TARGET_EINVAL;
6644 return timerid;
6647 /* do_syscall() should always have a single exit point at the end so
6648 that actions, such as logging of syscall results, can be performed.
6649 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
6650 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
6651 abi_long arg2, abi_long arg3, abi_long arg4,
6652 abi_long arg5, abi_long arg6, abi_long arg7,
6653 abi_long arg8)
6655 CPUState *cpu = ENV_GET_CPU(cpu_env);
6656 abi_long ret;
6657 struct stat st;
6658 struct statfs stfs;
6659 void *p;
6661 #if defined(DEBUG_ERESTARTSYS)
6662 /* Debug-only code for exercising the syscall-restart code paths
6663 * in the per-architecture cpu main loops: restart every syscall
6664 * the guest makes once before letting it through.
6667 static int flag;
6669 flag = !flag;
6670 if (flag) {
6671 return -TARGET_ERESTARTSYS;
6674 #endif
6676 #ifdef DEBUG
6677 gemu_log("syscall %d", num);
6678 #endif
6679 if(do_strace)
6680 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
6682 switch(num) {
6683 case TARGET_NR_exit:
6684 /* In old applications this may be used to implement _exit(2).
6685 However in threaded applictions it is used for thread termination,
6686 and _exit_group is used for application termination.
6687 Do thread termination if we have more then one thread. */
6689 if (block_signals()) {
6690 ret = -TARGET_ERESTARTSYS;
6691 break;
6694 if (CPU_NEXT(first_cpu)) {
6695 TaskState *ts;
6697 cpu_list_lock();
6698 /* Remove the CPU from the list. */
6699 QTAILQ_REMOVE(&cpus, cpu, node);
6700 cpu_list_unlock();
6701 ts = cpu->opaque;
6702 if (ts->child_tidptr) {
6703 put_user_u32(0, ts->child_tidptr);
6704 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6705 NULL, NULL, 0);
6707 thread_cpu = NULL;
6708 object_unref(OBJECT(cpu));
6709 g_free(ts);
6710 rcu_unregister_thread();
6711 pthread_exit(NULL);
6713 #ifdef TARGET_GPROF
6714 _mcleanup();
6715 #endif
6716 gdb_exit(cpu_env, arg1);
6717 _exit(arg1);
6718 ret = 0; /* avoid warning */
6719 break;
6720 case TARGET_NR_read:
6721 if (arg3 == 0)
6722 ret = 0;
6723 else {
6724 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6725 goto efault;
6726 ret = get_errno(safe_read(arg1, p, arg3));
6727 if (ret >= 0 &&
6728 fd_trans_host_to_target_data(arg1)) {
6729 ret = fd_trans_host_to_target_data(arg1)(p, ret);
6731 unlock_user(p, arg2, ret);
6733 break;
6734 case TARGET_NR_write:
6735 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6736 goto efault;
6737 ret = get_errno(safe_write(arg1, p, arg3));
6738 unlock_user(p, arg2, 0);
6739 break;
6740 #ifdef TARGET_NR_open
6741 case TARGET_NR_open:
6742 if (!(p = lock_user_string(arg1)))
6743 goto efault;
6744 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6745 target_to_host_bitmask(arg2, fcntl_flags_tbl),
6746 arg3));
6747 fd_trans_unregister(ret);
6748 unlock_user(p, arg1, 0);
6749 break;
6750 #endif
6751 case TARGET_NR_openat:
6752 if (!(p = lock_user_string(arg2)))
6753 goto efault;
6754 ret = get_errno(do_openat(cpu_env, arg1, p,
6755 target_to_host_bitmask(arg3, fcntl_flags_tbl),
6756 arg4));
6757 fd_trans_unregister(ret);
6758 unlock_user(p, arg2, 0);
6759 break;
6760 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6761 case TARGET_NR_name_to_handle_at:
6762 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
6763 break;
6764 #endif
6765 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6766 case TARGET_NR_open_by_handle_at:
6767 ret = do_open_by_handle_at(arg1, arg2, arg3);
6768 fd_trans_unregister(ret);
6769 break;
6770 #endif
6771 case TARGET_NR_close:
6772 fd_trans_unregister(arg1);
6773 ret = get_errno(close(arg1));
6774 break;
6775 case TARGET_NR_brk:
6776 ret = do_brk(arg1);
6777 break;
6778 #ifdef TARGET_NR_fork
6779 case TARGET_NR_fork:
6780 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
6781 break;
6782 #endif
6783 #ifdef TARGET_NR_waitpid
6784 case TARGET_NR_waitpid:
6786 int status;
6787 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
6788 if (!is_error(ret) && arg2 && ret
6789 && put_user_s32(host_to_target_waitstatus(status), arg2))
6790 goto efault;
6792 break;
6793 #endif
6794 #ifdef TARGET_NR_waitid
6795 case TARGET_NR_waitid:
6797 siginfo_t info;
6798 info.si_pid = 0;
6799 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
6800 if (!is_error(ret) && arg3 && info.si_pid != 0) {
6801 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
6802 goto efault;
6803 host_to_target_siginfo(p, &info);
6804 unlock_user(p, arg3, sizeof(target_siginfo_t));
6807 break;
6808 #endif
6809 #ifdef TARGET_NR_creat /* not on alpha */
6810 case TARGET_NR_creat:
6811 if (!(p = lock_user_string(arg1)))
6812 goto efault;
6813 ret = get_errno(creat(p, arg2));
6814 fd_trans_unregister(ret);
6815 unlock_user(p, arg1, 0);
6816 break;
6817 #endif
6818 #ifdef TARGET_NR_link
6819 case TARGET_NR_link:
6821 void * p2;
6822 p = lock_user_string(arg1);
6823 p2 = lock_user_string(arg2);
6824 if (!p || !p2)
6825 ret = -TARGET_EFAULT;
6826 else
6827 ret = get_errno(link(p, p2));
6828 unlock_user(p2, arg2, 0);
6829 unlock_user(p, arg1, 0);
6831 break;
6832 #endif
6833 #if defined(TARGET_NR_linkat)
6834 case TARGET_NR_linkat:
6836 void * p2 = NULL;
6837 if (!arg2 || !arg4)
6838 goto efault;
6839 p = lock_user_string(arg2);
6840 p2 = lock_user_string(arg4);
6841 if (!p || !p2)
6842 ret = -TARGET_EFAULT;
6843 else
6844 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6845 unlock_user(p, arg2, 0);
6846 unlock_user(p2, arg4, 0);
6848 break;
6849 #endif
6850 #ifdef TARGET_NR_unlink
6851 case TARGET_NR_unlink:
6852 if (!(p = lock_user_string(arg1)))
6853 goto efault;
6854 ret = get_errno(unlink(p));
6855 unlock_user(p, arg1, 0);
6856 break;
6857 #endif
6858 #if defined(TARGET_NR_unlinkat)
6859 case TARGET_NR_unlinkat:
6860 if (!(p = lock_user_string(arg2)))
6861 goto efault;
6862 ret = get_errno(unlinkat(arg1, p, arg3));
6863 unlock_user(p, arg2, 0);
6864 break;
6865 #endif
6866 case TARGET_NR_execve:
6868 char **argp, **envp;
6869 int argc, envc;
6870 abi_ulong gp;
6871 abi_ulong guest_argp;
6872 abi_ulong guest_envp;
6873 abi_ulong addr;
6874 char **q;
6875 int total_size = 0;
6877 argc = 0;
6878 guest_argp = arg2;
6879 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6880 if (get_user_ual(addr, gp))
6881 goto efault;
6882 if (!addr)
6883 break;
6884 argc++;
6886 envc = 0;
6887 guest_envp = arg3;
6888 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6889 if (get_user_ual(addr, gp))
6890 goto efault;
6891 if (!addr)
6892 break;
6893 envc++;
6896 argp = alloca((argc + 1) * sizeof(void *));
6897 envp = alloca((envc + 1) * sizeof(void *));
6899 for (gp = guest_argp, q = argp; gp;
6900 gp += sizeof(abi_ulong), q++) {
6901 if (get_user_ual(addr, gp))
6902 goto execve_efault;
6903 if (!addr)
6904 break;
6905 if (!(*q = lock_user_string(addr)))
6906 goto execve_efault;
6907 total_size += strlen(*q) + 1;
6909 *q = NULL;
6911 for (gp = guest_envp, q = envp; gp;
6912 gp += sizeof(abi_ulong), q++) {
6913 if (get_user_ual(addr, gp))
6914 goto execve_efault;
6915 if (!addr)
6916 break;
6917 if (!(*q = lock_user_string(addr)))
6918 goto execve_efault;
6919 total_size += strlen(*q) + 1;
6921 *q = NULL;
6923 if (!(p = lock_user_string(arg1)))
6924 goto execve_efault;
6925 /* Although execve() is not an interruptible syscall it is
6926 * a special case where we must use the safe_syscall wrapper:
6927 * if we allow a signal to happen before we make the host
6928 * syscall then we will 'lose' it, because at the point of
6929 * execve the process leaves QEMU's control. So we use the
6930 * safe syscall wrapper to ensure that we either take the
6931 * signal as a guest signal, or else it does not happen
6932 * before the execve completes and makes it the other
6933 * program's problem.
6935 ret = get_errno(safe_execve(p, argp, envp));
6936 unlock_user(p, arg1, 0);
6938 goto execve_end;
6940 execve_efault:
6941 ret = -TARGET_EFAULT;
6943 execve_end:
6944 for (gp = guest_argp, q = argp; *q;
6945 gp += sizeof(abi_ulong), q++) {
6946 if (get_user_ual(addr, gp)
6947 || !addr)
6948 break;
6949 unlock_user(*q, addr, 0);
6951 for (gp = guest_envp, q = envp; *q;
6952 gp += sizeof(abi_ulong), q++) {
6953 if (get_user_ual(addr, gp)
6954 || !addr)
6955 break;
6956 unlock_user(*q, addr, 0);
6959 break;
6960 case TARGET_NR_chdir:
6961 if (!(p = lock_user_string(arg1)))
6962 goto efault;
6963 ret = get_errno(chdir(p));
6964 unlock_user(p, arg1, 0);
6965 break;
6966 #ifdef TARGET_NR_time
6967 case TARGET_NR_time:
6969 time_t host_time;
6970 ret = get_errno(time(&host_time));
6971 if (!is_error(ret)
6972 && arg1
6973 && put_user_sal(host_time, arg1))
6974 goto efault;
6976 break;
6977 #endif
6978 #ifdef TARGET_NR_mknod
6979 case TARGET_NR_mknod:
6980 if (!(p = lock_user_string(arg1)))
6981 goto efault;
6982 ret = get_errno(mknod(p, arg2, arg3));
6983 unlock_user(p, arg1, 0);
6984 break;
6985 #endif
6986 #if defined(TARGET_NR_mknodat)
6987 case TARGET_NR_mknodat:
6988 if (!(p = lock_user_string(arg2)))
6989 goto efault;
6990 ret = get_errno(mknodat(arg1, p, arg3, arg4));
6991 unlock_user(p, arg2, 0);
6992 break;
6993 #endif
6994 #ifdef TARGET_NR_chmod
6995 case TARGET_NR_chmod:
6996 if (!(p = lock_user_string(arg1)))
6997 goto efault;
6998 ret = get_errno(chmod(p, arg2));
6999 unlock_user(p, arg1, 0);
7000 break;
7001 #endif
7002 #ifdef TARGET_NR_break
7003 case TARGET_NR_break:
7004 goto unimplemented;
7005 #endif
7006 #ifdef TARGET_NR_oldstat
7007 case TARGET_NR_oldstat:
7008 goto unimplemented;
7009 #endif
7010 case TARGET_NR_lseek:
7011 ret = get_errno(lseek(arg1, arg2, arg3));
7012 break;
7013 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7014 /* Alpha specific */
7015 case TARGET_NR_getxpid:
7016 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7017 ret = get_errno(getpid());
7018 break;
7019 #endif
7020 #ifdef TARGET_NR_getpid
7021 case TARGET_NR_getpid:
7022 ret = get_errno(getpid());
7023 break;
7024 #endif
7025 case TARGET_NR_mount:
7027 /* need to look at the data field */
7028 void *p2, *p3;
7030 if (arg1) {
7031 p = lock_user_string(arg1);
7032 if (!p) {
7033 goto efault;
7035 } else {
7036 p = NULL;
7039 p2 = lock_user_string(arg2);
7040 if (!p2) {
7041 if (arg1) {
7042 unlock_user(p, arg1, 0);
7044 goto efault;
7047 if (arg3) {
7048 p3 = lock_user_string(arg3);
7049 if (!p3) {
7050 if (arg1) {
7051 unlock_user(p, arg1, 0);
7053 unlock_user(p2, arg2, 0);
7054 goto efault;
7056 } else {
7057 p3 = NULL;
7060 /* FIXME - arg5 should be locked, but it isn't clear how to
7061 * do that since it's not guaranteed to be a NULL-terminated
7062 * string.
7064 if (!arg5) {
7065 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7066 } else {
7067 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7069 ret = get_errno(ret);
7071 if (arg1) {
7072 unlock_user(p, arg1, 0);
7074 unlock_user(p2, arg2, 0);
7075 if (arg3) {
7076 unlock_user(p3, arg3, 0);
7079 break;
7080 #ifdef TARGET_NR_umount
7081 case TARGET_NR_umount:
7082 if (!(p = lock_user_string(arg1)))
7083 goto efault;
7084 ret = get_errno(umount(p));
7085 unlock_user(p, arg1, 0);
7086 break;
7087 #endif
7088 #ifdef TARGET_NR_stime /* not on alpha */
7089 case TARGET_NR_stime:
7091 time_t host_time;
7092 if (get_user_sal(host_time, arg1))
7093 goto efault;
7094 ret = get_errno(stime(&host_time));
7096 break;
7097 #endif
7098 case TARGET_NR_ptrace:
7099 goto unimplemented;
7100 #ifdef TARGET_NR_alarm /* not on alpha */
7101 case TARGET_NR_alarm:
7102 ret = alarm(arg1);
7103 break;
7104 #endif
7105 #ifdef TARGET_NR_oldfstat
7106 case TARGET_NR_oldfstat:
7107 goto unimplemented;
7108 #endif
7109 #ifdef TARGET_NR_pause /* not on alpha */
7110 case TARGET_NR_pause:
7111 if (!block_signals()) {
7112 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7114 ret = -TARGET_EINTR;
7115 break;
7116 #endif
7117 #ifdef TARGET_NR_utime
7118 case TARGET_NR_utime:
7120 struct utimbuf tbuf, *host_tbuf;
7121 struct target_utimbuf *target_tbuf;
7122 if (arg2) {
7123 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7124 goto efault;
7125 tbuf.actime = tswapal(target_tbuf->actime);
7126 tbuf.modtime = tswapal(target_tbuf->modtime);
7127 unlock_user_struct(target_tbuf, arg2, 0);
7128 host_tbuf = &tbuf;
7129 } else {
7130 host_tbuf = NULL;
7132 if (!(p = lock_user_string(arg1)))
7133 goto efault;
7134 ret = get_errno(utime(p, host_tbuf));
7135 unlock_user(p, arg1, 0);
7137 break;
7138 #endif
7139 #ifdef TARGET_NR_utimes
7140 case TARGET_NR_utimes:
7142 struct timeval *tvp, tv[2];
7143 if (arg2) {
7144 if (copy_from_user_timeval(&tv[0], arg2)
7145 || copy_from_user_timeval(&tv[1],
7146 arg2 + sizeof(struct target_timeval)))
7147 goto efault;
7148 tvp = tv;
7149 } else {
7150 tvp = NULL;
7152 if (!(p = lock_user_string(arg1)))
7153 goto efault;
7154 ret = get_errno(utimes(p, tvp));
7155 unlock_user(p, arg1, 0);
7157 break;
7158 #endif
7159 #if defined(TARGET_NR_futimesat)
7160 case TARGET_NR_futimesat:
7162 struct timeval *tvp, tv[2];
7163 if (arg3) {
7164 if (copy_from_user_timeval(&tv[0], arg3)
7165 || copy_from_user_timeval(&tv[1],
7166 arg3 + sizeof(struct target_timeval)))
7167 goto efault;
7168 tvp = tv;
7169 } else {
7170 tvp = NULL;
7172 if (!(p = lock_user_string(arg2)))
7173 goto efault;
7174 ret = get_errno(futimesat(arg1, path(p), tvp));
7175 unlock_user(p, arg2, 0);
7177 break;
7178 #endif
7179 #ifdef TARGET_NR_stty
7180 case TARGET_NR_stty:
7181 goto unimplemented;
7182 #endif
7183 #ifdef TARGET_NR_gtty
7184 case TARGET_NR_gtty:
7185 goto unimplemented;
7186 #endif
7187 #ifdef TARGET_NR_access
7188 case TARGET_NR_access:
7189 if (!(p = lock_user_string(arg1)))
7190 goto efault;
7191 ret = get_errno(access(path(p), arg2));
7192 unlock_user(p, arg1, 0);
7193 break;
7194 #endif
7195 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7196 case TARGET_NR_faccessat:
7197 if (!(p = lock_user_string(arg2)))
7198 goto efault;
7199 ret = get_errno(faccessat(arg1, p, arg3, 0));
7200 unlock_user(p, arg2, 0);
7201 break;
7202 #endif
7203 #ifdef TARGET_NR_nice /* not on alpha */
7204 case TARGET_NR_nice:
7205 ret = get_errno(nice(arg1));
7206 break;
7207 #endif
7208 #ifdef TARGET_NR_ftime
7209 case TARGET_NR_ftime:
7210 goto unimplemented;
7211 #endif
7212 case TARGET_NR_sync:
7213 sync();
7214 ret = 0;
7215 break;
7216 case TARGET_NR_kill:
7217 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7218 break;
7219 #ifdef TARGET_NR_rename
7220 case TARGET_NR_rename:
7222 void *p2;
7223 p = lock_user_string(arg1);
7224 p2 = lock_user_string(arg2);
7225 if (!p || !p2)
7226 ret = -TARGET_EFAULT;
7227 else
7228 ret = get_errno(rename(p, p2));
7229 unlock_user(p2, arg2, 0);
7230 unlock_user(p, arg1, 0);
7232 break;
7233 #endif
7234 #if defined(TARGET_NR_renameat)
7235 case TARGET_NR_renameat:
7237 void *p2;
7238 p = lock_user_string(arg2);
7239 p2 = lock_user_string(arg4);
7240 if (!p || !p2)
7241 ret = -TARGET_EFAULT;
7242 else
7243 ret = get_errno(renameat(arg1, p, arg3, p2));
7244 unlock_user(p2, arg4, 0);
7245 unlock_user(p, arg2, 0);
7247 break;
7248 #endif
7249 #ifdef TARGET_NR_mkdir
7250 case TARGET_NR_mkdir:
7251 if (!(p = lock_user_string(arg1)))
7252 goto efault;
7253 ret = get_errno(mkdir(p, arg2));
7254 unlock_user(p, arg1, 0);
7255 break;
7256 #endif
7257 #if defined(TARGET_NR_mkdirat)
7258 case TARGET_NR_mkdirat:
7259 if (!(p = lock_user_string(arg2)))
7260 goto efault;
7261 ret = get_errno(mkdirat(arg1, p, arg3));
7262 unlock_user(p, arg2, 0);
7263 break;
7264 #endif
7265 #ifdef TARGET_NR_rmdir
7266 case TARGET_NR_rmdir:
7267 if (!(p = lock_user_string(arg1)))
7268 goto efault;
7269 ret = get_errno(rmdir(p));
7270 unlock_user(p, arg1, 0);
7271 break;
7272 #endif
7273 case TARGET_NR_dup:
7274 ret = get_errno(dup(arg1));
7275 if (ret >= 0) {
7276 fd_trans_dup(arg1, ret);
7278 break;
7279 #ifdef TARGET_NR_pipe
7280 case TARGET_NR_pipe:
7281 ret = do_pipe(cpu_env, arg1, 0, 0);
7282 break;
7283 #endif
7284 #ifdef TARGET_NR_pipe2
7285 case TARGET_NR_pipe2:
7286 ret = do_pipe(cpu_env, arg1,
7287 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7288 break;
7289 #endif
7290 case TARGET_NR_times:
7292 struct target_tms *tmsp;
7293 struct tms tms;
7294 ret = get_errno(times(&tms));
7295 if (arg1) {
7296 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7297 if (!tmsp)
7298 goto efault;
7299 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7300 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7301 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7302 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7304 if (!is_error(ret))
7305 ret = host_to_target_clock_t(ret);
7307 break;
7308 #ifdef TARGET_NR_prof
7309 case TARGET_NR_prof:
7310 goto unimplemented;
7311 #endif
7312 #ifdef TARGET_NR_signal
7313 case TARGET_NR_signal:
7314 goto unimplemented;
7315 #endif
7316 case TARGET_NR_acct:
7317 if (arg1 == 0) {
7318 ret = get_errno(acct(NULL));
7319 } else {
7320 if (!(p = lock_user_string(arg1)))
7321 goto efault;
7322 ret = get_errno(acct(path(p)));
7323 unlock_user(p, arg1, 0);
7325 break;
7326 #ifdef TARGET_NR_umount2
7327 case TARGET_NR_umount2:
7328 if (!(p = lock_user_string(arg1)))
7329 goto efault;
7330 ret = get_errno(umount2(p, arg2));
7331 unlock_user(p, arg1, 0);
7332 break;
7333 #endif
7334 #ifdef TARGET_NR_lock
7335 case TARGET_NR_lock:
7336 goto unimplemented;
7337 #endif
7338 case TARGET_NR_ioctl:
7339 ret = do_ioctl(arg1, arg2, arg3);
7340 break;
7341 case TARGET_NR_fcntl:
7342 ret = do_fcntl(arg1, arg2, arg3);
7343 break;
7344 #ifdef TARGET_NR_mpx
7345 case TARGET_NR_mpx:
7346 goto unimplemented;
7347 #endif
7348 case TARGET_NR_setpgid:
7349 ret = get_errno(setpgid(arg1, arg2));
7350 break;
7351 #ifdef TARGET_NR_ulimit
7352 case TARGET_NR_ulimit:
7353 goto unimplemented;
7354 #endif
7355 #ifdef TARGET_NR_oldolduname
7356 case TARGET_NR_oldolduname:
7357 goto unimplemented;
7358 #endif
7359 case TARGET_NR_umask:
7360 ret = get_errno(umask(arg1));
7361 break;
7362 case TARGET_NR_chroot:
7363 if (!(p = lock_user_string(arg1)))
7364 goto efault;
7365 ret = get_errno(chroot(p));
7366 unlock_user(p, arg1, 0);
7367 break;
7368 #ifdef TARGET_NR_ustat
7369 case TARGET_NR_ustat:
7370 goto unimplemented;
7371 #endif
7372 #ifdef TARGET_NR_dup2
7373 case TARGET_NR_dup2:
7374 ret = get_errno(dup2(arg1, arg2));
7375 if (ret >= 0) {
7376 fd_trans_dup(arg1, arg2);
7378 break;
7379 #endif
7380 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7381 case TARGET_NR_dup3:
7382 ret = get_errno(dup3(arg1, arg2, arg3));
7383 if (ret >= 0) {
7384 fd_trans_dup(arg1, arg2);
7386 break;
7387 #endif
7388 #ifdef TARGET_NR_getppid /* not on alpha */
7389 case TARGET_NR_getppid:
7390 ret = get_errno(getppid());
7391 break;
7392 #endif
7393 #ifdef TARGET_NR_getpgrp
7394 case TARGET_NR_getpgrp:
7395 ret = get_errno(getpgrp());
7396 break;
7397 #endif
7398 case TARGET_NR_setsid:
7399 ret = get_errno(setsid());
7400 break;
7401 #ifdef TARGET_NR_sigaction
7402 case TARGET_NR_sigaction:
7404 #if defined(TARGET_ALPHA)
7405 struct target_sigaction act, oact, *pact = 0;
7406 struct target_old_sigaction *old_act;
7407 if (arg2) {
7408 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7409 goto efault;
7410 act._sa_handler = old_act->_sa_handler;
7411 target_siginitset(&act.sa_mask, old_act->sa_mask);
7412 act.sa_flags = old_act->sa_flags;
7413 act.sa_restorer = 0;
7414 unlock_user_struct(old_act, arg2, 0);
7415 pact = &act;
7417 ret = get_errno(do_sigaction(arg1, pact, &oact));
7418 if (!is_error(ret) && arg3) {
7419 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7420 goto efault;
7421 old_act->_sa_handler = oact._sa_handler;
7422 old_act->sa_mask = oact.sa_mask.sig[0];
7423 old_act->sa_flags = oact.sa_flags;
7424 unlock_user_struct(old_act, arg3, 1);
7426 #elif defined(TARGET_MIPS)
7427 struct target_sigaction act, oact, *pact, *old_act;
7429 if (arg2) {
7430 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7431 goto efault;
7432 act._sa_handler = old_act->_sa_handler;
7433 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7434 act.sa_flags = old_act->sa_flags;
7435 unlock_user_struct(old_act, arg2, 0);
7436 pact = &act;
7437 } else {
7438 pact = NULL;
7441 ret = get_errno(do_sigaction(arg1, pact, &oact));
7443 if (!is_error(ret) && arg3) {
7444 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7445 goto efault;
7446 old_act->_sa_handler = oact._sa_handler;
7447 old_act->sa_flags = oact.sa_flags;
7448 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7449 old_act->sa_mask.sig[1] = 0;
7450 old_act->sa_mask.sig[2] = 0;
7451 old_act->sa_mask.sig[3] = 0;
7452 unlock_user_struct(old_act, arg3, 1);
7454 #else
7455 struct target_old_sigaction *old_act;
7456 struct target_sigaction act, oact, *pact;
7457 if (arg2) {
7458 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7459 goto efault;
7460 act._sa_handler = old_act->_sa_handler;
7461 target_siginitset(&act.sa_mask, old_act->sa_mask);
7462 act.sa_flags = old_act->sa_flags;
7463 act.sa_restorer = old_act->sa_restorer;
7464 unlock_user_struct(old_act, arg2, 0);
7465 pact = &act;
7466 } else {
7467 pact = NULL;
7469 ret = get_errno(do_sigaction(arg1, pact, &oact));
7470 if (!is_error(ret) && arg3) {
7471 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7472 goto efault;
7473 old_act->_sa_handler = oact._sa_handler;
7474 old_act->sa_mask = oact.sa_mask.sig[0];
7475 old_act->sa_flags = oact.sa_flags;
7476 old_act->sa_restorer = oact.sa_restorer;
7477 unlock_user_struct(old_act, arg3, 1);
7479 #endif
7481 break;
7482 #endif
7483 case TARGET_NR_rt_sigaction:
7485 #if defined(TARGET_ALPHA)
7486 struct target_sigaction act, oact, *pact = 0;
7487 struct target_rt_sigaction *rt_act;
7488 /* ??? arg4 == sizeof(sigset_t). */
7489 if (arg2) {
7490 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7491 goto efault;
7492 act._sa_handler = rt_act->_sa_handler;
7493 act.sa_mask = rt_act->sa_mask;
7494 act.sa_flags = rt_act->sa_flags;
7495 act.sa_restorer = arg5;
7496 unlock_user_struct(rt_act, arg2, 0);
7497 pact = &act;
7499 ret = get_errno(do_sigaction(arg1, pact, &oact));
7500 if (!is_error(ret) && arg3) {
7501 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7502 goto efault;
7503 rt_act->_sa_handler = oact._sa_handler;
7504 rt_act->sa_mask = oact.sa_mask;
7505 rt_act->sa_flags = oact.sa_flags;
7506 unlock_user_struct(rt_act, arg3, 1);
7508 #else
7509 struct target_sigaction *act;
7510 struct target_sigaction *oact;
7512 if (arg2) {
7513 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
7514 goto efault;
7515 } else
7516 act = NULL;
7517 if (arg3) {
7518 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7519 ret = -TARGET_EFAULT;
7520 goto rt_sigaction_fail;
7522 } else
7523 oact = NULL;
7524 ret = get_errno(do_sigaction(arg1, act, oact));
7525 rt_sigaction_fail:
7526 if (act)
7527 unlock_user_struct(act, arg2, 0);
7528 if (oact)
7529 unlock_user_struct(oact, arg3, 1);
7530 #endif
7532 break;
7533 #ifdef TARGET_NR_sgetmask /* not on alpha */
7534 case TARGET_NR_sgetmask:
7536 sigset_t cur_set;
7537 abi_ulong target_set;
7538 ret = do_sigprocmask(0, NULL, &cur_set);
7539 if (!ret) {
7540 host_to_target_old_sigset(&target_set, &cur_set);
7541 ret = target_set;
7544 break;
7545 #endif
7546 #ifdef TARGET_NR_ssetmask /* not on alpha */
7547 case TARGET_NR_ssetmask:
7549 sigset_t set, oset, cur_set;
7550 abi_ulong target_set = arg1;
7551 /* We only have one word of the new mask so we must read
7552 * the rest of it with do_sigprocmask() and OR in this word.
7553 * We are guaranteed that a do_sigprocmask() that only queries
7554 * the signal mask will not fail.
7556 ret = do_sigprocmask(0, NULL, &cur_set);
7557 assert(!ret);
7558 target_to_host_old_sigset(&set, &target_set);
7559 sigorset(&set, &set, &cur_set);
7560 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7561 if (!ret) {
7562 host_to_target_old_sigset(&target_set, &oset);
7563 ret = target_set;
7566 break;
7567 #endif
7568 #ifdef TARGET_NR_sigprocmask
7569 case TARGET_NR_sigprocmask:
7571 #if defined(TARGET_ALPHA)
7572 sigset_t set, oldset;
7573 abi_ulong mask;
7574 int how;
7576 switch (arg1) {
7577 case TARGET_SIG_BLOCK:
7578 how = SIG_BLOCK;
7579 break;
7580 case TARGET_SIG_UNBLOCK:
7581 how = SIG_UNBLOCK;
7582 break;
7583 case TARGET_SIG_SETMASK:
7584 how = SIG_SETMASK;
7585 break;
7586 default:
7587 ret = -TARGET_EINVAL;
7588 goto fail;
7590 mask = arg2;
7591 target_to_host_old_sigset(&set, &mask);
7593 ret = do_sigprocmask(how, &set, &oldset);
7594 if (!is_error(ret)) {
7595 host_to_target_old_sigset(&mask, &oldset);
7596 ret = mask;
7597 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7599 #else
7600 sigset_t set, oldset, *set_ptr;
7601 int how;
7603 if (arg2) {
7604 switch (arg1) {
7605 case TARGET_SIG_BLOCK:
7606 how = SIG_BLOCK;
7607 break;
7608 case TARGET_SIG_UNBLOCK:
7609 how = SIG_UNBLOCK;
7610 break;
7611 case TARGET_SIG_SETMASK:
7612 how = SIG_SETMASK;
7613 break;
7614 default:
7615 ret = -TARGET_EINVAL;
7616 goto fail;
7618 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7619 goto efault;
7620 target_to_host_old_sigset(&set, p);
7621 unlock_user(p, arg2, 0);
7622 set_ptr = &set;
7623 } else {
7624 how = 0;
7625 set_ptr = NULL;
7627 ret = do_sigprocmask(how, set_ptr, &oldset);
7628 if (!is_error(ret) && arg3) {
7629 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7630 goto efault;
7631 host_to_target_old_sigset(p, &oldset);
7632 unlock_user(p, arg3, sizeof(target_sigset_t));
7634 #endif
7636 break;
7637 #endif
7638 case TARGET_NR_rt_sigprocmask:
7640 int how = arg1;
7641 sigset_t set, oldset, *set_ptr;
7643 if (arg2) {
7644 switch(how) {
7645 case TARGET_SIG_BLOCK:
7646 how = SIG_BLOCK;
7647 break;
7648 case TARGET_SIG_UNBLOCK:
7649 how = SIG_UNBLOCK;
7650 break;
7651 case TARGET_SIG_SETMASK:
7652 how = SIG_SETMASK;
7653 break;
7654 default:
7655 ret = -TARGET_EINVAL;
7656 goto fail;
7658 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7659 goto efault;
7660 target_to_host_sigset(&set, p);
7661 unlock_user(p, arg2, 0);
7662 set_ptr = &set;
7663 } else {
7664 how = 0;
7665 set_ptr = NULL;
7667 ret = do_sigprocmask(how, set_ptr, &oldset);
7668 if (!is_error(ret) && arg3) {
7669 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7670 goto efault;
7671 host_to_target_sigset(p, &oldset);
7672 unlock_user(p, arg3, sizeof(target_sigset_t));
7675 break;
7676 #ifdef TARGET_NR_sigpending
7677 case TARGET_NR_sigpending:
7679 sigset_t set;
7680 ret = get_errno(sigpending(&set));
7681 if (!is_error(ret)) {
7682 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7683 goto efault;
7684 host_to_target_old_sigset(p, &set);
7685 unlock_user(p, arg1, sizeof(target_sigset_t));
7688 break;
7689 #endif
7690 case TARGET_NR_rt_sigpending:
7692 sigset_t set;
7693 ret = get_errno(sigpending(&set));
7694 if (!is_error(ret)) {
7695 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7696 goto efault;
7697 host_to_target_sigset(p, &set);
7698 unlock_user(p, arg1, sizeof(target_sigset_t));
7701 break;
7702 #ifdef TARGET_NR_sigsuspend
7703 case TARGET_NR_sigsuspend:
7705 TaskState *ts = cpu->opaque;
7706 #if defined(TARGET_ALPHA)
7707 abi_ulong mask = arg1;
7708 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
7709 #else
7710 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7711 goto efault;
7712 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
7713 unlock_user(p, arg1, 0);
7714 #endif
7715 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7716 SIGSET_T_SIZE));
7717 if (ret != -TARGET_ERESTARTSYS) {
7718 ts->in_sigsuspend = 1;
7721 break;
7722 #endif
7723 case TARGET_NR_rt_sigsuspend:
7725 TaskState *ts = cpu->opaque;
7726 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7727 goto efault;
7728 target_to_host_sigset(&ts->sigsuspend_mask, p);
7729 unlock_user(p, arg1, 0);
7730 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7731 SIGSET_T_SIZE));
7732 if (ret != -TARGET_ERESTARTSYS) {
7733 ts->in_sigsuspend = 1;
7736 break;
7737 case TARGET_NR_rt_sigtimedwait:
7739 sigset_t set;
7740 struct timespec uts, *puts;
7741 siginfo_t uinfo;
7743 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7744 goto efault;
7745 target_to_host_sigset(&set, p);
7746 unlock_user(p, arg1, 0);
7747 if (arg3) {
7748 puts = &uts;
7749 target_to_host_timespec(puts, arg3);
7750 } else {
7751 puts = NULL;
7753 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
7754 if (!is_error(ret)) {
7755 if (arg2) {
7756 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
7758 if (!p) {
7759 goto efault;
7761 host_to_target_siginfo(p, &uinfo);
7762 unlock_user(p, arg2, sizeof(target_siginfo_t));
7764 ret = host_to_target_signal(ret);
7767 break;
7768 case TARGET_NR_rt_sigqueueinfo:
7770 siginfo_t uinfo;
7771 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
7772 goto efault;
7773 target_to_host_siginfo(&uinfo, p);
7774 unlock_user(p, arg1, 0);
7775 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
7777 break;
7778 #ifdef TARGET_NR_sigreturn
7779 case TARGET_NR_sigreturn:
7780 if (block_signals()) {
7781 ret = -TARGET_ERESTARTSYS;
7782 } else {
7783 ret = do_sigreturn(cpu_env);
7785 break;
7786 #endif
7787 case TARGET_NR_rt_sigreturn:
7788 if (block_signals()) {
7789 ret = -TARGET_ERESTARTSYS;
7790 } else {
7791 ret = do_rt_sigreturn(cpu_env);
7793 break;
7794 case TARGET_NR_sethostname:
7795 if (!(p = lock_user_string(arg1)))
7796 goto efault;
7797 ret = get_errno(sethostname(p, arg2));
7798 unlock_user(p, arg1, 0);
7799 break;
7800 case TARGET_NR_setrlimit:
7802 int resource = target_to_host_resource(arg1);
7803 struct target_rlimit *target_rlim;
7804 struct rlimit rlim;
7805 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
7806 goto efault;
7807 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
7808 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
7809 unlock_user_struct(target_rlim, arg2, 0);
7810 ret = get_errno(setrlimit(resource, &rlim));
7812 break;
7813 case TARGET_NR_getrlimit:
7815 int resource = target_to_host_resource(arg1);
7816 struct target_rlimit *target_rlim;
7817 struct rlimit rlim;
7819 ret = get_errno(getrlimit(resource, &rlim));
7820 if (!is_error(ret)) {
7821 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7822 goto efault;
7823 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7824 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7825 unlock_user_struct(target_rlim, arg2, 1);
7828 break;
7829 case TARGET_NR_getrusage:
7831 struct rusage rusage;
7832 ret = get_errno(getrusage(arg1, &rusage));
7833 if (!is_error(ret)) {
7834 ret = host_to_target_rusage(arg2, &rusage);
7837 break;
7838 case TARGET_NR_gettimeofday:
7840 struct timeval tv;
7841 ret = get_errno(gettimeofday(&tv, NULL));
7842 if (!is_error(ret)) {
7843 if (copy_to_user_timeval(arg1, &tv))
7844 goto efault;
7847 break;
7848 case TARGET_NR_settimeofday:
7850 struct timeval tv, *ptv = NULL;
7851 struct timezone tz, *ptz = NULL;
7853 if (arg1) {
7854 if (copy_from_user_timeval(&tv, arg1)) {
7855 goto efault;
7857 ptv = &tv;
7860 if (arg2) {
7861 if (copy_from_user_timezone(&tz, arg2)) {
7862 goto efault;
7864 ptz = &tz;
7867 ret = get_errno(settimeofday(ptv, ptz));
7869 break;
7870 #if defined(TARGET_NR_select)
7871 case TARGET_NR_select:
7872 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7873 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7874 #else
7876 struct target_sel_arg_struct *sel;
7877 abi_ulong inp, outp, exp, tvp;
7878 long nsel;
7880 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
7881 goto efault;
7882 nsel = tswapal(sel->n);
7883 inp = tswapal(sel->inp);
7884 outp = tswapal(sel->outp);
7885 exp = tswapal(sel->exp);
7886 tvp = tswapal(sel->tvp);
7887 unlock_user_struct(sel, arg1, 0);
7888 ret = do_select(nsel, inp, outp, exp, tvp);
7890 #endif
7891 break;
7892 #endif
7893 #ifdef TARGET_NR_pselect6
7894 case TARGET_NR_pselect6:
7896 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
7897 fd_set rfds, wfds, efds;
7898 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
7899 struct timespec ts, *ts_ptr;
7902 * The 6th arg is actually two args smashed together,
7903 * so we cannot use the C library.
7905 sigset_t set;
7906 struct {
7907 sigset_t *set;
7908 size_t size;
7909 } sig, *sig_ptr;
7911 abi_ulong arg_sigset, arg_sigsize, *arg7;
7912 target_sigset_t *target_sigset;
7914 n = arg1;
7915 rfd_addr = arg2;
7916 wfd_addr = arg3;
7917 efd_addr = arg4;
7918 ts_addr = arg5;
7920 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
7921 if (ret) {
7922 goto fail;
7924 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
7925 if (ret) {
7926 goto fail;
7928 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
7929 if (ret) {
7930 goto fail;
7934 * This takes a timespec, and not a timeval, so we cannot
7935 * use the do_select() helper ...
7937 if (ts_addr) {
7938 if (target_to_host_timespec(&ts, ts_addr)) {
7939 goto efault;
7941 ts_ptr = &ts;
7942 } else {
7943 ts_ptr = NULL;
7946 /* Extract the two packed args for the sigset */
7947 if (arg6) {
7948 sig_ptr = &sig;
7949 sig.size = SIGSET_T_SIZE;
7951 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
7952 if (!arg7) {
7953 goto efault;
7955 arg_sigset = tswapal(arg7[0]);
7956 arg_sigsize = tswapal(arg7[1]);
7957 unlock_user(arg7, arg6, 0);
7959 if (arg_sigset) {
7960 sig.set = &set;
7961 if (arg_sigsize != sizeof(*target_sigset)) {
7962 /* Like the kernel, we enforce correct size sigsets */
7963 ret = -TARGET_EINVAL;
7964 goto fail;
7966 target_sigset = lock_user(VERIFY_READ, arg_sigset,
7967 sizeof(*target_sigset), 1);
7968 if (!target_sigset) {
7969 goto efault;
7971 target_to_host_sigset(&set, target_sigset);
7972 unlock_user(target_sigset, arg_sigset, 0);
7973 } else {
7974 sig.set = NULL;
7976 } else {
7977 sig_ptr = NULL;
7980 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
7981 ts_ptr, sig_ptr));
7983 if (!is_error(ret)) {
7984 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
7985 goto efault;
7986 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
7987 goto efault;
7988 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
7989 goto efault;
7991 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
7992 goto efault;
7995 break;
7996 #endif
7997 #ifdef TARGET_NR_symlink
7998 case TARGET_NR_symlink:
8000 void *p2;
8001 p = lock_user_string(arg1);
8002 p2 = lock_user_string(arg2);
8003 if (!p || !p2)
8004 ret = -TARGET_EFAULT;
8005 else
8006 ret = get_errno(symlink(p, p2));
8007 unlock_user(p2, arg2, 0);
8008 unlock_user(p, arg1, 0);
8010 break;
8011 #endif
8012 #if defined(TARGET_NR_symlinkat)
8013 case TARGET_NR_symlinkat:
8015 void *p2;
8016 p = lock_user_string(arg1);
8017 p2 = lock_user_string(arg3);
8018 if (!p || !p2)
8019 ret = -TARGET_EFAULT;
8020 else
8021 ret = get_errno(symlinkat(p, arg2, p2));
8022 unlock_user(p2, arg3, 0);
8023 unlock_user(p, arg1, 0);
8025 break;
8026 #endif
8027 #ifdef TARGET_NR_oldlstat
8028 case TARGET_NR_oldlstat:
8029 goto unimplemented;
8030 #endif
8031 #ifdef TARGET_NR_readlink
8032 case TARGET_NR_readlink:
8034 void *p2;
8035 p = lock_user_string(arg1);
8036 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8037 if (!p || !p2) {
8038 ret = -TARGET_EFAULT;
8039 } else if (!arg3) {
8040 /* Short circuit this for the magic exe check. */
8041 ret = -TARGET_EINVAL;
8042 } else if (is_proc_myself((const char *)p, "exe")) {
8043 char real[PATH_MAX], *temp;
8044 temp = realpath(exec_path, real);
8045 /* Return value is # of bytes that we wrote to the buffer. */
8046 if (temp == NULL) {
8047 ret = get_errno(-1);
8048 } else {
8049 /* Don't worry about sign mismatch as earlier mapping
8050 * logic would have thrown a bad address error. */
8051 ret = MIN(strlen(real), arg3);
8052 /* We cannot NUL terminate the string. */
8053 memcpy(p2, real, ret);
8055 } else {
8056 ret = get_errno(readlink(path(p), p2, arg3));
8058 unlock_user(p2, arg2, ret);
8059 unlock_user(p, arg1, 0);
8061 break;
8062 #endif
8063 #if defined(TARGET_NR_readlinkat)
8064 case TARGET_NR_readlinkat:
8066 void *p2;
8067 p = lock_user_string(arg2);
8068 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8069 if (!p || !p2) {
8070 ret = -TARGET_EFAULT;
8071 } else if (is_proc_myself((const char *)p, "exe")) {
8072 char real[PATH_MAX], *temp;
8073 temp = realpath(exec_path, real);
8074 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8075 snprintf((char *)p2, arg4, "%s", real);
8076 } else {
8077 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8079 unlock_user(p2, arg3, ret);
8080 unlock_user(p, arg2, 0);
8082 break;
8083 #endif
8084 #ifdef TARGET_NR_uselib
8085 case TARGET_NR_uselib:
8086 goto unimplemented;
8087 #endif
8088 #ifdef TARGET_NR_swapon
8089 case TARGET_NR_swapon:
8090 if (!(p = lock_user_string(arg1)))
8091 goto efault;
8092 ret = get_errno(swapon(p, arg2));
8093 unlock_user(p, arg1, 0);
8094 break;
8095 #endif
8096 case TARGET_NR_reboot:
8097 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8098 /* arg4 must be ignored in all other cases */
8099 p = lock_user_string(arg4);
8100 if (!p) {
8101 goto efault;
8103 ret = get_errno(reboot(arg1, arg2, arg3, p));
8104 unlock_user(p, arg4, 0);
8105 } else {
8106 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8108 break;
8109 #ifdef TARGET_NR_readdir
8110 case TARGET_NR_readdir:
8111 goto unimplemented;
8112 #endif
8113 #ifdef TARGET_NR_mmap
8114 case TARGET_NR_mmap:
8115 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8116 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8117 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8118 || defined(TARGET_S390X)
8120 abi_ulong *v;
8121 abi_ulong v1, v2, v3, v4, v5, v6;
8122 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8123 goto efault;
8124 v1 = tswapal(v[0]);
8125 v2 = tswapal(v[1]);
8126 v3 = tswapal(v[2]);
8127 v4 = tswapal(v[3]);
8128 v5 = tswapal(v[4]);
8129 v6 = tswapal(v[5]);
8130 unlock_user(v, arg1, 0);
8131 ret = get_errno(target_mmap(v1, v2, v3,
8132 target_to_host_bitmask(v4, mmap_flags_tbl),
8133 v5, v6));
8135 #else
8136 ret = get_errno(target_mmap(arg1, arg2, arg3,
8137 target_to_host_bitmask(arg4, mmap_flags_tbl),
8138 arg5,
8139 arg6));
8140 #endif
8141 break;
8142 #endif
8143 #ifdef TARGET_NR_mmap2
8144 case TARGET_NR_mmap2:
8145 #ifndef MMAP_SHIFT
8146 #define MMAP_SHIFT 12
8147 #endif
8148 ret = get_errno(target_mmap(arg1, arg2, arg3,
8149 target_to_host_bitmask(arg4, mmap_flags_tbl),
8150 arg5,
8151 arg6 << MMAP_SHIFT));
8152 break;
8153 #endif
8154 case TARGET_NR_munmap:
8155 ret = get_errno(target_munmap(arg1, arg2));
8156 break;
8157 case TARGET_NR_mprotect:
8159 TaskState *ts = cpu->opaque;
8160 /* Special hack to detect libc making the stack executable. */
8161 if ((arg3 & PROT_GROWSDOWN)
8162 && arg1 >= ts->info->stack_limit
8163 && arg1 <= ts->info->start_stack) {
8164 arg3 &= ~PROT_GROWSDOWN;
8165 arg2 = arg2 + arg1 - ts->info->stack_limit;
8166 arg1 = ts->info->stack_limit;
8169 ret = get_errno(target_mprotect(arg1, arg2, arg3));
8170 break;
8171 #ifdef TARGET_NR_mremap
8172 case TARGET_NR_mremap:
8173 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8174 break;
8175 #endif
8176 /* ??? msync/mlock/munlock are broken for softmmu. */
8177 #ifdef TARGET_NR_msync
8178 case TARGET_NR_msync:
8179 ret = get_errno(msync(g2h(arg1), arg2, arg3));
8180 break;
8181 #endif
8182 #ifdef TARGET_NR_mlock
8183 case TARGET_NR_mlock:
8184 ret = get_errno(mlock(g2h(arg1), arg2));
8185 break;
8186 #endif
8187 #ifdef TARGET_NR_munlock
8188 case TARGET_NR_munlock:
8189 ret = get_errno(munlock(g2h(arg1), arg2));
8190 break;
8191 #endif
8192 #ifdef TARGET_NR_mlockall
8193 case TARGET_NR_mlockall:
8194 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8195 break;
8196 #endif
8197 #ifdef TARGET_NR_munlockall
8198 case TARGET_NR_munlockall:
8199 ret = get_errno(munlockall());
8200 break;
8201 #endif
8202 case TARGET_NR_truncate:
8203 if (!(p = lock_user_string(arg1)))
8204 goto efault;
8205 ret = get_errno(truncate(p, arg2));
8206 unlock_user(p, arg1, 0);
8207 break;
8208 case TARGET_NR_ftruncate:
8209 ret = get_errno(ftruncate(arg1, arg2));
8210 break;
8211 case TARGET_NR_fchmod:
8212 ret = get_errno(fchmod(arg1, arg2));
8213 break;
8214 #if defined(TARGET_NR_fchmodat)
8215 case TARGET_NR_fchmodat:
8216 if (!(p = lock_user_string(arg2)))
8217 goto efault;
8218 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8219 unlock_user(p, arg2, 0);
8220 break;
8221 #endif
8222 case TARGET_NR_getpriority:
8223 /* Note that negative values are valid for getpriority, so we must
8224 differentiate based on errno settings. */
8225 errno = 0;
8226 ret = getpriority(arg1, arg2);
8227 if (ret == -1 && errno != 0) {
8228 ret = -host_to_target_errno(errno);
8229 break;
8231 #ifdef TARGET_ALPHA
8232 /* Return value is the unbiased priority. Signal no error. */
8233 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8234 #else
8235 /* Return value is a biased priority to avoid negative numbers. */
8236 ret = 20 - ret;
8237 #endif
8238 break;
8239 case TARGET_NR_setpriority:
8240 ret = get_errno(setpriority(arg1, arg2, arg3));
8241 break;
8242 #ifdef TARGET_NR_profil
8243 case TARGET_NR_profil:
8244 goto unimplemented;
8245 #endif
8246 case TARGET_NR_statfs:
8247 if (!(p = lock_user_string(arg1)))
8248 goto efault;
8249 ret = get_errno(statfs(path(p), &stfs));
8250 unlock_user(p, arg1, 0);
8251 convert_statfs:
8252 if (!is_error(ret)) {
8253 struct target_statfs *target_stfs;
8255 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8256 goto efault;
8257 __put_user(stfs.f_type, &target_stfs->f_type);
8258 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8259 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8260 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8261 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8262 __put_user(stfs.f_files, &target_stfs->f_files);
8263 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8264 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8265 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8266 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8267 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8268 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8269 unlock_user_struct(target_stfs, arg2, 1);
8271 break;
8272 case TARGET_NR_fstatfs:
8273 ret = get_errno(fstatfs(arg1, &stfs));
8274 goto convert_statfs;
8275 #ifdef TARGET_NR_statfs64
8276 case TARGET_NR_statfs64:
8277 if (!(p = lock_user_string(arg1)))
8278 goto efault;
8279 ret = get_errno(statfs(path(p), &stfs));
8280 unlock_user(p, arg1, 0);
8281 convert_statfs64:
8282 if (!is_error(ret)) {
8283 struct target_statfs64 *target_stfs;
8285 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8286 goto efault;
8287 __put_user(stfs.f_type, &target_stfs->f_type);
8288 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8289 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8290 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8291 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8292 __put_user(stfs.f_files, &target_stfs->f_files);
8293 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8294 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8295 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8296 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8297 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8298 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8299 unlock_user_struct(target_stfs, arg3, 1);
8301 break;
8302 case TARGET_NR_fstatfs64:
8303 ret = get_errno(fstatfs(arg1, &stfs));
8304 goto convert_statfs64;
8305 #endif
8306 #ifdef TARGET_NR_ioperm
8307 case TARGET_NR_ioperm:
8308 goto unimplemented;
8309 #endif
8310 #ifdef TARGET_NR_socketcall
8311 case TARGET_NR_socketcall:
8312 ret = do_socketcall(arg1, arg2);
8313 break;
8314 #endif
8315 #ifdef TARGET_NR_accept
8316 case TARGET_NR_accept:
8317 ret = do_accept4(arg1, arg2, arg3, 0);
8318 break;
8319 #endif
8320 #ifdef TARGET_NR_accept4
8321 case TARGET_NR_accept4:
8322 #ifdef CONFIG_ACCEPT4
8323 ret = do_accept4(arg1, arg2, arg3, arg4);
8324 #else
8325 goto unimplemented;
8326 #endif
8327 break;
8328 #endif
8329 #ifdef TARGET_NR_bind
8330 case TARGET_NR_bind:
8331 ret = do_bind(arg1, arg2, arg3);
8332 break;
8333 #endif
8334 #ifdef TARGET_NR_connect
8335 case TARGET_NR_connect:
8336 ret = do_connect(arg1, arg2, arg3);
8337 break;
8338 #endif
8339 #ifdef TARGET_NR_getpeername
8340 case TARGET_NR_getpeername:
8341 ret = do_getpeername(arg1, arg2, arg3);
8342 break;
8343 #endif
8344 #ifdef TARGET_NR_getsockname
8345 case TARGET_NR_getsockname:
8346 ret = do_getsockname(arg1, arg2, arg3);
8347 break;
8348 #endif
8349 #ifdef TARGET_NR_getsockopt
8350 case TARGET_NR_getsockopt:
8351 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8352 break;
8353 #endif
8354 #ifdef TARGET_NR_listen
8355 case TARGET_NR_listen:
8356 ret = get_errno(listen(arg1, arg2));
8357 break;
8358 #endif
8359 #ifdef TARGET_NR_recv
8360 case TARGET_NR_recv:
8361 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8362 break;
8363 #endif
8364 #ifdef TARGET_NR_recvfrom
8365 case TARGET_NR_recvfrom:
8366 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8367 break;
8368 #endif
8369 #ifdef TARGET_NR_recvmsg
8370 case TARGET_NR_recvmsg:
8371 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
8372 break;
8373 #endif
8374 #ifdef TARGET_NR_send
8375 case TARGET_NR_send:
8376 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8377 break;
8378 #endif
8379 #ifdef TARGET_NR_sendmsg
8380 case TARGET_NR_sendmsg:
8381 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
8382 break;
8383 #endif
8384 #ifdef TARGET_NR_sendmmsg
8385 case TARGET_NR_sendmmsg:
8386 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8387 break;
8388 case TARGET_NR_recvmmsg:
8389 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8390 break;
8391 #endif
8392 #ifdef TARGET_NR_sendto
8393 case TARGET_NR_sendto:
8394 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8395 break;
8396 #endif
8397 #ifdef TARGET_NR_shutdown
8398 case TARGET_NR_shutdown:
8399 ret = get_errno(shutdown(arg1, arg2));
8400 break;
8401 #endif
8402 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8403 case TARGET_NR_getrandom:
8404 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8405 if (!p) {
8406 goto efault;
8408 ret = get_errno(getrandom(p, arg2, arg3));
8409 unlock_user(p, arg1, ret);
8410 break;
8411 #endif
8412 #ifdef TARGET_NR_socket
8413 case TARGET_NR_socket:
8414 ret = do_socket(arg1, arg2, arg3);
8415 fd_trans_unregister(ret);
8416 break;
8417 #endif
8418 #ifdef TARGET_NR_socketpair
8419 case TARGET_NR_socketpair:
8420 ret = do_socketpair(arg1, arg2, arg3, arg4);
8421 break;
8422 #endif
8423 #ifdef TARGET_NR_setsockopt
8424 case TARGET_NR_setsockopt:
8425 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8426 break;
8427 #endif
8429 case TARGET_NR_syslog:
8430 if (!(p = lock_user_string(arg2)))
8431 goto efault;
8432 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8433 unlock_user(p, arg2, 0);
8434 break;
8436 case TARGET_NR_setitimer:
8438 struct itimerval value, ovalue, *pvalue;
8440 if (arg2) {
8441 pvalue = &value;
8442 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8443 || copy_from_user_timeval(&pvalue->it_value,
8444 arg2 + sizeof(struct target_timeval)))
8445 goto efault;
8446 } else {
8447 pvalue = NULL;
8449 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8450 if (!is_error(ret) && arg3) {
8451 if (copy_to_user_timeval(arg3,
8452 &ovalue.it_interval)
8453 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8454 &ovalue.it_value))
8455 goto efault;
8458 break;
8459 case TARGET_NR_getitimer:
8461 struct itimerval value;
8463 ret = get_errno(getitimer(arg1, &value));
8464 if (!is_error(ret) && arg2) {
8465 if (copy_to_user_timeval(arg2,
8466 &value.it_interval)
8467 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8468 &value.it_value))
8469 goto efault;
8472 break;
8473 #ifdef TARGET_NR_stat
8474 case TARGET_NR_stat:
8475 if (!(p = lock_user_string(arg1)))
8476 goto efault;
8477 ret = get_errno(stat(path(p), &st));
8478 unlock_user(p, arg1, 0);
8479 goto do_stat;
8480 #endif
8481 #ifdef TARGET_NR_lstat
8482 case TARGET_NR_lstat:
8483 if (!(p = lock_user_string(arg1)))
8484 goto efault;
8485 ret = get_errno(lstat(path(p), &st));
8486 unlock_user(p, arg1, 0);
8487 goto do_stat;
8488 #endif
8489 case TARGET_NR_fstat:
8491 ret = get_errno(fstat(arg1, &st));
8492 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8493 do_stat:
8494 #endif
8495 if (!is_error(ret)) {
8496 struct target_stat *target_st;
8498 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8499 goto efault;
8500 memset(target_st, 0, sizeof(*target_st));
8501 __put_user(st.st_dev, &target_st->st_dev);
8502 __put_user(st.st_ino, &target_st->st_ino);
8503 __put_user(st.st_mode, &target_st->st_mode);
8504 __put_user(st.st_uid, &target_st->st_uid);
8505 __put_user(st.st_gid, &target_st->st_gid);
8506 __put_user(st.st_nlink, &target_st->st_nlink);
8507 __put_user(st.st_rdev, &target_st->st_rdev);
8508 __put_user(st.st_size, &target_st->st_size);
8509 __put_user(st.st_blksize, &target_st->st_blksize);
8510 __put_user(st.st_blocks, &target_st->st_blocks);
8511 __put_user(st.st_atime, &target_st->target_st_atime);
8512 __put_user(st.st_mtime, &target_st->target_st_mtime);
8513 __put_user(st.st_ctime, &target_st->target_st_ctime);
8514 unlock_user_struct(target_st, arg2, 1);
8517 break;
8518 #ifdef TARGET_NR_olduname
8519 case TARGET_NR_olduname:
8520 goto unimplemented;
8521 #endif
8522 #ifdef TARGET_NR_iopl
8523 case TARGET_NR_iopl:
8524 goto unimplemented;
8525 #endif
8526 case TARGET_NR_vhangup:
8527 ret = get_errno(vhangup());
8528 break;
8529 #ifdef TARGET_NR_idle
8530 case TARGET_NR_idle:
8531 goto unimplemented;
8532 #endif
8533 #ifdef TARGET_NR_syscall
8534 case TARGET_NR_syscall:
8535 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8536 arg6, arg7, arg8, 0);
8537 break;
8538 #endif
8539 case TARGET_NR_wait4:
8541 int status;
8542 abi_long status_ptr = arg2;
8543 struct rusage rusage, *rusage_ptr;
8544 abi_ulong target_rusage = arg4;
8545 abi_long rusage_err;
8546 if (target_rusage)
8547 rusage_ptr = &rusage;
8548 else
8549 rusage_ptr = NULL;
8550 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8551 if (!is_error(ret)) {
8552 if (status_ptr && ret) {
8553 status = host_to_target_waitstatus(status);
8554 if (put_user_s32(status, status_ptr))
8555 goto efault;
8557 if (target_rusage) {
8558 rusage_err = host_to_target_rusage(target_rusage, &rusage);
8559 if (rusage_err) {
8560 ret = rusage_err;
8565 break;
8566 #ifdef TARGET_NR_swapoff
8567 case TARGET_NR_swapoff:
8568 if (!(p = lock_user_string(arg1)))
8569 goto efault;
8570 ret = get_errno(swapoff(p));
8571 unlock_user(p, arg1, 0);
8572 break;
8573 #endif
8574 case TARGET_NR_sysinfo:
8576 struct target_sysinfo *target_value;
8577 struct sysinfo value;
8578 ret = get_errno(sysinfo(&value));
8579 if (!is_error(ret) && arg1)
8581 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8582 goto efault;
8583 __put_user(value.uptime, &target_value->uptime);
8584 __put_user(value.loads[0], &target_value->loads[0]);
8585 __put_user(value.loads[1], &target_value->loads[1]);
8586 __put_user(value.loads[2], &target_value->loads[2]);
8587 __put_user(value.totalram, &target_value->totalram);
8588 __put_user(value.freeram, &target_value->freeram);
8589 __put_user(value.sharedram, &target_value->sharedram);
8590 __put_user(value.bufferram, &target_value->bufferram);
8591 __put_user(value.totalswap, &target_value->totalswap);
8592 __put_user(value.freeswap, &target_value->freeswap);
8593 __put_user(value.procs, &target_value->procs);
8594 __put_user(value.totalhigh, &target_value->totalhigh);
8595 __put_user(value.freehigh, &target_value->freehigh);
8596 __put_user(value.mem_unit, &target_value->mem_unit);
8597 unlock_user_struct(target_value, arg1, 1);
8600 break;
8601 #ifdef TARGET_NR_ipc
8602 case TARGET_NR_ipc:
8603 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
8604 break;
8605 #endif
8606 #ifdef TARGET_NR_semget
8607 case TARGET_NR_semget:
8608 ret = get_errno(semget(arg1, arg2, arg3));
8609 break;
8610 #endif
8611 #ifdef TARGET_NR_semop
8612 case TARGET_NR_semop:
8613 ret = do_semop(arg1, arg2, arg3);
8614 break;
8615 #endif
8616 #ifdef TARGET_NR_semctl
8617 case TARGET_NR_semctl:
8618 ret = do_semctl(arg1, arg2, arg3, arg4);
8619 break;
8620 #endif
8621 #ifdef TARGET_NR_msgctl
8622 case TARGET_NR_msgctl:
8623 ret = do_msgctl(arg1, arg2, arg3);
8624 break;
8625 #endif
8626 #ifdef TARGET_NR_msgget
8627 case TARGET_NR_msgget:
8628 ret = get_errno(msgget(arg1, arg2));
8629 break;
8630 #endif
8631 #ifdef TARGET_NR_msgrcv
8632 case TARGET_NR_msgrcv:
8633 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8634 break;
8635 #endif
8636 #ifdef TARGET_NR_msgsnd
8637 case TARGET_NR_msgsnd:
8638 ret = do_msgsnd(arg1, arg2, arg3, arg4);
8639 break;
8640 #endif
8641 #ifdef TARGET_NR_shmget
8642 case TARGET_NR_shmget:
8643 ret = get_errno(shmget(arg1, arg2, arg3));
8644 break;
8645 #endif
8646 #ifdef TARGET_NR_shmctl
8647 case TARGET_NR_shmctl:
8648 ret = do_shmctl(arg1, arg2, arg3);
8649 break;
8650 #endif
8651 #ifdef TARGET_NR_shmat
8652 case TARGET_NR_shmat:
8653 ret = do_shmat(arg1, arg2, arg3);
8654 break;
8655 #endif
8656 #ifdef TARGET_NR_shmdt
8657 case TARGET_NR_shmdt:
8658 ret = do_shmdt(arg1);
8659 break;
8660 #endif
8661 case TARGET_NR_fsync:
8662 ret = get_errno(fsync(arg1));
8663 break;
8664 case TARGET_NR_clone:
8665 /* Linux manages to have three different orderings for its
8666 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8667 * match the kernel's CONFIG_CLONE_* settings.
8668 * Microblaze is further special in that it uses a sixth
8669 * implicit argument to clone for the TLS pointer.
8671 #if defined(TARGET_MICROBLAZE)
8672 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8673 #elif defined(TARGET_CLONE_BACKWARDS)
8674 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8675 #elif defined(TARGET_CLONE_BACKWARDS2)
8676 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8677 #else
8678 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8679 #endif
8680 break;
8681 #ifdef __NR_exit_group
8682 /* new thread calls */
8683 case TARGET_NR_exit_group:
8684 #ifdef TARGET_GPROF
8685 _mcleanup();
8686 #endif
8687 gdb_exit(cpu_env, arg1);
8688 ret = get_errno(exit_group(arg1));
8689 break;
8690 #endif
8691 case TARGET_NR_setdomainname:
8692 if (!(p = lock_user_string(arg1)))
8693 goto efault;
8694 ret = get_errno(setdomainname(p, arg2));
8695 unlock_user(p, arg1, 0);
8696 break;
8697 case TARGET_NR_uname:
8698 /* no need to transcode because we use the linux syscall */
8700 struct new_utsname * buf;
8702 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8703 goto efault;
8704 ret = get_errno(sys_uname(buf));
8705 if (!is_error(ret)) {
8706 /* Overrite the native machine name with whatever is being
8707 emulated. */
8708 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
8709 /* Allow the user to override the reported release. */
8710 if (qemu_uname_release && *qemu_uname_release)
8711 strcpy (buf->release, qemu_uname_release);
8713 unlock_user_struct(buf, arg1, 1);
8715 break;
8716 #ifdef TARGET_I386
8717 case TARGET_NR_modify_ldt:
8718 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
8719 break;
8720 #if !defined(TARGET_X86_64)
8721 case TARGET_NR_vm86old:
8722 goto unimplemented;
8723 case TARGET_NR_vm86:
8724 ret = do_vm86(cpu_env, arg1, arg2);
8725 break;
8726 #endif
8727 #endif
8728 case TARGET_NR_adjtimex:
8729 goto unimplemented;
8730 #ifdef TARGET_NR_create_module
8731 case TARGET_NR_create_module:
8732 #endif
8733 case TARGET_NR_init_module:
8734 case TARGET_NR_delete_module:
8735 #ifdef TARGET_NR_get_kernel_syms
8736 case TARGET_NR_get_kernel_syms:
8737 #endif
8738 goto unimplemented;
8739 case TARGET_NR_quotactl:
8740 goto unimplemented;
8741 case TARGET_NR_getpgid:
8742 ret = get_errno(getpgid(arg1));
8743 break;
8744 case TARGET_NR_fchdir:
8745 ret = get_errno(fchdir(arg1));
8746 break;
8747 #ifdef TARGET_NR_bdflush /* not on x86_64 */
8748 case TARGET_NR_bdflush:
8749 goto unimplemented;
8750 #endif
8751 #ifdef TARGET_NR_sysfs
8752 case TARGET_NR_sysfs:
8753 goto unimplemented;
8754 #endif
8755 case TARGET_NR_personality:
8756 ret = get_errno(personality(arg1));
8757 break;
8758 #ifdef TARGET_NR_afs_syscall
8759 case TARGET_NR_afs_syscall:
8760 goto unimplemented;
8761 #endif
8762 #ifdef TARGET_NR__llseek /* Not on alpha */
8763 case TARGET_NR__llseek:
8765 int64_t res;
8766 #if !defined(__NR_llseek)
8767 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
8768 if (res == -1) {
8769 ret = get_errno(res);
8770 } else {
8771 ret = 0;
8773 #else
8774 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
8775 #endif
8776 if ((ret == 0) && put_user_s64(res, arg4)) {
8777 goto efault;
8780 break;
8781 #endif
8782 #ifdef TARGET_NR_getdents
8783 case TARGET_NR_getdents:
8784 #ifdef __NR_getdents
8785 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8787 struct target_dirent *target_dirp;
8788 struct linux_dirent *dirp;
8789 abi_long count = arg3;
8791 dirp = g_try_malloc(count);
8792 if (!dirp) {
8793 ret = -TARGET_ENOMEM;
8794 goto fail;
8797 ret = get_errno(sys_getdents(arg1, dirp, count));
8798 if (!is_error(ret)) {
8799 struct linux_dirent *de;
8800 struct target_dirent *tde;
8801 int len = ret;
8802 int reclen, treclen;
8803 int count1, tnamelen;
8805 count1 = 0;
8806 de = dirp;
8807 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8808 goto efault;
8809 tde = target_dirp;
8810 while (len > 0) {
8811 reclen = de->d_reclen;
8812 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
8813 assert(tnamelen >= 0);
8814 treclen = tnamelen + offsetof(struct target_dirent, d_name);
8815 assert(count1 + treclen <= count);
8816 tde->d_reclen = tswap16(treclen);
8817 tde->d_ino = tswapal(de->d_ino);
8818 tde->d_off = tswapal(de->d_off);
8819 memcpy(tde->d_name, de->d_name, tnamelen);
8820 de = (struct linux_dirent *)((char *)de + reclen);
8821 len -= reclen;
8822 tde = (struct target_dirent *)((char *)tde + treclen);
8823 count1 += treclen;
8825 ret = count1;
8826 unlock_user(target_dirp, arg2, ret);
8828 g_free(dirp);
8830 #else
8832 struct linux_dirent *dirp;
8833 abi_long count = arg3;
8835 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8836 goto efault;
8837 ret = get_errno(sys_getdents(arg1, dirp, count));
8838 if (!is_error(ret)) {
8839 struct linux_dirent *de;
8840 int len = ret;
8841 int reclen;
8842 de = dirp;
8843 while (len > 0) {
8844 reclen = de->d_reclen;
8845 if (reclen > len)
8846 break;
8847 de->d_reclen = tswap16(reclen);
8848 tswapls(&de->d_ino);
8849 tswapls(&de->d_off);
8850 de = (struct linux_dirent *)((char *)de + reclen);
8851 len -= reclen;
8854 unlock_user(dirp, arg2, ret);
8856 #endif
8857 #else
8858 /* Implement getdents in terms of getdents64 */
8860 struct linux_dirent64 *dirp;
8861 abi_long count = arg3;
8863 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8864 if (!dirp) {
8865 goto efault;
8867 ret = get_errno(sys_getdents64(arg1, dirp, count));
8868 if (!is_error(ret)) {
8869 /* Convert the dirent64 structs to target dirent. We do this
8870 * in-place, since we can guarantee that a target_dirent is no
8871 * larger than a dirent64; however this means we have to be
8872 * careful to read everything before writing in the new format.
8874 struct linux_dirent64 *de;
8875 struct target_dirent *tde;
8876 int len = ret;
8877 int tlen = 0;
8879 de = dirp;
8880 tde = (struct target_dirent *)dirp;
8881 while (len > 0) {
8882 int namelen, treclen;
8883 int reclen = de->d_reclen;
8884 uint64_t ino = de->d_ino;
8885 int64_t off = de->d_off;
8886 uint8_t type = de->d_type;
8888 namelen = strlen(de->d_name);
8889 treclen = offsetof(struct target_dirent, d_name)
8890 + namelen + 2;
8891 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8893 memmove(tde->d_name, de->d_name, namelen + 1);
8894 tde->d_ino = tswapal(ino);
8895 tde->d_off = tswapal(off);
8896 tde->d_reclen = tswap16(treclen);
8897 /* The target_dirent type is in what was formerly a padding
8898 * byte at the end of the structure:
8900 *(((char *)tde) + treclen - 1) = type;
8902 de = (struct linux_dirent64 *)((char *)de + reclen);
8903 tde = (struct target_dirent *)((char *)tde + treclen);
8904 len -= reclen;
8905 tlen += treclen;
8907 ret = tlen;
8909 unlock_user(dirp, arg2, ret);
8911 #endif
8912 break;
8913 #endif /* TARGET_NR_getdents */
8914 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8915 case TARGET_NR_getdents64:
8917 struct linux_dirent64 *dirp;
8918 abi_long count = arg3;
8919 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8920 goto efault;
8921 ret = get_errno(sys_getdents64(arg1, dirp, count));
8922 if (!is_error(ret)) {
8923 struct linux_dirent64 *de;
8924 int len = ret;
8925 int reclen;
8926 de = dirp;
8927 while (len > 0) {
8928 reclen = de->d_reclen;
8929 if (reclen > len)
8930 break;
8931 de->d_reclen = tswap16(reclen);
8932 tswap64s((uint64_t *)&de->d_ino);
8933 tswap64s((uint64_t *)&de->d_off);
8934 de = (struct linux_dirent64 *)((char *)de + reclen);
8935 len -= reclen;
8938 unlock_user(dirp, arg2, ret);
8940 break;
8941 #endif /* TARGET_NR_getdents64 */
8942 #if defined(TARGET_NR__newselect)
8943 case TARGET_NR__newselect:
8944 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8945 break;
8946 #endif
8947 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8948 # ifdef TARGET_NR_poll
8949 case TARGET_NR_poll:
8950 # endif
8951 # ifdef TARGET_NR_ppoll
8952 case TARGET_NR_ppoll:
8953 # endif
8955 struct target_pollfd *target_pfd;
8956 unsigned int nfds = arg2;
8957 int timeout = arg3;
8958 struct pollfd *pfd;
8959 unsigned int i;
8961 pfd = NULL;
8962 target_pfd = NULL;
8963 if (nfds) {
8964 target_pfd = lock_user(VERIFY_WRITE, arg1,
8965 sizeof(struct target_pollfd) * nfds, 1);
8966 if (!target_pfd) {
8967 goto efault;
8970 pfd = alloca(sizeof(struct pollfd) * nfds);
8971 for (i = 0; i < nfds; i++) {
8972 pfd[i].fd = tswap32(target_pfd[i].fd);
8973 pfd[i].events = tswap16(target_pfd[i].events);
8977 # ifdef TARGET_NR_ppoll
8978 if (num == TARGET_NR_ppoll) {
8979 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
8980 target_sigset_t *target_set;
8981 sigset_t _set, *set = &_set;
8983 if (arg3) {
8984 if (target_to_host_timespec(timeout_ts, arg3)) {
8985 unlock_user(target_pfd, arg1, 0);
8986 goto efault;
8988 } else {
8989 timeout_ts = NULL;
8992 if (arg4) {
8993 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
8994 if (!target_set) {
8995 unlock_user(target_pfd, arg1, 0);
8996 goto efault;
8998 target_to_host_sigset(set, target_set);
8999 } else {
9000 set = NULL;
9003 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts,
9004 set, SIGSET_T_SIZE));
9006 if (!is_error(ret) && arg3) {
9007 host_to_target_timespec(arg3, timeout_ts);
9009 if (arg4) {
9010 unlock_user(target_set, arg4, 0);
9012 } else
9013 # endif
9014 ret = get_errno(poll(pfd, nfds, timeout));
9016 if (!is_error(ret)) {
9017 for(i = 0; i < nfds; i++) {
9018 target_pfd[i].revents = tswap16(pfd[i].revents);
9021 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9023 break;
9024 #endif
9025 case TARGET_NR_flock:
9026 /* NOTE: the flock constant seems to be the same for every
9027 Linux platform */
9028 ret = get_errno(flock(arg1, arg2));
9029 break;
9030 case TARGET_NR_readv:
9032 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9033 if (vec != NULL) {
9034 ret = get_errno(safe_readv(arg1, vec, arg3));
9035 unlock_iovec(vec, arg2, arg3, 1);
9036 } else {
9037 ret = -host_to_target_errno(errno);
9040 break;
9041 case TARGET_NR_writev:
9043 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9044 if (vec != NULL) {
9045 ret = get_errno(safe_writev(arg1, vec, arg3));
9046 unlock_iovec(vec, arg2, arg3, 0);
9047 } else {
9048 ret = -host_to_target_errno(errno);
9051 break;
9052 case TARGET_NR_getsid:
9053 ret = get_errno(getsid(arg1));
9054 break;
9055 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9056 case TARGET_NR_fdatasync:
9057 ret = get_errno(fdatasync(arg1));
9058 break;
9059 #endif
9060 #ifdef TARGET_NR__sysctl
9061 case TARGET_NR__sysctl:
9062 /* We don't implement this, but ENOTDIR is always a safe
9063 return value. */
9064 ret = -TARGET_ENOTDIR;
9065 break;
9066 #endif
9067 case TARGET_NR_sched_getaffinity:
9069 unsigned int mask_size;
9070 unsigned long *mask;
9073 * sched_getaffinity needs multiples of ulong, so need to take
9074 * care of mismatches between target ulong and host ulong sizes.
9076 if (arg2 & (sizeof(abi_ulong) - 1)) {
9077 ret = -TARGET_EINVAL;
9078 break;
9080 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9082 mask = alloca(mask_size);
9083 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9085 if (!is_error(ret)) {
9086 if (ret > arg2) {
9087 /* More data returned than the caller's buffer will fit.
9088 * This only happens if sizeof(abi_long) < sizeof(long)
9089 * and the caller passed us a buffer holding an odd number
9090 * of abi_longs. If the host kernel is actually using the
9091 * extra 4 bytes then fail EINVAL; otherwise we can just
9092 * ignore them and only copy the interesting part.
9094 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9095 if (numcpus > arg2 * 8) {
9096 ret = -TARGET_EINVAL;
9097 break;
9099 ret = arg2;
9102 if (copy_to_user(arg3, mask, ret)) {
9103 goto efault;
9107 break;
9108 case TARGET_NR_sched_setaffinity:
9110 unsigned int mask_size;
9111 unsigned long *mask;
9114 * sched_setaffinity needs multiples of ulong, so need to take
9115 * care of mismatches between target ulong and host ulong sizes.
9117 if (arg2 & (sizeof(abi_ulong) - 1)) {
9118 ret = -TARGET_EINVAL;
9119 break;
9121 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9123 mask = alloca(mask_size);
9124 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9125 goto efault;
9127 memcpy(mask, p, arg2);
9128 unlock_user_struct(p, arg2, 0);
9130 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9132 break;
9133 case TARGET_NR_sched_setparam:
9135 struct sched_param *target_schp;
9136 struct sched_param schp;
9138 if (arg2 == 0) {
9139 return -TARGET_EINVAL;
9141 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9142 goto efault;
9143 schp.sched_priority = tswap32(target_schp->sched_priority);
9144 unlock_user_struct(target_schp, arg2, 0);
9145 ret = get_errno(sched_setparam(arg1, &schp));
9147 break;
9148 case TARGET_NR_sched_getparam:
9150 struct sched_param *target_schp;
9151 struct sched_param schp;
9153 if (arg2 == 0) {
9154 return -TARGET_EINVAL;
9156 ret = get_errno(sched_getparam(arg1, &schp));
9157 if (!is_error(ret)) {
9158 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9159 goto efault;
9160 target_schp->sched_priority = tswap32(schp.sched_priority);
9161 unlock_user_struct(target_schp, arg2, 1);
9164 break;
9165 case TARGET_NR_sched_setscheduler:
9167 struct sched_param *target_schp;
9168 struct sched_param schp;
9169 if (arg3 == 0) {
9170 return -TARGET_EINVAL;
9172 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9173 goto efault;
9174 schp.sched_priority = tswap32(target_schp->sched_priority);
9175 unlock_user_struct(target_schp, arg3, 0);
9176 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
9178 break;
9179 case TARGET_NR_sched_getscheduler:
9180 ret = get_errno(sched_getscheduler(arg1));
9181 break;
9182 case TARGET_NR_sched_yield:
9183 ret = get_errno(sched_yield());
9184 break;
9185 case TARGET_NR_sched_get_priority_max:
9186 ret = get_errno(sched_get_priority_max(arg1));
9187 break;
9188 case TARGET_NR_sched_get_priority_min:
9189 ret = get_errno(sched_get_priority_min(arg1));
9190 break;
9191 case TARGET_NR_sched_rr_get_interval:
9193 struct timespec ts;
9194 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9195 if (!is_error(ret)) {
9196 ret = host_to_target_timespec(arg2, &ts);
9199 break;
9200 case TARGET_NR_nanosleep:
9202 struct timespec req, rem;
9203 target_to_host_timespec(&req, arg1);
9204 ret = get_errno(nanosleep(&req, &rem));
9205 if (is_error(ret) && arg2) {
9206 host_to_target_timespec(arg2, &rem);
9209 break;
9210 #ifdef TARGET_NR_query_module
9211 case TARGET_NR_query_module:
9212 goto unimplemented;
9213 #endif
9214 #ifdef TARGET_NR_nfsservctl
9215 case TARGET_NR_nfsservctl:
9216 goto unimplemented;
9217 #endif
9218 case TARGET_NR_prctl:
9219 switch (arg1) {
9220 case PR_GET_PDEATHSIG:
9222 int deathsig;
9223 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9224 if (!is_error(ret) && arg2
9225 && put_user_ual(deathsig, arg2)) {
9226 goto efault;
9228 break;
9230 #ifdef PR_GET_NAME
9231 case PR_GET_NAME:
9233 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9234 if (!name) {
9235 goto efault;
9237 ret = get_errno(prctl(arg1, (unsigned long)name,
9238 arg3, arg4, arg5));
9239 unlock_user(name, arg2, 16);
9240 break;
9242 case PR_SET_NAME:
9244 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9245 if (!name) {
9246 goto efault;
9248 ret = get_errno(prctl(arg1, (unsigned long)name,
9249 arg3, arg4, arg5));
9250 unlock_user(name, arg2, 0);
9251 break;
9253 #endif
9254 default:
9255 /* Most prctl options have no pointer arguments */
9256 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9257 break;
9259 break;
9260 #ifdef TARGET_NR_arch_prctl
9261 case TARGET_NR_arch_prctl:
9262 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9263 ret = do_arch_prctl(cpu_env, arg1, arg2);
9264 break;
9265 #else
9266 goto unimplemented;
9267 #endif
9268 #endif
9269 #ifdef TARGET_NR_pread64
9270 case TARGET_NR_pread64:
9271 if (regpairs_aligned(cpu_env)) {
9272 arg4 = arg5;
9273 arg5 = arg6;
9275 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9276 goto efault;
9277 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9278 unlock_user(p, arg2, ret);
9279 break;
9280 case TARGET_NR_pwrite64:
9281 if (regpairs_aligned(cpu_env)) {
9282 arg4 = arg5;
9283 arg5 = arg6;
9285 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9286 goto efault;
9287 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9288 unlock_user(p, arg2, 0);
9289 break;
9290 #endif
9291 case TARGET_NR_getcwd:
9292 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9293 goto efault;
9294 ret = get_errno(sys_getcwd1(p, arg2));
9295 unlock_user(p, arg1, ret);
9296 break;
9297 case TARGET_NR_capget:
9298 case TARGET_NR_capset:
9300 struct target_user_cap_header *target_header;
9301 struct target_user_cap_data *target_data = NULL;
9302 struct __user_cap_header_struct header;
9303 struct __user_cap_data_struct data[2];
9304 struct __user_cap_data_struct *dataptr = NULL;
9305 int i, target_datalen;
9306 int data_items = 1;
9308 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9309 goto efault;
9311 header.version = tswap32(target_header->version);
9312 header.pid = tswap32(target_header->pid);
9314 if (header.version != _LINUX_CAPABILITY_VERSION) {
9315 /* Version 2 and up takes pointer to two user_data structs */
9316 data_items = 2;
9319 target_datalen = sizeof(*target_data) * data_items;
9321 if (arg2) {
9322 if (num == TARGET_NR_capget) {
9323 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9324 } else {
9325 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9327 if (!target_data) {
9328 unlock_user_struct(target_header, arg1, 0);
9329 goto efault;
9332 if (num == TARGET_NR_capset) {
9333 for (i = 0; i < data_items; i++) {
9334 data[i].effective = tswap32(target_data[i].effective);
9335 data[i].permitted = tswap32(target_data[i].permitted);
9336 data[i].inheritable = tswap32(target_data[i].inheritable);
9340 dataptr = data;
9343 if (num == TARGET_NR_capget) {
9344 ret = get_errno(capget(&header, dataptr));
9345 } else {
9346 ret = get_errno(capset(&header, dataptr));
9349 /* The kernel always updates version for both capget and capset */
9350 target_header->version = tswap32(header.version);
9351 unlock_user_struct(target_header, arg1, 1);
9353 if (arg2) {
9354 if (num == TARGET_NR_capget) {
9355 for (i = 0; i < data_items; i++) {
9356 target_data[i].effective = tswap32(data[i].effective);
9357 target_data[i].permitted = tswap32(data[i].permitted);
9358 target_data[i].inheritable = tswap32(data[i].inheritable);
9360 unlock_user(target_data, arg2, target_datalen);
9361 } else {
9362 unlock_user(target_data, arg2, 0);
9365 break;
9367 case TARGET_NR_sigaltstack:
9368 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
9369 break;
9371 #ifdef CONFIG_SENDFILE
9372 case TARGET_NR_sendfile:
9374 off_t *offp = NULL;
9375 off_t off;
9376 if (arg3) {
9377 ret = get_user_sal(off, arg3);
9378 if (is_error(ret)) {
9379 break;
9381 offp = &off;
9383 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9384 if (!is_error(ret) && arg3) {
9385 abi_long ret2 = put_user_sal(off, arg3);
9386 if (is_error(ret2)) {
9387 ret = ret2;
9390 break;
9392 #ifdef TARGET_NR_sendfile64
9393 case TARGET_NR_sendfile64:
9395 off_t *offp = NULL;
9396 off_t off;
9397 if (arg3) {
9398 ret = get_user_s64(off, arg3);
9399 if (is_error(ret)) {
9400 break;
9402 offp = &off;
9404 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9405 if (!is_error(ret) && arg3) {
9406 abi_long ret2 = put_user_s64(off, arg3);
9407 if (is_error(ret2)) {
9408 ret = ret2;
9411 break;
9413 #endif
9414 #else
9415 case TARGET_NR_sendfile:
9416 #ifdef TARGET_NR_sendfile64
9417 case TARGET_NR_sendfile64:
9418 #endif
9419 goto unimplemented;
9420 #endif
9422 #ifdef TARGET_NR_getpmsg
9423 case TARGET_NR_getpmsg:
9424 goto unimplemented;
9425 #endif
9426 #ifdef TARGET_NR_putpmsg
9427 case TARGET_NR_putpmsg:
9428 goto unimplemented;
9429 #endif
9430 #ifdef TARGET_NR_vfork
9431 case TARGET_NR_vfork:
9432 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
9433 0, 0, 0, 0));
9434 break;
9435 #endif
9436 #ifdef TARGET_NR_ugetrlimit
9437 case TARGET_NR_ugetrlimit:
9439 struct rlimit rlim;
9440 int resource = target_to_host_resource(arg1);
9441 ret = get_errno(getrlimit(resource, &rlim));
9442 if (!is_error(ret)) {
9443 struct target_rlimit *target_rlim;
9444 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9445 goto efault;
9446 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9447 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9448 unlock_user_struct(target_rlim, arg2, 1);
9450 break;
9452 #endif
9453 #ifdef TARGET_NR_truncate64
9454 case TARGET_NR_truncate64:
9455 if (!(p = lock_user_string(arg1)))
9456 goto efault;
9457 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9458 unlock_user(p, arg1, 0);
9459 break;
9460 #endif
9461 #ifdef TARGET_NR_ftruncate64
9462 case TARGET_NR_ftruncate64:
9463 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9464 break;
9465 #endif
9466 #ifdef TARGET_NR_stat64
9467 case TARGET_NR_stat64:
9468 if (!(p = lock_user_string(arg1)))
9469 goto efault;
9470 ret = get_errno(stat(path(p), &st));
9471 unlock_user(p, arg1, 0);
9472 if (!is_error(ret))
9473 ret = host_to_target_stat64(cpu_env, arg2, &st);
9474 break;
9475 #endif
9476 #ifdef TARGET_NR_lstat64
9477 case TARGET_NR_lstat64:
9478 if (!(p = lock_user_string(arg1)))
9479 goto efault;
9480 ret = get_errno(lstat(path(p), &st));
9481 unlock_user(p, arg1, 0);
9482 if (!is_error(ret))
9483 ret = host_to_target_stat64(cpu_env, arg2, &st);
9484 break;
9485 #endif
9486 #ifdef TARGET_NR_fstat64
9487 case TARGET_NR_fstat64:
9488 ret = get_errno(fstat(arg1, &st));
9489 if (!is_error(ret))
9490 ret = host_to_target_stat64(cpu_env, arg2, &st);
9491 break;
9492 #endif
9493 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9494 #ifdef TARGET_NR_fstatat64
9495 case TARGET_NR_fstatat64:
9496 #endif
9497 #ifdef TARGET_NR_newfstatat
9498 case TARGET_NR_newfstatat:
9499 #endif
9500 if (!(p = lock_user_string(arg2)))
9501 goto efault;
9502 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
9503 if (!is_error(ret))
9504 ret = host_to_target_stat64(cpu_env, arg3, &st);
9505 break;
9506 #endif
9507 #ifdef TARGET_NR_lchown
9508 case TARGET_NR_lchown:
9509 if (!(p = lock_user_string(arg1)))
9510 goto efault;
9511 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
9512 unlock_user(p, arg1, 0);
9513 break;
9514 #endif
9515 #ifdef TARGET_NR_getuid
9516 case TARGET_NR_getuid:
9517 ret = get_errno(high2lowuid(getuid()));
9518 break;
9519 #endif
9520 #ifdef TARGET_NR_getgid
9521 case TARGET_NR_getgid:
9522 ret = get_errno(high2lowgid(getgid()));
9523 break;
9524 #endif
9525 #ifdef TARGET_NR_geteuid
9526 case TARGET_NR_geteuid:
9527 ret = get_errno(high2lowuid(geteuid()));
9528 break;
9529 #endif
9530 #ifdef TARGET_NR_getegid
9531 case TARGET_NR_getegid:
9532 ret = get_errno(high2lowgid(getegid()));
9533 break;
9534 #endif
9535 case TARGET_NR_setreuid:
9536 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
9537 break;
9538 case TARGET_NR_setregid:
9539 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
9540 break;
9541 case TARGET_NR_getgroups:
9543 int gidsetsize = arg1;
9544 target_id *target_grouplist;
9545 gid_t *grouplist;
9546 int i;
9548 grouplist = alloca(gidsetsize * sizeof(gid_t));
9549 ret = get_errno(getgroups(gidsetsize, grouplist));
9550 if (gidsetsize == 0)
9551 break;
9552 if (!is_error(ret)) {
9553 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
9554 if (!target_grouplist)
9555 goto efault;
9556 for(i = 0;i < ret; i++)
9557 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
9558 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
9561 break;
9562 case TARGET_NR_setgroups:
9564 int gidsetsize = arg1;
9565 target_id *target_grouplist;
9566 gid_t *grouplist = NULL;
9567 int i;
9568 if (gidsetsize) {
9569 grouplist = alloca(gidsetsize * sizeof(gid_t));
9570 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
9571 if (!target_grouplist) {
9572 ret = -TARGET_EFAULT;
9573 goto fail;
9575 for (i = 0; i < gidsetsize; i++) {
9576 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
9578 unlock_user(target_grouplist, arg2, 0);
9580 ret = get_errno(setgroups(gidsetsize, grouplist));
9582 break;
9583 case TARGET_NR_fchown:
9584 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
9585 break;
9586 #if defined(TARGET_NR_fchownat)
9587 case TARGET_NR_fchownat:
9588 if (!(p = lock_user_string(arg2)))
9589 goto efault;
9590 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
9591 low2highgid(arg4), arg5));
9592 unlock_user(p, arg2, 0);
9593 break;
9594 #endif
9595 #ifdef TARGET_NR_setresuid
9596 case TARGET_NR_setresuid:
9597 ret = get_errno(sys_setresuid(low2highuid(arg1),
9598 low2highuid(arg2),
9599 low2highuid(arg3)));
9600 break;
9601 #endif
9602 #ifdef TARGET_NR_getresuid
9603 case TARGET_NR_getresuid:
9605 uid_t ruid, euid, suid;
9606 ret = get_errno(getresuid(&ruid, &euid, &suid));
9607 if (!is_error(ret)) {
9608 if (put_user_id(high2lowuid(ruid), arg1)
9609 || put_user_id(high2lowuid(euid), arg2)
9610 || put_user_id(high2lowuid(suid), arg3))
9611 goto efault;
9614 break;
9615 #endif
9616 #ifdef TARGET_NR_getresgid
9617 case TARGET_NR_setresgid:
9618 ret = get_errno(sys_setresgid(low2highgid(arg1),
9619 low2highgid(arg2),
9620 low2highgid(arg3)));
9621 break;
9622 #endif
9623 #ifdef TARGET_NR_getresgid
9624 case TARGET_NR_getresgid:
9626 gid_t rgid, egid, sgid;
9627 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9628 if (!is_error(ret)) {
9629 if (put_user_id(high2lowgid(rgid), arg1)
9630 || put_user_id(high2lowgid(egid), arg2)
9631 || put_user_id(high2lowgid(sgid), arg3))
9632 goto efault;
9635 break;
9636 #endif
9637 #ifdef TARGET_NR_chown
9638 case TARGET_NR_chown:
9639 if (!(p = lock_user_string(arg1)))
9640 goto efault;
9641 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
9642 unlock_user(p, arg1, 0);
9643 break;
9644 #endif
9645 case TARGET_NR_setuid:
9646 ret = get_errno(sys_setuid(low2highuid(arg1)));
9647 break;
9648 case TARGET_NR_setgid:
9649 ret = get_errno(sys_setgid(low2highgid(arg1)));
9650 break;
9651 case TARGET_NR_setfsuid:
9652 ret = get_errno(setfsuid(arg1));
9653 break;
9654 case TARGET_NR_setfsgid:
9655 ret = get_errno(setfsgid(arg1));
9656 break;
9658 #ifdef TARGET_NR_lchown32
9659 case TARGET_NR_lchown32:
9660 if (!(p = lock_user_string(arg1)))
9661 goto efault;
9662 ret = get_errno(lchown(p, arg2, arg3));
9663 unlock_user(p, arg1, 0);
9664 break;
9665 #endif
9666 #ifdef TARGET_NR_getuid32
9667 case TARGET_NR_getuid32:
9668 ret = get_errno(getuid());
9669 break;
9670 #endif
9672 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9673 /* Alpha specific */
9674 case TARGET_NR_getxuid:
9676 uid_t euid;
9677 euid=geteuid();
9678 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
9680 ret = get_errno(getuid());
9681 break;
9682 #endif
9683 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
9684 /* Alpha specific */
9685 case TARGET_NR_getxgid:
9687 uid_t egid;
9688 egid=getegid();
9689 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
9691 ret = get_errno(getgid());
9692 break;
9693 #endif
9694 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9695 /* Alpha specific */
9696 case TARGET_NR_osf_getsysinfo:
9697 ret = -TARGET_EOPNOTSUPP;
9698 switch (arg1) {
9699 case TARGET_GSI_IEEE_FP_CONTROL:
9701 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
9703 /* Copied from linux ieee_fpcr_to_swcr. */
9704 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
9705 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
9706 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
9707 | SWCR_TRAP_ENABLE_DZE
9708 | SWCR_TRAP_ENABLE_OVF);
9709 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
9710 | SWCR_TRAP_ENABLE_INE);
9711 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
9712 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
9714 if (put_user_u64 (swcr, arg2))
9715 goto efault;
9716 ret = 0;
9718 break;
9720 /* case GSI_IEEE_STATE_AT_SIGNAL:
9721 -- Not implemented in linux kernel.
9722 case GSI_UACPROC:
9723 -- Retrieves current unaligned access state; not much used.
9724 case GSI_PROC_TYPE:
9725 -- Retrieves implver information; surely not used.
9726 case GSI_GET_HWRPB:
9727 -- Grabs a copy of the HWRPB; surely not used.
9730 break;
9731 #endif
9732 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9733 /* Alpha specific */
9734 case TARGET_NR_osf_setsysinfo:
9735 ret = -TARGET_EOPNOTSUPP;
9736 switch (arg1) {
9737 case TARGET_SSI_IEEE_FP_CONTROL:
9739 uint64_t swcr, fpcr, orig_fpcr;
9741 if (get_user_u64 (swcr, arg2)) {
9742 goto efault;
9744 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9745 fpcr = orig_fpcr & FPCR_DYN_MASK;
9747 /* Copied from linux ieee_swcr_to_fpcr. */
9748 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
9749 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
9750 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
9751 | SWCR_TRAP_ENABLE_DZE
9752 | SWCR_TRAP_ENABLE_OVF)) << 48;
9753 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
9754 | SWCR_TRAP_ENABLE_INE)) << 57;
9755 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
9756 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
9758 cpu_alpha_store_fpcr(cpu_env, fpcr);
9759 ret = 0;
9761 break;
9763 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
9765 uint64_t exc, fpcr, orig_fpcr;
9766 int si_code;
9768 if (get_user_u64(exc, arg2)) {
9769 goto efault;
9772 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9774 /* We only add to the exception status here. */
9775 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
9777 cpu_alpha_store_fpcr(cpu_env, fpcr);
9778 ret = 0;
9780 /* Old exceptions are not signaled. */
9781 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
9783 /* If any exceptions set by this call,
9784 and are unmasked, send a signal. */
9785 si_code = 0;
9786 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
9787 si_code = TARGET_FPE_FLTRES;
9789 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
9790 si_code = TARGET_FPE_FLTUND;
9792 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
9793 si_code = TARGET_FPE_FLTOVF;
9795 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
9796 si_code = TARGET_FPE_FLTDIV;
9798 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
9799 si_code = TARGET_FPE_FLTINV;
9801 if (si_code != 0) {
9802 target_siginfo_t info;
9803 info.si_signo = SIGFPE;
9804 info.si_errno = 0;
9805 info.si_code = si_code;
9806 info._sifields._sigfault._addr
9807 = ((CPUArchState *)cpu_env)->pc;
9808 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9811 break;
9813 /* case SSI_NVPAIRS:
9814 -- Used with SSIN_UACPROC to enable unaligned accesses.
9815 case SSI_IEEE_STATE_AT_SIGNAL:
9816 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9817 -- Not implemented in linux kernel
9820 break;
9821 #endif
9822 #ifdef TARGET_NR_osf_sigprocmask
9823 /* Alpha specific. */
9824 case TARGET_NR_osf_sigprocmask:
9826 abi_ulong mask;
9827 int how;
9828 sigset_t set, oldset;
9830 switch(arg1) {
9831 case TARGET_SIG_BLOCK:
9832 how = SIG_BLOCK;
9833 break;
9834 case TARGET_SIG_UNBLOCK:
9835 how = SIG_UNBLOCK;
9836 break;
9837 case TARGET_SIG_SETMASK:
9838 how = SIG_SETMASK;
9839 break;
9840 default:
9841 ret = -TARGET_EINVAL;
9842 goto fail;
9844 mask = arg2;
9845 target_to_host_old_sigset(&set, &mask);
9846 ret = do_sigprocmask(how, &set, &oldset);
9847 if (!ret) {
9848 host_to_target_old_sigset(&mask, &oldset);
9849 ret = mask;
9852 break;
9853 #endif
9855 #ifdef TARGET_NR_getgid32
9856 case TARGET_NR_getgid32:
9857 ret = get_errno(getgid());
9858 break;
9859 #endif
9860 #ifdef TARGET_NR_geteuid32
9861 case TARGET_NR_geteuid32:
9862 ret = get_errno(geteuid());
9863 break;
9864 #endif
9865 #ifdef TARGET_NR_getegid32
9866 case TARGET_NR_getegid32:
9867 ret = get_errno(getegid());
9868 break;
9869 #endif
9870 #ifdef TARGET_NR_setreuid32
9871 case TARGET_NR_setreuid32:
9872 ret = get_errno(setreuid(arg1, arg2));
9873 break;
9874 #endif
9875 #ifdef TARGET_NR_setregid32
9876 case TARGET_NR_setregid32:
9877 ret = get_errno(setregid(arg1, arg2));
9878 break;
9879 #endif
9880 #ifdef TARGET_NR_getgroups32
9881 case TARGET_NR_getgroups32:
9883 int gidsetsize = arg1;
9884 uint32_t *target_grouplist;
9885 gid_t *grouplist;
9886 int i;
9888 grouplist = alloca(gidsetsize * sizeof(gid_t));
9889 ret = get_errno(getgroups(gidsetsize, grouplist));
9890 if (gidsetsize == 0)
9891 break;
9892 if (!is_error(ret)) {
9893 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
9894 if (!target_grouplist) {
9895 ret = -TARGET_EFAULT;
9896 goto fail;
9898 for(i = 0;i < ret; i++)
9899 target_grouplist[i] = tswap32(grouplist[i]);
9900 unlock_user(target_grouplist, arg2, gidsetsize * 4);
9903 break;
9904 #endif
9905 #ifdef TARGET_NR_setgroups32
9906 case TARGET_NR_setgroups32:
9908 int gidsetsize = arg1;
9909 uint32_t *target_grouplist;
9910 gid_t *grouplist;
9911 int i;
9913 grouplist = alloca(gidsetsize * sizeof(gid_t));
9914 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
9915 if (!target_grouplist) {
9916 ret = -TARGET_EFAULT;
9917 goto fail;
9919 for(i = 0;i < gidsetsize; i++)
9920 grouplist[i] = tswap32(target_grouplist[i]);
9921 unlock_user(target_grouplist, arg2, 0);
9922 ret = get_errno(setgroups(gidsetsize, grouplist));
9924 break;
9925 #endif
9926 #ifdef TARGET_NR_fchown32
9927 case TARGET_NR_fchown32:
9928 ret = get_errno(fchown(arg1, arg2, arg3));
9929 break;
9930 #endif
9931 #ifdef TARGET_NR_setresuid32
9932 case TARGET_NR_setresuid32:
9933 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
9934 break;
9935 #endif
9936 #ifdef TARGET_NR_getresuid32
9937 case TARGET_NR_getresuid32:
9939 uid_t ruid, euid, suid;
9940 ret = get_errno(getresuid(&ruid, &euid, &suid));
9941 if (!is_error(ret)) {
9942 if (put_user_u32(ruid, arg1)
9943 || put_user_u32(euid, arg2)
9944 || put_user_u32(suid, arg3))
9945 goto efault;
9948 break;
9949 #endif
9950 #ifdef TARGET_NR_setresgid32
9951 case TARGET_NR_setresgid32:
9952 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
9953 break;
9954 #endif
9955 #ifdef TARGET_NR_getresgid32
9956 case TARGET_NR_getresgid32:
9958 gid_t rgid, egid, sgid;
9959 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9960 if (!is_error(ret)) {
9961 if (put_user_u32(rgid, arg1)
9962 || put_user_u32(egid, arg2)
9963 || put_user_u32(sgid, arg3))
9964 goto efault;
9967 break;
9968 #endif
9969 #ifdef TARGET_NR_chown32
9970 case TARGET_NR_chown32:
9971 if (!(p = lock_user_string(arg1)))
9972 goto efault;
9973 ret = get_errno(chown(p, arg2, arg3));
9974 unlock_user(p, arg1, 0);
9975 break;
9976 #endif
9977 #ifdef TARGET_NR_setuid32
9978 case TARGET_NR_setuid32:
9979 ret = get_errno(sys_setuid(arg1));
9980 break;
9981 #endif
9982 #ifdef TARGET_NR_setgid32
9983 case TARGET_NR_setgid32:
9984 ret = get_errno(sys_setgid(arg1));
9985 break;
9986 #endif
9987 #ifdef TARGET_NR_setfsuid32
9988 case TARGET_NR_setfsuid32:
9989 ret = get_errno(setfsuid(arg1));
9990 break;
9991 #endif
9992 #ifdef TARGET_NR_setfsgid32
9993 case TARGET_NR_setfsgid32:
9994 ret = get_errno(setfsgid(arg1));
9995 break;
9996 #endif
9998 case TARGET_NR_pivot_root:
9999 goto unimplemented;
10000 #ifdef TARGET_NR_mincore
10001 case TARGET_NR_mincore:
10003 void *a;
10004 ret = -TARGET_EFAULT;
10005 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
10006 goto efault;
10007 if (!(p = lock_user_string(arg3)))
10008 goto mincore_fail;
10009 ret = get_errno(mincore(a, arg2, p));
10010 unlock_user(p, arg3, ret);
10011 mincore_fail:
10012 unlock_user(a, arg1, 0);
10014 break;
10015 #endif
10016 #ifdef TARGET_NR_arm_fadvise64_64
10017 case TARGET_NR_arm_fadvise64_64:
10018 /* arm_fadvise64_64 looks like fadvise64_64 but
10019 * with different argument order: fd, advice, offset, len
10020 * rather than the usual fd, offset, len, advice.
10021 * Note that offset and len are both 64-bit so appear as
10022 * pairs of 32-bit registers.
10024 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10025 target_offset64(arg5, arg6), arg2);
10026 ret = -host_to_target_errno(ret);
10027 break;
10028 #endif
10030 #if TARGET_ABI_BITS == 32
10032 #ifdef TARGET_NR_fadvise64_64
10033 case TARGET_NR_fadvise64_64:
10034 /* 6 args: fd, offset (high, low), len (high, low), advice */
10035 if (regpairs_aligned(cpu_env)) {
10036 /* offset is in (3,4), len in (5,6) and advice in 7 */
10037 arg2 = arg3;
10038 arg3 = arg4;
10039 arg4 = arg5;
10040 arg5 = arg6;
10041 arg6 = arg7;
10043 ret = -host_to_target_errno(posix_fadvise(arg1,
10044 target_offset64(arg2, arg3),
10045 target_offset64(arg4, arg5),
10046 arg6));
10047 break;
10048 #endif
10050 #ifdef TARGET_NR_fadvise64
10051 case TARGET_NR_fadvise64:
10052 /* 5 args: fd, offset (high, low), len, advice */
10053 if (regpairs_aligned(cpu_env)) {
10054 /* offset is in (3,4), len in 5 and advice in 6 */
10055 arg2 = arg3;
10056 arg3 = arg4;
10057 arg4 = arg5;
10058 arg5 = arg6;
10060 ret = -host_to_target_errno(posix_fadvise(arg1,
10061 target_offset64(arg2, arg3),
10062 arg4, arg5));
10063 break;
10064 #endif
10066 #else /* not a 32-bit ABI */
10067 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10068 #ifdef TARGET_NR_fadvise64_64
10069 case TARGET_NR_fadvise64_64:
10070 #endif
10071 #ifdef TARGET_NR_fadvise64
10072 case TARGET_NR_fadvise64:
10073 #endif
10074 #ifdef TARGET_S390X
10075 switch (arg4) {
10076 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10077 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10078 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10079 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10080 default: break;
10082 #endif
10083 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10084 break;
10085 #endif
10086 #endif /* end of 64-bit ABI fadvise handling */
10088 #ifdef TARGET_NR_madvise
10089 case TARGET_NR_madvise:
10090 /* A straight passthrough may not be safe because qemu sometimes
10091 turns private file-backed mappings into anonymous mappings.
10092 This will break MADV_DONTNEED.
10093 This is a hint, so ignoring and returning success is ok. */
10094 ret = get_errno(0);
10095 break;
10096 #endif
10097 #if TARGET_ABI_BITS == 32
10098 case TARGET_NR_fcntl64:
10100 int cmd;
10101 struct flock64 fl;
10102 struct target_flock64 *target_fl;
10103 #ifdef TARGET_ARM
10104 struct target_eabi_flock64 *target_efl;
10105 #endif
10107 cmd = target_to_host_fcntl_cmd(arg2);
10108 if (cmd == -TARGET_EINVAL) {
10109 ret = cmd;
10110 break;
10113 switch(arg2) {
10114 case TARGET_F_GETLK64:
10115 #ifdef TARGET_ARM
10116 if (((CPUARMState *)cpu_env)->eabi) {
10117 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
10118 goto efault;
10119 fl.l_type = tswap16(target_efl->l_type);
10120 fl.l_whence = tswap16(target_efl->l_whence);
10121 fl.l_start = tswap64(target_efl->l_start);
10122 fl.l_len = tswap64(target_efl->l_len);
10123 fl.l_pid = tswap32(target_efl->l_pid);
10124 unlock_user_struct(target_efl, arg3, 0);
10125 } else
10126 #endif
10128 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
10129 goto efault;
10130 fl.l_type = tswap16(target_fl->l_type);
10131 fl.l_whence = tswap16(target_fl->l_whence);
10132 fl.l_start = tswap64(target_fl->l_start);
10133 fl.l_len = tswap64(target_fl->l_len);
10134 fl.l_pid = tswap32(target_fl->l_pid);
10135 unlock_user_struct(target_fl, arg3, 0);
10137 ret = get_errno(fcntl(arg1, cmd, &fl));
10138 if (ret == 0) {
10139 #ifdef TARGET_ARM
10140 if (((CPUARMState *)cpu_env)->eabi) {
10141 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
10142 goto efault;
10143 target_efl->l_type = tswap16(fl.l_type);
10144 target_efl->l_whence = tswap16(fl.l_whence);
10145 target_efl->l_start = tswap64(fl.l_start);
10146 target_efl->l_len = tswap64(fl.l_len);
10147 target_efl->l_pid = tswap32(fl.l_pid);
10148 unlock_user_struct(target_efl, arg3, 1);
10149 } else
10150 #endif
10152 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
10153 goto efault;
10154 target_fl->l_type = tswap16(fl.l_type);
10155 target_fl->l_whence = tswap16(fl.l_whence);
10156 target_fl->l_start = tswap64(fl.l_start);
10157 target_fl->l_len = tswap64(fl.l_len);
10158 target_fl->l_pid = tswap32(fl.l_pid);
10159 unlock_user_struct(target_fl, arg3, 1);
10162 break;
10164 case TARGET_F_SETLK64:
10165 case TARGET_F_SETLKW64:
10166 #ifdef TARGET_ARM
10167 if (((CPUARMState *)cpu_env)->eabi) {
10168 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
10169 goto efault;
10170 fl.l_type = tswap16(target_efl->l_type);
10171 fl.l_whence = tswap16(target_efl->l_whence);
10172 fl.l_start = tswap64(target_efl->l_start);
10173 fl.l_len = tswap64(target_efl->l_len);
10174 fl.l_pid = tswap32(target_efl->l_pid);
10175 unlock_user_struct(target_efl, arg3, 0);
10176 } else
10177 #endif
10179 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
10180 goto efault;
10181 fl.l_type = tswap16(target_fl->l_type);
10182 fl.l_whence = tswap16(target_fl->l_whence);
10183 fl.l_start = tswap64(target_fl->l_start);
10184 fl.l_len = tswap64(target_fl->l_len);
10185 fl.l_pid = tswap32(target_fl->l_pid);
10186 unlock_user_struct(target_fl, arg3, 0);
10188 ret = get_errno(fcntl(arg1, cmd, &fl));
10189 break;
10190 default:
10191 ret = do_fcntl(arg1, arg2, arg3);
10192 break;
10194 break;
10196 #endif
10197 #ifdef TARGET_NR_cacheflush
10198 case TARGET_NR_cacheflush:
10199 /* self-modifying code is handled automatically, so nothing needed */
10200 ret = 0;
10201 break;
10202 #endif
10203 #ifdef TARGET_NR_security
10204 case TARGET_NR_security:
10205 goto unimplemented;
10206 #endif
10207 #ifdef TARGET_NR_getpagesize
10208 case TARGET_NR_getpagesize:
10209 ret = TARGET_PAGE_SIZE;
10210 break;
10211 #endif
10212 case TARGET_NR_gettid:
10213 ret = get_errno(gettid());
10214 break;
10215 #ifdef TARGET_NR_readahead
10216 case TARGET_NR_readahead:
10217 #if TARGET_ABI_BITS == 32
10218 if (regpairs_aligned(cpu_env)) {
10219 arg2 = arg3;
10220 arg3 = arg4;
10221 arg4 = arg5;
10223 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
10224 #else
10225 ret = get_errno(readahead(arg1, arg2, arg3));
10226 #endif
10227 break;
10228 #endif
10229 #ifdef CONFIG_ATTR
10230 #ifdef TARGET_NR_setxattr
10231 case TARGET_NR_listxattr:
10232 case TARGET_NR_llistxattr:
10234 void *p, *b = 0;
10235 if (arg2) {
10236 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10237 if (!b) {
10238 ret = -TARGET_EFAULT;
10239 break;
10242 p = lock_user_string(arg1);
10243 if (p) {
10244 if (num == TARGET_NR_listxattr) {
10245 ret = get_errno(listxattr(p, b, arg3));
10246 } else {
10247 ret = get_errno(llistxattr(p, b, arg3));
10249 } else {
10250 ret = -TARGET_EFAULT;
10252 unlock_user(p, arg1, 0);
10253 unlock_user(b, arg2, arg3);
10254 break;
10256 case TARGET_NR_flistxattr:
10258 void *b = 0;
10259 if (arg2) {
10260 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10261 if (!b) {
10262 ret = -TARGET_EFAULT;
10263 break;
10266 ret = get_errno(flistxattr(arg1, b, arg3));
10267 unlock_user(b, arg2, arg3);
10268 break;
10270 case TARGET_NR_setxattr:
10271 case TARGET_NR_lsetxattr:
10273 void *p, *n, *v = 0;
10274 if (arg3) {
10275 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10276 if (!v) {
10277 ret = -TARGET_EFAULT;
10278 break;
10281 p = lock_user_string(arg1);
10282 n = lock_user_string(arg2);
10283 if (p && n) {
10284 if (num == TARGET_NR_setxattr) {
10285 ret = get_errno(setxattr(p, n, v, arg4, arg5));
10286 } else {
10287 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10289 } else {
10290 ret = -TARGET_EFAULT;
10292 unlock_user(p, arg1, 0);
10293 unlock_user(n, arg2, 0);
10294 unlock_user(v, arg3, 0);
10296 break;
10297 case TARGET_NR_fsetxattr:
10299 void *n, *v = 0;
10300 if (arg3) {
10301 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10302 if (!v) {
10303 ret = -TARGET_EFAULT;
10304 break;
10307 n = lock_user_string(arg2);
10308 if (n) {
10309 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10310 } else {
10311 ret = -TARGET_EFAULT;
10313 unlock_user(n, arg2, 0);
10314 unlock_user(v, arg3, 0);
10316 break;
10317 case TARGET_NR_getxattr:
10318 case TARGET_NR_lgetxattr:
10320 void *p, *n, *v = 0;
10321 if (arg3) {
10322 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10323 if (!v) {
10324 ret = -TARGET_EFAULT;
10325 break;
10328 p = lock_user_string(arg1);
10329 n = lock_user_string(arg2);
10330 if (p && n) {
10331 if (num == TARGET_NR_getxattr) {
10332 ret = get_errno(getxattr(p, n, v, arg4));
10333 } else {
10334 ret = get_errno(lgetxattr(p, n, v, arg4));
10336 } else {
10337 ret = -TARGET_EFAULT;
10339 unlock_user(p, arg1, 0);
10340 unlock_user(n, arg2, 0);
10341 unlock_user(v, arg3, arg4);
10343 break;
10344 case TARGET_NR_fgetxattr:
10346 void *n, *v = 0;
10347 if (arg3) {
10348 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10349 if (!v) {
10350 ret = -TARGET_EFAULT;
10351 break;
10354 n = lock_user_string(arg2);
10355 if (n) {
10356 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10357 } else {
10358 ret = -TARGET_EFAULT;
10360 unlock_user(n, arg2, 0);
10361 unlock_user(v, arg3, arg4);
10363 break;
10364 case TARGET_NR_removexattr:
10365 case TARGET_NR_lremovexattr:
10367 void *p, *n;
10368 p = lock_user_string(arg1);
10369 n = lock_user_string(arg2);
10370 if (p && n) {
10371 if (num == TARGET_NR_removexattr) {
10372 ret = get_errno(removexattr(p, n));
10373 } else {
10374 ret = get_errno(lremovexattr(p, n));
10376 } else {
10377 ret = -TARGET_EFAULT;
10379 unlock_user(p, arg1, 0);
10380 unlock_user(n, arg2, 0);
10382 break;
10383 case TARGET_NR_fremovexattr:
10385 void *n;
10386 n = lock_user_string(arg2);
10387 if (n) {
10388 ret = get_errno(fremovexattr(arg1, n));
10389 } else {
10390 ret = -TARGET_EFAULT;
10392 unlock_user(n, arg2, 0);
10394 break;
10395 #endif
10396 #endif /* CONFIG_ATTR */
10397 #ifdef TARGET_NR_set_thread_area
10398 case TARGET_NR_set_thread_area:
10399 #if defined(TARGET_MIPS)
10400 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10401 ret = 0;
10402 break;
10403 #elif defined(TARGET_CRIS)
10404 if (arg1 & 0xff)
10405 ret = -TARGET_EINVAL;
10406 else {
10407 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10408 ret = 0;
10410 break;
10411 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10412 ret = do_set_thread_area(cpu_env, arg1);
10413 break;
10414 #elif defined(TARGET_M68K)
10416 TaskState *ts = cpu->opaque;
10417 ts->tp_value = arg1;
10418 ret = 0;
10419 break;
10421 #else
10422 goto unimplemented_nowarn;
10423 #endif
10424 #endif
10425 #ifdef TARGET_NR_get_thread_area
10426 case TARGET_NR_get_thread_area:
10427 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10428 ret = do_get_thread_area(cpu_env, arg1);
10429 break;
10430 #elif defined(TARGET_M68K)
10432 TaskState *ts = cpu->opaque;
10433 ret = ts->tp_value;
10434 break;
10436 #else
10437 goto unimplemented_nowarn;
10438 #endif
10439 #endif
10440 #ifdef TARGET_NR_getdomainname
10441 case TARGET_NR_getdomainname:
10442 goto unimplemented_nowarn;
10443 #endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            /* Copy the host result out to the guest timespec at arg2. */
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* Copy the clock resolution out to the guest timespec at arg2. */
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
10467 #ifdef TARGET_NR_clock_nanosleep
10468 case TARGET_NR_clock_nanosleep:
10470 struct timespec ts;
10471 target_to_host_timespec(&ts, arg3);
10472 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
10473 if (arg4)
10474 host_to_target_timespec(arg4, &ts);
10476 #if defined(TARGET_PPC)
10477 /* clock_nanosleep is odd in that it returns positive errno values.
10478 * On PPC, CR0 bit 3 should be set in such a situation. */
10479 if (ret) {
10480 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10482 #endif
10483 break;
10485 #endif
10487 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10488 case TARGET_NR_set_tid_address:
10489 ret = get_errno(set_tid_address((int *)g2h(arg1)));
10490 break;
10491 #endif
    case TARGET_NR_tkill:
        /* Thread-directed signal; the signal number is translated from the
         * guest's numbering to the host's before the (safe_) syscall. */
        ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
        break;

    case TARGET_NR_tgkill:
        /* As tkill, but scoped to thread arg2 within thread group arg1. */
        ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;
10502 #ifdef TARGET_NR_set_robust_list
10503 case TARGET_NR_set_robust_list:
10504 case TARGET_NR_get_robust_list:
10505 /* The ABI for supporting robust futexes has userspace pass
10506 * the kernel a pointer to a linked list which is updated by
10507 * userspace after the syscall; the list is walked by the kernel
10508 * when the thread exits. Since the linked list in QEMU guest
10509 * memory isn't a valid linked list for the host and we have
10510 * no way to reliably intercept the thread-death event, we can't
10511 * support these. Silently return ENOSYS so that guest userspace
10512 * falls back to a non-robust futex implementation (which should
10513 * be OK except in the corner case of the guest crashing while
10514 * holding a mutex that is shared with another process via
10515 * shared memory).
10517 goto unimplemented_nowarn;
10518 #endif
10520 #if defined(TARGET_NR_utimensat)
10521 case TARGET_NR_utimensat:
10523 struct timespec *tsp, ts[2];
10524 if (!arg3) {
10525 tsp = NULL;
10526 } else {
10527 target_to_host_timespec(ts, arg3);
10528 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
10529 tsp = ts;
10531 if (!arg2)
10532 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10533 else {
10534 if (!(p = lock_user_string(arg2))) {
10535 ret = -TARGET_EFAULT;
10536 goto fail;
10538 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10539 unlock_user(p, arg2, 0);
10542 break;
10543 #endif
10544 case TARGET_NR_futex:
10545 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10546 break;
10547 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10548 case TARGET_NR_inotify_init:
10549 ret = get_errno(sys_inotify_init());
10550 break;
10551 #endif
10552 #ifdef CONFIG_INOTIFY1
10553 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10554 case TARGET_NR_inotify_init1:
10555 ret = get_errno(sys_inotify_init1(arg1));
10556 break;
10557 #endif
10558 #endif
10559 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10560 case TARGET_NR_inotify_add_watch:
10561 p = lock_user_string(arg2);
10562 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
10563 unlock_user(p, arg2, 0);
10564 break;
10565 #endif
10566 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10567 case TARGET_NR_inotify_rm_watch:
10568 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
10569 break;
10570 #endif
10572 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10573 case TARGET_NR_mq_open:
10575 struct mq_attr posix_mq_attr, *attrp;
10577 p = lock_user_string(arg1 - 1);
10578 if (arg4 != 0) {
10579 copy_from_user_mq_attr (&posix_mq_attr, arg4);
10580 attrp = &posix_mq_attr;
10581 } else {
10582 attrp = 0;
10584 ret = get_errno(mq_open(p, arg2, arg3, attrp));
10585 unlock_user (p, arg1, 0);
10587 break;
10589 case TARGET_NR_mq_unlink:
10590 p = lock_user_string(arg1 - 1);
10591 ret = get_errno(mq_unlink(p));
10592 unlock_user (p, arg1, 0);
10593 break;
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            /* Message payload is read from guest memory at arg2 (arg3 bytes). */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                /* arg5 is an absolute timeout (struct timespec *). */
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                /* No timeout pointer: block indefinitely. */
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        break;
10611 case TARGET_NR_mq_timedreceive:
10613 struct timespec ts;
10614 unsigned int prio;
10616 p = lock_user (VERIFY_READ, arg2, arg3, 1);
10617 if (arg5 != 0) {
10618 target_to_host_timespec(&ts, arg5);
10619 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10620 &prio, &ts));
10621 host_to_target_timespec(arg5, &ts);
10622 } else {
10623 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10624 &prio, NULL));
10626 unlock_user (p, arg2, arg3);
10627 if (arg4 != 0)
10628 put_user_u32(prio, arg4);
10630 break;
10632 /* Not implemented for now... */
10633 /* case TARGET_NR_mq_notify: */
10634 /* break; */
10636 case TARGET_NR_mq_getsetattr:
10638 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
10639 ret = 0;
10640 if (arg3 != 0) {
10641 ret = mq_getattr(arg1, &posix_mq_attr_out);
10642 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
10644 if (arg2 != 0) {
10645 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
10646 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
10650 break;
10651 #endif
10653 #ifdef CONFIG_SPLICE
10654 #ifdef TARGET_NR_tee
10655 case TARGET_NR_tee:
10657 ret = get_errno(tee(arg1,arg2,arg3,arg4));
10659 break;
10660 #endif
10661 #ifdef TARGET_NR_splice
10662 case TARGET_NR_splice:
10664 loff_t loff_in, loff_out;
10665 loff_t *ploff_in = NULL, *ploff_out = NULL;
10666 if (arg2) {
10667 if (get_user_u64(loff_in, arg2)) {
10668 goto efault;
10670 ploff_in = &loff_in;
10672 if (arg4) {
10673 if (get_user_u64(loff_out, arg4)) {
10674 goto efault;
10676 ploff_out = &loff_out;
10678 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
10679 if (arg2) {
10680 if (put_user_u64(loff_in, arg2)) {
10681 goto efault;
10684 if (arg4) {
10685 if (put_user_u64(loff_out, arg4)) {
10686 goto efault;
10690 break;
10691 #endif
10692 #ifdef TARGET_NR_vmsplice
10693 case TARGET_NR_vmsplice:
10695 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10696 if (vec != NULL) {
10697 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
10698 unlock_iovec(vec, arg2, arg3, 0);
10699 } else {
10700 ret = -host_to_target_errno(errno);
10703 break;
10704 #endif
10705 #endif /* CONFIG_SPLICE */
10706 #ifdef CONFIG_EVENTFD
10707 #if defined(TARGET_NR_eventfd)
10708 case TARGET_NR_eventfd:
10709 ret = get_errno(eventfd(arg1, 0));
10710 fd_trans_unregister(ret);
10711 break;
10712 #endif
10713 #if defined(TARGET_NR_eventfd2)
10714 case TARGET_NR_eventfd2:
10716 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
10717 if (arg2 & TARGET_O_NONBLOCK) {
10718 host_flags |= O_NONBLOCK;
10720 if (arg2 & TARGET_O_CLOEXEC) {
10721 host_flags |= O_CLOEXEC;
10723 ret = get_errno(eventfd(arg1, host_flags));
10724 fd_trans_unregister(ret);
10725 break;
10727 #endif
10728 #endif /* CONFIG_EVENTFD */
10729 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10730 case TARGET_NR_fallocate:
10731 #if TARGET_ABI_BITS == 32
10732 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
10733 target_offset64(arg5, arg6)));
10734 #else
10735 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
10736 #endif
10737 break;
10738 #endif
10739 #if defined(CONFIG_SYNC_FILE_RANGE)
10740 #if defined(TARGET_NR_sync_file_range)
10741 case TARGET_NR_sync_file_range:
10742 #if TARGET_ABI_BITS == 32
10743 #if defined(TARGET_MIPS)
10744 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10745 target_offset64(arg5, arg6), arg7));
10746 #else
10747 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
10748 target_offset64(arg4, arg5), arg6));
10749 #endif /* !TARGET_MIPS */
10750 #else
10751 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
10752 #endif
10753 break;
10754 #endif
10755 #if defined(TARGET_NR_sync_file_range2)
10756 case TARGET_NR_sync_file_range2:
10757 /* This is like sync_file_range but the arguments are reordered */
10758 #if TARGET_ABI_BITS == 32
10759 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10760 target_offset64(arg5, arg6), arg2));
10761 #else
10762 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
10763 #endif
10764 break;
10765 #endif
10766 #endif
10767 #if defined(TARGET_NR_signalfd4)
10768 case TARGET_NR_signalfd4:
10769 ret = do_signalfd4(arg1, arg2, arg4);
10770 break;
10771 #endif
10772 #if defined(TARGET_NR_signalfd)
10773 case TARGET_NR_signalfd:
10774 ret = do_signalfd4(arg1, arg2, 0);
10775 break;
10776 #endif
10777 #if defined(CONFIG_EPOLL)
10778 #if defined(TARGET_NR_epoll_create)
10779 case TARGET_NR_epoll_create:
10780 ret = get_errno(epoll_create(arg1));
10781 break;
10782 #endif
10783 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10784 case TARGET_NR_epoll_create1:
10785 ret = get_errno(epoll_create1(arg1));
10786 break;
10787 #endif
10788 #if defined(TARGET_NR_epoll_ctl)
10789 case TARGET_NR_epoll_ctl:
10791 struct epoll_event ep;
10792 struct epoll_event *epp = 0;
10793 if (arg4) {
10794 struct target_epoll_event *target_ep;
10795 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
10796 goto efault;
10798 ep.events = tswap32(target_ep->events);
10799 /* The epoll_data_t union is just opaque data to the kernel,
10800 * so we transfer all 64 bits across and need not worry what
10801 * actual data type it is.
10803 ep.data.u64 = tswap64(target_ep->data.u64);
10804 unlock_user_struct(target_ep, arg4, 0);
10805 epp = &ep;
10807 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
10808 break;
10810 #endif
#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* maxevents is guest-controlled and is used below both to size an
         * alloca() and to lock a guest buffer: reject non-positive values
         * (the kernel returns EINVAL for these too) and values large
         * enough to overflow the size computation or smash the host stack.
         */
        if (maxevents <= 0 ||
            maxevents > (int)(INT_MAX / sizeof(struct target_epoll_event))) {
            ret = -TARGET_EINVAL;
            break;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = alloca(maxevents * sizeof(struct epoll_event));

        switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                /* epoll_data_t is opaque to the kernel: byteswap all
                 * 64 bits without interpreting them. */
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        /* On error ret is a negative errno; don't let it be used as a
         * (huge, unsigned) copy-back length. */
        unlock_user(target_ep, arg2,
                    is_error(ret) ? 0
                                  : ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
10880 #endif
10881 #ifdef TARGET_NR_prlimit64
10882 case TARGET_NR_prlimit64:
10884 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10885 struct target_rlimit64 *target_rnew, *target_rold;
10886 struct host_rlimit64 rnew, rold, *rnewp = 0;
10887 int resource = target_to_host_resource(arg2);
10888 if (arg3) {
10889 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
10890 goto efault;
10892 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
10893 rnew.rlim_max = tswap64(target_rnew->rlim_max);
10894 unlock_user_struct(target_rnew, arg3, 0);
10895 rnewp = &rnew;
10898 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
10899 if (!is_error(ret) && arg4) {
10900 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
10901 goto efault;
10903 target_rold->rlim_cur = tswap64(rold.rlim_cur);
10904 target_rold->rlim_max = tswap64(rold.rlim_max);
10905 unlock_user_struct(target_rold, arg4, 1);
10907 break;
10909 #endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* The hostname is written straight into the guest buffer at arg1
         * (arg2 bytes), so lock it for writing with no copy-in. */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
10923 #ifdef TARGET_NR_atomic_cmpxchg_32
10924 case TARGET_NR_atomic_cmpxchg_32:
10926 /* should use start_exclusive from main.c */
10927 abi_ulong mem_value;
10928 if (get_user_u32(mem_value, arg6)) {
10929 target_siginfo_t info;
10930 info.si_signo = SIGSEGV;
10931 info.si_errno = 0;
10932 info.si_code = TARGET_SEGV_MAPERR;
10933 info._sifields._sigfault._addr = arg6;
10934 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10935 ret = 0xdeadbeef;
10938 if (mem_value == arg2)
10939 put_user_u32(arg1, arg6);
10940 ret = mem_value;
10941 break;
10943 #endif
10944 #ifdef TARGET_NR_atomic_barrier
10945 case TARGET_NR_atomic_barrier:
10947 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10948 ret = 0;
10949 break;
10951 #endif
10953 #ifdef TARGET_NR_timer_create
10954 case TARGET_NR_timer_create:
10956 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10958 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
10960 int clkid = arg1;
10961 int timer_index = next_free_host_timer();
10963 if (timer_index < 0) {
10964 ret = -TARGET_EAGAIN;
10965 } else {
10966 timer_t *phtimer = g_posix_timers + timer_index;
10968 if (arg2) {
10969 phost_sevp = &host_sevp;
10970 ret = target_to_host_sigevent(phost_sevp, arg2);
10971 if (ret != 0) {
10972 break;
10976 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
10977 if (ret) {
10978 phtimer = NULL;
10979 } else {
10980 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
10981 goto efault;
10985 break;
10987 #endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            /* new_value may not be NULL. */
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            target_to_host_itimerspec(&hspec_new, arg3);
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* The previous timer value must be copied out to *old_value
             * (arg4) -- the old code wrote it to arg2, the flags argument
             * -- and only when the guest actually passed a pointer.
             */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif
11013 #ifdef TARGET_NR_timer_gettime
11014 case TARGET_NR_timer_gettime:
11016 /* args: timer_t timerid, struct itimerspec *curr_value */
11017 target_timer_t timerid = get_timer_id(arg1);
11019 if (timerid < 0) {
11020 ret = timerid;
11021 } else if (!arg2) {
11022 ret = -TARGET_EFAULT;
11023 } else {
11024 timer_t htimer = g_posix_timers[timerid];
11025 struct itimerspec hspec;
11026 ret = get_errno(timer_gettime(htimer, &hspec));
11028 if (host_to_target_itimerspec(arg2, &hspec)) {
11029 ret = -TARGET_EFAULT;
11032 break;
11034 #endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        /* NOTE(review): ret here is an overrun count, not a file
         * descriptor, so this fd_trans_unregister() call looks misplaced
         * -- confirm whether it belongs to a neighboring fd-returning
         * syscall and should be removed here. */
        fd_trans_unregister(ret);
        break;
    }
#endif
11053 #ifdef TARGET_NR_timer_delete
11054 case TARGET_NR_timer_delete:
11056 /* args: timer_t timerid */
11057 target_timer_t timerid = get_timer_id(arg1);
11059 if (timerid < 0) {
11060 ret = timerid;
11061 } else {
11062 timer_t htimer = g_posix_timers[timerid];
11063 ret = get_errno(timer_delete(htimer));
11064 g_posix_timers[timerid] = 0;
11066 break;
11068 #endif
11070 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11071 case TARGET_NR_timerfd_create:
11072 ret = get_errno(timerfd_create(arg1,
11073 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11074 break;
11075 #endif
11077 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11078 case TARGET_NR_timerfd_gettime:
11080 struct itimerspec its_curr;
11082 ret = get_errno(timerfd_gettime(arg1, &its_curr));
11084 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11085 goto efault;
11088 break;
11089 #endif
11091 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11092 case TARGET_NR_timerfd_settime:
11094 struct itimerspec its_new, its_old, *p_new;
11096 if (arg3) {
11097 if (target_to_host_itimerspec(&its_new, arg3)) {
11098 goto efault;
11100 p_new = &its_new;
11101 } else {
11102 p_new = NULL;
11105 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11107 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11108 goto efault;
11111 break;
11112 #endif
11114 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11115 case TARGET_NR_ioprio_get:
11116 ret = get_errno(ioprio_get(arg1, arg2));
11117 break;
11118 #endif
11120 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11121 case TARGET_NR_ioprio_set:
11122 ret = get_errno(ioprio_set(arg1, arg2, arg3));
11123 break;
11124 #endif
11126 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11127 case TARGET_NR_setns:
11128 ret = get_errno(setns(arg1, arg2));
11129 break;
11130 #endif
11131 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11132 case TARGET_NR_unshare:
11133 ret = get_errno(unshare(arg1));
11134 break;
11135 #endif
11137 default:
11138 unimplemented:
11139 gemu_log("qemu: Unsupported syscall: %d\n", num);
11140 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11141 unimplemented_nowarn:
11142 #endif
11143 ret = -TARGET_ENOSYS;
11144 break;
11146 fail:
11147 #ifdef DEBUG
11148 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
11149 #endif
11150 if(do_strace)
11151 print_syscall_ret(num, ret);
11152 return ret;
11153 efault:
11154 ret = -TARGET_EFAULT;
11155 goto fail;